/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"
#include "outp.h"
#include "outpdp.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/ramht.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/bios/disp.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/devinit.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

/*******************************************************************************
 * EVO channel base class
 ******************************************************************************/

static void
gf110_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
{
	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
	nv_wr32(priv, 0x61008c, 0x00000001 << index);
}

static void
gf110_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
{
	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
	nv_wr32(priv, 0x61008c, 0x00000001 << index);
	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
}

const struct nvkm_event_func
gf110_disp_chan_uevent = {
	.ctor = nv50_disp_chan_uevent_ctor,
	.init = gf110_disp_chan_uevent_init,
	.fini = gf110_disp_chan_uevent_fini,
};

/*******************************************************************************
 * EVO DMA channel base class
 ******************************************************************************/

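/* hash a DMA object's instance offset into the channel's RAMHT so that
 * EVO methods can reference the object by handle
 */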
static int
gf110_disp_dmac_object_attach(struct nvkm_object *parent,
			      struct nvkm_object *object, u32 name)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	struct nv50_disp_chan *chan = (void *)parent;
	u32 addr = nv_gpuobj(object)->node->offset;
	u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
	return nvkm_ramht_insert(base->ramht, chan->chid, name, data);
}

static void
gf110_disp_dmac_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	nvkm_ramht_remove(base->ramht, cookie);
}

static int
gf110_disp_dmac_init(struct nvkm_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	int chid = dmac->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&dmac->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);

	/* initialise channel for dma command submission */
	nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
	nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
	nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
	nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
	nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
		nv_error(dmac, "init: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

static int
gf110_disp_dmac_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *dmac = (void *)object;
	int chid = dmac->base.chid;

	/* deactivate channel */
	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
	nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
		nv_error(dmac, "fini: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notification */
	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);

	return nv50_disp_chan_fini(&dmac->base, suspend);
}

/*******************************************************************************
 * EVO master channel object
 ******************************************************************************/

const struct nv50_disp_mthd_list
gf110_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x660080 },
		{ 0x0084, 0x660084 },
		{ 0x0088, 0x660088 },
		{ 0x008c, 0x000000 },
		{}
	}
};

const struct nv50_disp_mthd_list
gf110_disp_core_mthd_dac = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0180, 0x660180 },
		{ 0x0184, 0x660184 },
		{ 0x0188, 0x660188 },
		{ 0x0190, 0x660190 },
		{}
	}
};

const struct nv50_disp_mthd_list
gf110_disp_core_mthd_sor = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0200, 0x660200 },
		{ 0x0204, 0x660204 },
		{ 0x0208, 0x660208 },
		{ 0x0210, 0x660210 },
		{}
	}
};

const struct nv50_disp_mthd_list
gf110_disp_core_mthd_pior = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0300, 0x660300 },
		{ 0x0304, 0x660304 },
		{ 0x0308, 0x660308 },
		{ 0x0310, 0x660310 },
		{}
	}
};

static const struct nv50_disp_mthd_list
gf110_disp_core_mthd_head = {
	.mthd = 0x0300,
	.addr = 0x000300,
	.data = {
		{ 0x0400, 0x660400 },
		{ 0x0404, 0x660404 },
		{ 0x0408, 0x660408 },
		{ 0x040c, 0x66040c },
		{ 0x0410, 0x660410 },
		{ 0x0414, 0x660414 },
		{ 0x0418, 0x660418 },
		{ 0x041c, 0x66041c },
		{ 0x0420, 0x660420 },
		{ 0x0424, 0x660424 },
		{ 0x0428, 0x660428 },
		{ 0x042c, 0x66042c },
		{ 0x0430, 0x660430 },
		{ 0x0434, 0x660434 },
		{ 0x0438, 0x660438 },
		{ 0x0440, 0x660440 },
		{ 0x0444, 0x660444 },
		{ 0x0448, 0x660448 },
		{ 0x044c, 0x66044c },
		{ 0x0450, 0x660450 },
		{ 0x0454, 0x660454 },
		{ 0x0458, 0x660458 },
		{ 0x045c, 0x66045c },
		{ 0x0460, 0x660460 },
		{ 0x0468, 0x660468 },
		{ 0x046c, 0x66046c },
		{ 0x0470, 0x660470 },
		{ 0x0474, 0x660474 },
		{ 0x0480, 0x660480 },
		{ 0x0484, 0x660484 },
		{ 0x048c, 0x66048c },
		{ 0x0490, 0x660490 },
		{ 0x0494, 0x660494 },
		{ 0x0498, 0x660498 },
		{ 0x04b0, 0x6604b0 },
		{ 0x04b8, 0x6604b8 },
		{ 0x04bc, 0x6604bc },
		{ 0x04c0, 0x6604c0 },
		{ 0x04c4, 0x6604c4 },
		{ 0x04c8, 0x6604c8 },
		{ 0x04d0, 0x6604d0 },
		{ 0x04d4, 0x6604d4 },
		{ 0x04e0, 0x6604e0 },
		{ 0x04e4, 0x6604e4 },
		{ 0x04e8, 0x6604e8 },
		{ 0x04ec, 0x6604ec },
		{ 0x04f0, 0x6604f0 },
		{ 0x04f4, 0x6604f4 },
		{ 0x04f8, 0x6604f8 },
		{ 0x04fc, 0x6604fc },
		{ 0x0500, 0x660500 },
		{ 0x0504, 0x660504 },
		{ 0x0508, 0x660508 },
		{ 0x050c, 0x66050c },
		{ 0x0510, 0x660510 },
		{ 0x0514, 0x660514 },
		{ 0x0518, 0x660518 },
		{ 0x051c, 0x66051c },
		{ 0x052c, 0x66052c },
		{ 0x0530, 0x660530 },
		{ 0x054c, 0x66054c },
		{ 0x0550, 0x660550 },
		{ 0x0554, 0x660554 },
		{ 0x0558, 0x660558 },
		{ 0x055c, 0x66055c },
		{}
	}
};

static const struct nv50_disp_mthd_chan
gf110_disp_core_mthd_chan = {
	.name = "Core",
	.addr = 0x000000,
	.data = {
		{ "Global", 1, &gf110_disp_core_mthd_base },
		{    "DAC", 3, &gf110_disp_core_mthd_dac  },
		{    "SOR", 8, &gf110_disp_core_mthd_sor  },
		{   "PIOR", 4, &gf110_disp_core_mthd_pior },
		{   "HEAD", 4, &gf110_disp_core_mthd_head },
		{}
	}
};

static int
gf110_disp_core_init(struct nvkm_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;
	int ret;

	ret = nv50_disp_chan_init(&mast->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);

	/* initialise channel for dma command submission */
	nv_wr32(priv, 0x610494, mast->push);
	nv_wr32(priv, 0x610498, 0x00010000);
	nv_wr32(priv, 0x61049c, 0x00000001);
	nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
	nv_wr32(priv, 0x640000, 0x00000000);
	nv_wr32(priv, 0x610490, 0x01000013);

	/* wait for it to go inactive */
	if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
		nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
		return -EBUSY;
	}

	return 0;
}

static int
gf110_disp_core_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_dmac *mast = (void *)object;

	/* deactivate channel */
	nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
	nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
	if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
		nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notification */
	nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
	nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);

	return nv50_disp_chan_fini(&mast->base, suspend);
}

struct nv50_disp_chan_impl
gf110_disp_core_ofuncs = {
	.base.ctor = nv50_disp_core_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = gf110_disp_core_init,
	.base.fini = gf110_disp_core_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map  = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 0,
	.attach = gf110_disp_dmac_object_attach,
	.detach = gf110_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO sync channel objects
 ******************************************************************************/

static const struct nv50_disp_mthd_list
gf110_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x661080 },
		{ 0x0084, 0x661084 },
		{ 0x0088, 0x661088 },
		{ 0x008c, 0x66108c },
		{ 0x0090, 0x661090 },
		{ 0x0094, 0x661094 },
		{ 0x00a0, 0x6610a0 },
		{ 0x00a4, 0x6610a4 },
		{ 0x00c0, 0x6610c0 },
		{ 0x00c4, 0x6610c4 },
		{ 0x00c8, 0x6610c8 },
		{ 0x00cc, 0x6610cc },
		{ 0x00e0, 0x6610e0 },
		{ 0x00e4, 0x6610e4 },
		{ 0x00e8, 0x6610e8 },
		{ 0x00ec, 0x6610ec },
		{ 0x00fc, 0x6610fc },
		{ 0x0100, 0x661100 },
		{ 0x0104, 0x661104 },
		{ 0x0108, 0x661108 },
		{ 0x010c, 0x66110c },
		{ 0x0110, 0x661110 },
		{ 0x0114, 0x661114 },
		{ 0x0118, 0x661118 },
		{ 0x011c, 0x66111c },
		{ 0x0130, 0x661130 },
		{ 0x0134, 0x661134 },
		{ 0x0138, 0x661138 },
		{ 0x013c, 0x66113c },
		{ 0x0140, 0x661140 },
		{ 0x0144, 0x661144 },
		{ 0x0148, 0x661148 },
		{ 0x014c, 0x66114c },
		{ 0x0150, 0x661150 },
		{ 0x0154, 0x661154 },
		{ 0x0158, 0x661158 },
		{ 0x015c, 0x66115c },
		{ 0x0160, 0x661160 },
		{ 0x0164, 0x661164 },
		{ 0x0168, 0x661168 },
		{ 0x016c, 0x66116c },
		{}
	}
};

static const struct nv50_disp_mthd_list
gf110_disp_base_mthd_image = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0400, 0x661400 },
		{ 0x0404, 0x661404 },
		{ 0x0408, 0x661408 },
		{ 0x040c, 0x66140c },
		{ 0x0410, 0x661410 },
		{}
	}
};

const struct nv50_disp_mthd_chan
gf110_disp_base_mthd_chan = {
	.name = "Base",
	.addr = 0x001000,
	.data = {
		{ "Global", 1, &gf110_disp_base_mthd_base },
		{  "Image", 2, &gf110_disp_base_mthd_image },
		{}
	}
};

struct nv50_disp_chan_impl
gf110_disp_base_ofuncs = {
	.base.ctor = nv50_disp_base_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = gf110_disp_dmac_init,
	.base.fini = gf110_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map  = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 1,
	.attach = gf110_disp_dmac_object_attach,
	.detach = gf110_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO overlay channel objects
 ******************************************************************************/

static const struct nv50_disp_mthd_list
gf110_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.data = {
		{ 0x0080, 0x665080 },
		{ 0x0084, 0x665084 },
		{ 0x0088, 0x665088 },
		{ 0x008c, 0x66508c },
		{ 0x0090, 0x665090 },
		{ 0x0094, 0x665094 },
		{ 0x00a0, 0x6650a0 },
		{ 0x00a4, 0x6650a4 },
		{ 0x00b0, 0x6650b0 },
		{ 0x00b4, 0x6650b4 },
		{ 0x00b8, 0x6650b8 },
		{ 0x00c0, 0x6650c0 },
		{ 0x00e0, 0x6650e0 },
		{ 0x00e4, 0x6650e4 },
		{ 0x00e8, 0x6650e8 },
		{ 0x0100, 0x665100 },
		{ 0x0104, 0x665104 },
		{ 0x0108, 0x665108 },
		{ 0x010c, 0x66510c },
		{ 0x0110, 0x665110 },
		{ 0x0118, 0x665118 },
		{ 0x011c, 0x66511c },
		{ 0x0120, 0x665120 },
		{ 0x0124, 0x665124 },
		{ 0x0130, 0x665130 },
		{ 0x0134, 0x665134 },
		{ 0x0138, 0x665138 },
		{ 0x013c, 0x66513c },
		{ 0x0140, 0x665140 },
		{ 0x0144, 0x665144 },
		{ 0x0148, 0x665148 },
		{ 0x014c, 0x66514c },
		{ 0x0150, 0x665150 },
		{ 0x0154, 0x665154 },
		{ 0x0158, 0x665158 },
		{ 0x015c, 0x66515c },
		{ 0x0160, 0x665160 },
		{ 0x0164, 0x665164 },
		{ 0x0168, 0x665168 },
		{ 0x016c, 0x66516c },
		{ 0x0400, 0x665400 },
		{ 0x0408, 0x665408 },
		{ 0x040c, 0x66540c },
		{ 0x0410, 0x665410 },
		{}
	}
};

static const struct nv50_disp_mthd_chan
gf110_disp_ovly_mthd_chan = {
	.name = "Overlay",
	.addr = 0x001000,
	.data = {
		{ "Global", 1, &gf110_disp_ovly_mthd_base },
		{}
	}
};

struct nv50_disp_chan_impl
gf110_disp_ovly_ofuncs = {
	.base.ctor = nv50_disp_ovly_ctor,
	.base.dtor = nv50_disp_dmac_dtor,
	.base.init = gf110_disp_dmac_init,
	.base.fini = gf110_disp_dmac_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map  = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 5,
	.attach = gf110_disp_dmac_object_attach,
	.detach = gf110_disp_dmac_object_detach,
};

/*******************************************************************************
 * EVO PIO channel base class
 ******************************************************************************/

static int
gf110_disp_pioc_init(struct nvkm_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	int chid = pioc->base.chid;
	int ret;

	ret = nv50_disp_chan_init(&pioc->base);
	if (ret)
		return ret;

	/* enable error reporting */
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);

	/* activate channel */
	nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
		nv_error(pioc, "init: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		return -EBUSY;
	}

	return 0;
}

static int
gf110_disp_pioc_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_pioc *pioc = (void *)object;
	int chid = pioc->base.chid;

	nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
	if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
		nv_error(pioc, "timeout: 0x%08x\n",
			 nv_rd32(priv, 0x610490 + (chid * 0x10)));
		if (suspend)
			return -EBUSY;
	}

	/* disable error reporting and completion notification */
	nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
	nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);

	return nv50_disp_chan_fini(&pioc->base, suspend);
}

/*******************************************************************************
 * EVO immediate overlay channel objects
 ******************************************************************************/

struct nv50_disp_chan_impl
gf110_disp_oimm_ofuncs = {
	.base.ctor = nv50_disp_oimm_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = gf110_disp_pioc_init,
	.base.fini = gf110_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map  = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 9,
};

/*******************************************************************************
 * EVO cursor channel objects
 ******************************************************************************/

struct nv50_disp_chan_impl
gf110_disp_curs_ofuncs = {
	.base.ctor = nv50_disp_curs_ctor,
	.base.dtor = nv50_disp_pioc_dtor,
	.base.init = gf110_disp_pioc_init,
	.base.fini = gf110_disp_pioc_fini,
	.base.ntfy = nv50_disp_chan_ntfy,
	.base.map  = nv50_disp_chan_map,
	.base.rd32 = nv50_disp_chan_rd32,
	.base.wr32 = nv50_disp_chan_wr32,
	.chid = 13,
};

/*******************************************************************************
 * Base display object
 ******************************************************************************/

int
gf110_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
{
	const u32 total  = nv_rd32(priv, 0x640414 + (head * 0x300));
	const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
	const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
	union {
		struct nv04_disp_scanoutpos_v0 v0;
	} *args = data;
	int ret;

	nv_ioctl(object, "disp scanoutpos size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
		args->v0.vblanke = (blanke & 0xffff0000) >> 16;
		args->v0.hblanke = (blanke & 0x0000ffff);
		args->v0.vblanks = (blanks & 0xffff0000) >> 16;
		args->v0.hblanks = (blanks & 0x0000ffff);
		args->v0.vtotal  = ( total & 0xffff0000) >> 16;
		args->v0.htotal  = ( total & 0x0000ffff);
		args->v0.time[0] = ktime_to_ns(ktime_get());
		args->v0.vline = /* vline read locks hline */
			nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
		args->v0.time[1] = ktime_to_ns(ktime_get());
		args->v0.hline =
			nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
	} else
		return ret;

	return 0;
}

static int
gf110_disp_main_init(struct nvkm_object *object)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;
	int ret, i;
	u32 tmp;

	ret = nvkm_parent_init(&base->base);
	if (ret)
		return ret;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	for (i = 0; i < priv->head.nr; i++) {
		tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
		nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
		tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
		nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
		tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
		nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < priv->dac.nr; i++) {
		tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
		nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < priv->sor.nr; i++) {
		tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
		nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
		nv_wr32(priv, 0x6100ac, 0x00000100);
		nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
			nv_error(priv, "timeout acquiring display\n");
			return -EBUSY;
		}
	}

	/* point at display engine memory area (hash table, objects) */
	nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nv_wr32(priv, 0x610090, 0x00000000);
	nv_wr32(priv, 0x6100a0, 0x00000000);
	nv_wr32(priv, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some gk104 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	for (i = 0; i < priv->head.nr; i++)
		nv_mask(priv, 0x616308 + (i * 0x800), 0x00000111, 0x00000010);

	return 0;
}

static int
gf110_disp_main_fini(struct nvkm_object *object, bool suspend)
{
	struct nv50_disp_priv *priv = (void *)object->engine;
	struct nv50_disp_base *base = (void *)object;

	/* disable all interrupts */
	nv_wr32(priv, 0x6100b0, 0x00000000);

	return nvkm_parent_fini(&base->base, suspend);
}

struct nvkm_ofuncs
gf110_disp_main_ofuncs = {
	.ctor = nv50_disp_main_ctor,
	.dtor = nv50_disp_main_dtor,
	.init = gf110_disp_main_init,
	.fini = gf110_disp_main_fini,
	.mthd = nv50_disp_main_mthd,
	.ntfy = nvkm_disp_ntfy,
};

static struct nvkm_oclass
gf110_disp_main_oclass[] = {
	{ GF110_DISP, &gf110_disp_main_ofuncs },
	{}
};

static struct nvkm_oclass
gf110_disp_sclass[] = {
	{ GF110_DISP_CORE_CHANNEL_DMA, &gf110_disp_core_ofuncs.base },
	{ GF110_DISP_BASE_CHANNEL_DMA, &gf110_disp_base_ofuncs.base },
	{ GF110_DISP_OVERLAY_CONTROL_DMA, &gf110_disp_ovly_ofuncs.base },
	{ GF110_DISP_OVERLAY, &gf110_disp_oimm_ofuncs.base },
	{ GF110_DISP_CURSOR, &gf110_disp_curs_ofuncs.base },
	{}
};

/*******************************************************************************
 * Display engine implementation
 ******************************************************************************/

static void
gf110_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
}

static void
gf110_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
	struct nvkm_disp *disp = container_of(event, typeof(*disp), vblank);
	nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
}

const struct nvkm_event_func
gf110_disp_vblank_func = {
	.ctor = nvkm_disp_vblank_ctor,
	.init = gf110_disp_vblank_init,
	.fini = gf110_disp_vblank_fini,
};

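/* determine the nvkm_output matching a given head/output-resource pair, and
 * fetch the corresponding output path entry from the vbios display tables
 */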
static struct nvkm_output *
exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
	    u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
	    struct nvbios_outp *info)
{
	struct nvkm_bios *bios = nvkm_bios(priv);
	struct nvkm_output *outp;
	u16 mask, type;

	if (or < 4) {
		type = DCB_OUTPUT_ANALOG;
		mask = 0;
	} else {
		or -= 4;
		switch (ctrl & 0x00000f00) {
		case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
		case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
		case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
		case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
		case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
			return 0x0000;
		}
	}

	mask  = 0x00c0 & (mask << 6);
	mask |= 0x0001 << or;
	mask |= 0x0100 << head;

	list_for_each_entry(outp, &priv->base.outp, head) {
		if ((outp->info.hasht & 0xff) == type &&
		    (outp->info.hashm & mask) == mask) {
			*data = nvbios_outp_match(bios, outp->info.hasht,
							outp->info.hashm,
						  ver, hdr, cnt, len, info);
			if (!*data)
				return NULL;
			return outp;
		}
	}

	return NULL;
}

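/* find the OR currently driving a head, and execute one of the vbios
 * scripts attached to its output path
 */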
static struct nvkm_output *
exec_script(struct nv50_disp_priv *priv, int head, int id)
{
	struct nvkm_bios *bios = nvkm_bios(priv);
	struct nvkm_output *outp;
	struct nvbios_outp info;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	int or;

	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
		ctrl = nv_rd32(priv, 0x640180 + (or * 0x20));
		if (ctrl & (1 << head))
			break;
	}

	if (or == 8)
		return NULL;

	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = bios,
			.offset = info.script[id],
			.outp = &outp->info,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return outp;
}

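/* as above, but run the vbios clock-comparison script matching the given
 * pixel clock, returning the resulting output configuration in *conf
 */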
static struct nvkm_output *
exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
{
	struct nvkm_bios *bios = nvkm_bios(priv);
	struct nvkm_output *outp;
	struct nvbios_outp info1;
	struct nvbios_ocfg info2;
	u8  ver, hdr, cnt, len;
	u32 data, ctrl = 0;
	int or;

	for (or = 0; !(ctrl & (1 << head)) && or < 8; or++) {
		ctrl = nv_rd32(priv, 0x660180 + (or * 0x20));
		if (ctrl & (1 << head))
			break;
	}

	if (or == 8)
		return NULL;

	outp = exec_lookup(priv, head, or, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
	if (!outp)
		return NULL;

	switch (outp->info.type) {
	case DCB_OUTPUT_TMDS:
		*conf = (ctrl & 0x00000f00) >> 8;
		if (pclk >= 165000)
			*conf |= 0x0100;
		break;
	case DCB_OUTPUT_LVDS:
		*conf = priv->sor.lvdsconf;
		break;
	case DCB_OUTPUT_DP:
		*conf = (ctrl & 0x00000f00) >> 8;
		break;
	case DCB_OUTPUT_ANALOG:
	default:
		*conf = 0x00ff;
		break;
	}

	data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
	if (data && id < 0xff) {
		data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
		if (data) {
			struct nvbios_init init = {
				.subdev = nv_subdev(priv),
				.bios = bios,
				.offset = data,
				.outp = &outp->info,
				.crtc = head,
				.execute = 1,
			};

			nvbios_exec(&init);
		}
	}

	return outp;
}

static void
gf110_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
	exec_script(priv, head, 1);
}

static void
gf110_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
{
	struct nvkm_output *outp = exec_script(priv, head, 2);

	/* see note in nv50_disp_intr_unk20_0() */
	if (outp && outp->info.type == DCB_OUTPUT_DP) {
		struct nvkm_output_dp *outpdp = (void *)outp;
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = nvkm_bios(priv),
			.outp = &outp->info,
			.crtc = head,
			.offset = outpdp->info.script[4],
			.execute = 1,
		};

		nvbios_exec(&init);
		atomic_set(&outpdp->lt.done, 0);
	}
}

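/* program the head's pixel clock (VPLL) for the upcoming mode */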
static void
gf110_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
{
	struct nvkm_devinit *devinit = nvkm_devinit(priv);
	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
	if (pclk)
		devinit->pll_set(devinit, PLL_VPLL0 + head, pclk);
	nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
}

static void
gf110_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
			  struct dcb_output *outp)
{
	const int or = ffs(outp->or) - 1;
	const u32 ctrl = nv_rd32(priv, 0x660200 + (or   * 0x020));
	const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
	const s32 vactive = nv_rd32(priv, 0x660414 + (head * 0x300)) & 0xffff;
	const s32 vblanke = nv_rd32(priv, 0x66041c + (head * 0x300)) & 0xffff;
	const s32 vblanks = nv_rd32(priv, 0x660420 + (head * 0x300)) & 0xffff;
	const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
	const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
	const u32 hoff = (head * 0x800);
	const u32 soff = (  or * 0x800);
	const u32 loff = (link * 0x080) + soff;
	const u32 symbol = 100000;
	const u32 TU = 64;
	u32 dpctrl = nv_rd32(priv, 0x61c10c + loff);
	u32 clksor = nv_rd32(priv, 0x612300 + soff);
	u32 datarate, link_nr, link_bw, bits;
	u64 ratio, value;

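	/* number of lanes enabled on the link, and per-lane symbol rate (KHz) */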
	link_nr  = hweight32(dpctrl & 0x000f0000);
	link_bw  = (clksor & 0x007c0000) >> 18;
	link_bw *= 27000;

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	value = vblanke + vactive - vblanks - 7;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - (3 * !!(dpctrl & 0x00004000)) - (12 / link_nr);
	nv_mask(priv, 0x616620 + hoff, 0x0000ffff, value);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	value = vblanks - vblanke - 25;
	value = value * link_bw;
	do_div(value, pclk);
	value = value - ((36 / link_nr) + 3) - 1;
	nv_mask(priv, 0x616624 + hoff, 0x00ffffff, value);

	/* watermark */
	if      ((conf & 0x3c0) == 0x180) bits = 30;
	else if ((conf & 0x3c0) == 0x140) bits = 24;
	else                              bits = 18;
	datarate = (pclk * bits) / 8;

	ratio  = datarate;
	ratio *= symbol;
	do_div(ratio, link_nr * link_bw);

	value  = (symbol - ratio) * TU;
	value *= ratio;
	do_div(value, symbol);
	do_div(value, symbol);

	value += 5;
	value |= 0x08000000;

	nv_wr32(priv, 0x616610 + hoff, value);
}

static void
gf110_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
{
	struct nvkm_output *outp;
	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
	u32 conf, addr, data;

	outp = exec_clkcmp(priv, head, 0xff, pclk, &conf);
	if (!outp)
		return;

	/* see note in nv50_disp_intr_unk20_2() */
	if (outp->info.type == DCB_OUTPUT_DP) {
		u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
		switch ((sync & 0x000003c0) >> 6) {
		case 6: pclk = pclk * 30; break;
		case 5: pclk = pclk * 24; break;
		case 2:
		default:
			pclk = pclk * 18;
			break;
		}

		if (nvkm_output_dp_train(outp, pclk, true))
			ERR("link not trained before attach\n");
	} else {
		if (priv->sor.magic)
			priv->sor.magic(outp);
	}

	exec_clkcmp(priv, head, 0, pclk, &conf);

	if (outp->info.type == DCB_OUTPUT_ANALOG) {
		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
		data = 0x00000000;
	} else {
		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
		switch (outp->info.type) {
		case DCB_OUTPUT_TMDS:
			nv_mask(priv, addr, 0x007c0000, 0x00280000);
			break;
		case DCB_OUTPUT_DP:
			gf110_disp_intr_unk2_2_tu(priv, head, &outp->info);
			break;
		default:
			break;
		}
	}

	nv_mask(priv, addr, 0x00000707, data);
}

static void
gf110_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
{
	u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
	u32 conf;

	exec_clkcmp(priv, head, 1, pclk, &conf);
}

void
gf110_disp_intr_supervisor(struct work_struct *work)
{
	struct nv50_disp_priv *priv =
		container_of(work, struct nv50_disp_priv, supervisor);
	struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
	u32 mask[4];
	int head;

	nv_debug(priv, "supervisor %d\n", ffs(priv->super));
	for (head = 0; head < priv->head.nr; head++) {
		mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
		nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
	}

	if (priv->super & 0x00000001) {
		nv50_disp_mthd_chan(priv, NV_DBG_DEBUG, 0, impl->mthd.core);
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 1.0 - head %d\n", head);
			gf110_disp_intr_unk1_0(priv, head);
		}
	} else
	if (priv->super & 0x00000002) {
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 2.0 - head %d\n", head);
			gf110_disp_intr_unk2_0(priv, head);
		}
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00010000))
				continue;
			nv_debug(priv, "supervisor 2.1 - head %d\n", head);
			gf110_disp_intr_unk2_1(priv, head);
		}
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 2.2 - head %d\n", head);
			gf110_disp_intr_unk2_2(priv, head);
		}
	} else
	if (priv->super & 0x00000004) {
		for (head = 0; head < priv->head.nr; head++) {
			if (!(mask[head] & 0x00001000))
				continue;
			nv_debug(priv, "supervisor 3.0 - head %d\n", head);
			gf110_disp_intr_unk4_0(priv, head);
		}
	}

	for (head = 0; head < priv->head.nr; head++)
		nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
	nv_wr32(priv, 0x6101d0, 0x80000000);
}

static void
gf110_disp_intr_error(struct nv50_disp_priv *priv, int chid)
{
	const struct nv50_disp_impl *impl = (void *)nv_object(priv)->oclass;
	u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
	u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
	u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));

	nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
		       "0x%08x 0x%08x\n",
		 chid, (mthd & 0x0000ffc), data, mthd, unkn);

	if (chid == 0) {
		switch (mthd & 0xffc) {
		case 0x0080:
			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 0,
					    impl->mthd.core);
			break;
		default:
			break;
		}
	} else
	if (chid <= 4) {
		switch (mthd & 0xffc) {
		case 0x0080:
			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 1,
					    impl->mthd.base);
			break;
		default:
			break;
		}
	} else
	if (chid <= 8) {
		switch (mthd & 0xffc) {
		case 0x0080:
			nv50_disp_mthd_chan(priv, NV_DBG_ERROR, chid - 5,
					    impl->mthd.ovly);
			break;
		default:
			break;
		}
	}

	nv_wr32(priv, 0x61009c, (1 << chid));
	nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
}

void
gf110_disp_intr(struct nvkm_subdev *subdev)
{
	struct nv50_disp_priv *priv = (void *)subdev;
	u32 intr = nv_rd32(priv, 0x610088);
	int i;

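	/* EVO channel wakeup (awaken) notifications */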
	if (intr & 0x00000001) {
		u32 stat = nv_rd32(priv, 0x61008c);
		while (stat) {
			int chid = __ffs(stat); stat &= ~(1 << chid);
			nv50_disp_chan_uevent_send(priv, chid);
			nv_wr32(priv, 0x61008c, 1 << chid);
		}
		intr &= ~0x00000001;
	}

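	/* EVO channel error interrupts */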
	if (intr & 0x00000002) {
		u32 stat = nv_rd32(priv, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0)
			gf110_disp_intr_error(priv, chid);
		intr &= ~0x00000002;
	}

	if (intr & 0x00100000) {
		u32 stat = nv_rd32(priv, 0x6100ac);
		if (stat & 0x00000007) {
			priv->super = (stat & 0x00000007);
			schedule_work(&priv->supervisor);
			nv_wr32(priv, 0x6100ac, priv->super);
			stat &= ~0x00000007;
		}

		if (stat) {
			nv_info(priv, "unknown intr24 0x%08x\n", stat);
			nv_wr32(priv, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	for (i = 0; i < priv->head.nr; i++) {
		u32 mask = 0x01000000 << i;
		if (mask & intr) {
			u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
			if (stat & 0x00000001)
				nvkm_disp_vblank(&priv->base, i);
			nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
			nv_rd32(priv, 0x6100c0 + (i * 0x800));
		}
	}
}

static int
gf110_disp_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		struct nvkm_oclass *oclass, void *data, u32 size,
		struct nvkm_object **pobject)
{
	struct nv50_disp_priv *priv;
	int heads = nv_rd32(parent, 0x022448);
	int ret;

	ret = nvkm_disp_create(parent, engine, oclass, heads,
			       "PDISP", "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nvkm_event_init(&gf110_disp_chan_uevent, 1, 17, &priv->uevent);
	if (ret)
		return ret;

	nv_engine(priv)->sclass = gf110_disp_main_oclass;
	nv_engine(priv)->cclass = &nv50_disp_cclass;
	nv_subdev(priv)->intr = gf110_disp_intr;
	INIT_WORK(&priv->supervisor, gf110_disp_intr_supervisor);
	priv->sclass = gf110_disp_sclass;
	priv->head.nr = heads;
	priv->dac.nr = 3;
	priv->sor.nr = 4;
	priv->dac.power = nv50_dac_power;
	priv->dac.sense = nv50_dac_sense;
	priv->sor.power = nv50_sor_power;
	priv->sor.hda_eld = gf110_hda_eld;
	priv->sor.hdmi = gf110_hdmi_ctrl;
	return 0;
}

struct nvkm_oclass *
gf110_disp_outp_sclass[] = {
	&gf110_sor_dp_impl.base.base,
	NULL
};

struct nvkm_oclass *
gf110_disp_oclass = &(struct nv50_disp_impl) {
	.base.base.handle = NV_ENGINE(DISP, 0x90),
	.base.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gf110_disp_ctor,
		.dtor = _nvkm_disp_dtor,
		.init = _nvkm_disp_init,
		.fini = _nvkm_disp_fini,
	},
	.base.vblank = &gf110_disp_vblank_func,
	.base.outp =  gf110_disp_outp_sclass,
	.mthd.core = &gf110_disp_core_mthd_chan,
	.mthd.base = &gf110_disp_base_mthd_chan,
	.mthd.ovly = &gf110_disp_ovly_mthd_chan,
	.mthd.prev = -0x020000,
	.head.scanoutpos = gf110_disp_main_scanoutpos,
}.base.base;