/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/client.h>
#include <core/device.h>
#include <core/handle.h>
#include <engine/fifo.h>
#include <subdev/timer.h>

struct nv50_gr_priv {
	struct nvkm_gr base;
	spinlock_t lock;
	u32 size;
};

struct nv50_gr_chan {
	struct nvkm_gr_chan base;
};

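/* register 0x1540 apparently holds the enabled-unit mask; it is read again
 * below when walking TPs/MPs in the trap handlers and during init.
 */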
static u64
nv50_gr_units(struct nvkm_gr *gr)
{
	struct nv50_gr_priv *priv = (void *)gr;

	return nv_rd32(priv, 0x1540);
}

/*******************************************************************************
 * Graphics object classes
 ******************************************************************************/

static int
nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	struct nvkm_gpuobj *obj;
	int ret;

	ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
				 16, 16, 0, &obj);
	*pobject = nv_object(obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, nv_mclass(obj));
	nv_wo32(obj, 0x04, 0x00000000);
	nv_wo32(obj, 0x08, 0x00000000);
	nv_wo32(obj, 0x0c, 0x00000000);
	return 0;
}

static struct nvkm_ofuncs
nv50_gr_ofuncs = {
	.ctor = nv50_gr_object_ctor,
	.dtor = _nvkm_gpuobj_dtor,
	.init = _nvkm_gpuobj_init,
	.fini = _nvkm_gpuobj_fini,
	.rd32 = _nvkm_gpuobj_rd32,
	.wr32 = _nvkm_gpuobj_wr32,
};

static struct nvkm_oclass
nv50_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x5097, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
g84_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8297, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt200_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8397, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
gt215_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x8597, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{}
};

static struct nvkm_oclass
mcp89_gr_sclass[] = {
	{ 0x0030, &nv50_gr_ofuncs },
	{ 0x502d, &nv50_gr_ofuncs },
	{ 0x5039, &nv50_gr_ofuncs },
	{ 0x50c0, &nv50_gr_ofuncs },
	{ 0x85c0, &nv50_gr_ofuncs },
	{ 0x8697, &nv50_gr_ofuncs },
	{}
};

/*******************************************************************************
 * PGRAPH context
 ******************************************************************************/

static int
nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		     struct nvkm_oclass *oclass, void *data, u32 size,
		     struct nvkm_object **pobject)
{
	struct nv50_gr_priv *priv = (void *)engine;
	struct nv50_gr_chan *chan;
	int ret;

	ret = nvkm_gr_context_create(parent, engine, oclass, NULL, priv->size,
				     0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
	return 0;
}

static struct nvkm_oclass
nv50_gr_cclass = {
	.handle = NV_ENGCTX(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_context_ctor,
		.dtor = _nvkm_gr_context_dtor,
		.init = _nvkm_gr_context_init,
		.fini = _nvkm_gr_context_fini,
		.rd32 = _nvkm_gr_context_rd32,
		.wr32 = _nvkm_gr_context_wr32,
	},
};

/*******************************************************************************
 * PGRAPH engine/subdev functions
 ******************************************************************************/

static const struct nvkm_bitfield nv50_pgr_status[] = {
	{ 0x00000001, "BUSY" }, /* set when any bit is set */
	{ 0x00000002, "DISPATCH" },
	{ 0x00000004, "UNK2" },
	{ 0x00000008, "UNK3" },
	{ 0x00000010, "UNK4" },
	{ 0x00000020, "UNK5" },
	{ 0x00000040, "M2MF" },
	{ 0x00000080, "UNK7" },
	{ 0x00000100, "CTXPROG" },
	{ 0x00000200, "VFETCH" },
	{ 0x00000400, "CCACHE_PREGEOM" },
	{ 0x00000800, "STRMOUT_VATTR_POSTGEOM" },
	{ 0x00001000, "VCLIP" },
	{ 0x00002000, "RATTR_APLANE" },
	{ 0x00004000, "TRAST" },
	{ 0x00008000, "CLIPID" },
	{ 0x00010000, "ZCULL" },
	{ 0x00020000, "ENG2D" },
	{ 0x00040000, "RMASK" },
	{ 0x00080000, "TPC_RAST" },
	{ 0x00100000, "TPC_PROP" },
	{ 0x00200000, "TPC_TEX" },
	{ 0x00400000, "TPC_GEOM" },
	{ 0x00800000, "TPC_MP" },
	{ 0x01000000, "ROP" },
	{}
};

static const char *const nv50_pgr_vstatus_0[] = {
	"VFETCH", "CCACHE", "PREGEOM", "POSTGEOM", "VATTR", "STRMOUT", "VCLIP",
	NULL
};

static const char *const nv50_pgr_vstatus_1[] = {
	"TPC_RAST", "TPC_PROP", "TPC_TEX", "TPC_GEOM", "TPC_MP", NULL
};

static const char *const nv50_pgr_vstatus_2[] = {
	"RATTR", "APLANE", "TRAST", "CLIPID", "ZCULL", "ENG2D", "RMASK",
	"ROP", NULL
};

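/* each VSTATUS register appears to pack one 3-bit field per unit named
 * above; a field value of 1 is treated as "busy", both here and in the
 * idle wait of the TLB flush below.
 */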
static void
nvkm_pgr_vstatus_print(struct nv50_gr_priv *priv, int r,
		       const char *const units[], u32 status)
{
	int i;

	nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);

	for (i = 0; units[i] && status; i++) {
		if ((status & 7) == 1)
			pr_cont(" %s", units[i]);
		status >>= 3;
	}
	if (status)
		pr_cont(" (invalid: 0x%x)", status);
	pr_cont("\n");
}

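/* TLB flush for G84+: clear 0x400500 bit 0 (apparently stopping PGRAPH
 * from fetching new work), poll the VSTATUS registers until the engine
 * idles (or ~2 seconds pass), trigger the VM flush via 0x100c80, then
 * re-enable.  NV50 (0x50) and chipset 0xac never get this hooked up, see
 * nv50_gr_ctor().
 */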
static int
g84_gr_tlb_flush(struct nvkm_engine *engine)
{
	struct nvkm_timer *ptimer = nvkm_timer(engine);
	struct nv50_gr_priv *priv = (void *)engine;
	bool idle, timeout = false;
	unsigned long flags;
	u64 start;
	u32 tmp;

	spin_lock_irqsave(&priv->lock, flags);
	nv_mask(priv, 0x400500, 0x00000001, 0x00000000);

	start = ptimer->read(ptimer);
	do {
		idle = true;

		for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}

		for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
			if ((tmp & 7) == 1)
				idle = false;
		}
	} while (!idle &&
		 !(timeout = ptimer->read(ptimer) - start > 2000000000));

	if (timeout) {
		nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");

		tmp = nv_rd32(priv, 0x400700);
		nv_error(priv, "PGRAPH_STATUS  : 0x%08x", tmp);
		nvkm_bitfield_print(nv50_pgr_status, tmp);
		pr_cont("\n");

		nvkm_pgr_vstatus_print(priv, 0, nv50_pgr_vstatus_0,
				       nv_rd32(priv, 0x400380));
		nvkm_pgr_vstatus_print(priv, 1, nv50_pgr_vstatus_1,
				       nv_rd32(priv, 0x400384));
		nvkm_pgr_vstatus_print(priv, 2, nv50_pgr_vstatus_2,
				       nv_rd32(priv, 0x400388));
	}

	nv_wr32(priv, 0x100c80, 0x00000001);
	if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
		nv_error(priv, "vm flush timeout\n");
	nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->lock, flags);
	return timeout ? -EBUSY : 0;
}

static const struct nvkm_bitfield nv50_mp_exec_errors[] = {
	{ 0x01, "STACK_UNDERFLOW" },
	{ 0x02, "STACK_MISMATCH" },
	{ 0x04, "QUADON_ACTIVE" },
	{ 0x08, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x20, "PM_OVERFLOW" },
	{ 0x40, "BREAKPOINT" },
	{}
};

static const struct nvkm_bitfield nv50_mpc_traps[] = {
	{ 0x0000001, "LOCAL_LIMIT_READ" },
	{ 0x0000010, "LOCAL_LIMIT_WRITE" },
	{ 0x0000040, "STACK_LIMIT" },
	{ 0x0000100, "GLOBAL_LIMIT_READ" },
	{ 0x0001000, "GLOBAL_LIMIT_WRITE" },
	{ 0x0010000, "MP0" },
	{ 0x0020000, "MP1" },
	{ 0x0040000, "GLOBAL_LIMIT_RED" },
	{ 0x0400000, "GLOBAL_LIMIT_ATOM" },
	{ 0x4000000, "MP2" },
	{}
};

static const struct nvkm_bitfield nv50_tex_traps[] = {
	{ 0x00000001, "" }, /* any bit set? */
	{ 0x00000002, "FAULT" },
	{ 0x00000004, "STORAGE_TYPE_MISMATCH" },
	{ 0x00000008, "LINEAR_MISMATCH" },
	{ 0x00000020, "WRONG_MEMTYPE" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_m2mf[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "IN" },
	{ 0x00000004, "OUT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_vfetch[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_strmout[] = {
	{ 0x00000001, "FAULT" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_ccache[] = {
	{ 0x00000001, "FAULT" },
	{}
};

/* There must be a *lot* of these. Will take some time to gather them up. */
const struct nvkm_enum nv50_data_error_names[] = {
	{ 0x00000003, "INVALID_OPERATION", NULL },
	{ 0x00000004, "INVALID_VALUE", NULL },
	{ 0x00000005, "INVALID_ENUM", NULL },
	{ 0x00000008, "INVALID_OBJECT", NULL },
	{ 0x00000009, "READ_ONLY_OBJECT", NULL },
	{ 0x0000000a, "SUPERVISOR_OBJECT", NULL },
	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
	{ 0x0000000c, "INVALID_BITFIELD", NULL },
	{ 0x0000000d, "BEGIN_END_ACTIVE", NULL },
	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
	{ 0x00000010, "RT_DOUBLE_BIND", NULL },
	{ 0x00000011, "RT_TYPES_MISMATCH", NULL },
	{ 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
	{ 0x00000015, "FP_TOO_FEW_REGS", NULL },
	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
	{ 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
	{ 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
	{ 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
	{ 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
	{ 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
	{ 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
	{ 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
	{ 0x00000024, "VP_ZERO_INPUTS", NULL },
	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
	{ 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
	{ 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
	{ 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
	{}
};

static const struct nvkm_bitfield nv50_gr_intr_name[] = {
	{ 0x00000001, "NOTIFY" },
	{ 0x00000002, "COMPUTE_QUERY" },
	{ 0x00000010, "ILLEGAL_MTHD" },
	{ 0x00000020, "ILLEGAL_CLASS" },
	{ 0x00000040, "DOUBLE_NOTIFY" },
	{ 0x00001000, "CONTEXT_SWITCH" },
	{ 0x00010000, "BUFFER_NOTIFY" },
	{ 0x00100000, "DATA_ERROR" },
	{ 0x00200000, "TRAP" },
	{ 0x01000000, "SINGLE_STEP" },
	{}
};

static const struct nvkm_bitfield nv50_gr_trap_prop[] = {
	{ 0x00000004, "SURF_WIDTH_OVERRUN" },
	{ 0x00000008, "SURF_HEIGHT_OVERRUN" },
	{ 0x00000010, "DST2D_FAULT" },
	{ 0x00000020, "ZETA_FAULT" },
	{ 0x00000040, "RT_FAULT" },
	{ 0x00000080, "CUDA_FAULT" },
	{ 0x00000100, "DST2D_STORAGE_TYPE_MISMATCH" },
	{ 0x00000200, "ZETA_STORAGE_TYPE_MISMATCH" },
	{ 0x00000400, "RT_STORAGE_TYPE_MISMATCH" },
	{ 0x00000800, "DST2D_LINEAR_MISMATCH" },
	{ 0x00001000, "RT_LINEAR_MISMATCH" },
	{}
};

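/* dump the PROP trap state; the registers at ustatus_addr + 0x04..0x1c
 * appear to carry the fault address (e14:e10) and, for CUDA faults, bits
 * in e18 that apparently distinguish g[] reads from writes.
 */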
static void
nv50_priv_prop_trap(struct nv50_gr_priv *priv,
		    u32 ustatus_addr, u32 ustatus, u32 tp)
{
	u32 e0c = nv_rd32(priv, ustatus_addr + 0x04);
	u32 e10 = nv_rd32(priv, ustatus_addr + 0x08);
	u32 e14 = nv_rd32(priv, ustatus_addr + 0x0c);
	u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
	u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
	u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
	u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);

	/* CUDA memory: l[], g[] or stack. */
	if (ustatus & 0x00000080) {
		if (e18 & 0x80000000) {
			/* g[] read fault? */
			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global read fault at address %02x%08x\n",
					 tp, e14, e10 | ((e18 >> 24) & 0x1f));
			e18 &= ~0x1f000000;
		} else if (e18 & 0xc) {
			/* g[] write fault? */
			nv_error(priv, "TRAP_PROP - TP %d - CUDA_FAULT - Global write fault at address %02x%08x\n",
				 tp, e14, e10 | ((e18 >> 7) & 0x1f));
			e18 &= ~0x00000f80;
		} else {
			nv_error(priv, "TRAP_PROP - TP %d - Unknown CUDA fault at address %02x%08x\n",
				 tp, e14, e10);
		}
		ustatus &= ~0x00000080;
	}
	if (ustatus) {
		nv_error(priv, "TRAP_PROP - TP %d -", tp);
		nvkm_bitfield_print(nv50_gr_trap_prop, ustatus);
		pr_cont(" - Address %02x%08x\n", e14, e10);
	}
	nv_error(priv, "TRAP_PROP - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
		 tp, e0c, e18, e1c, e20, e24);
}

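/* report TRAP_MP_EXEC for one TP: bits 24..27 of the unit mask select the
 * MPs present, and each MP register block sits at a chipset-dependent
 * stride (0x408200 + tp * 0x1000 pre-NVA0, 0x408100 + tp * 0x800 on
 * NVA0+), with the exec error status at offset 0x14.
 */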
static void
nv50_priv_mp_trap(struct nv50_gr_priv *priv, int tpid, int display)
{
	u32 units = nv_rd32(priv, 0x1540);
	u32 addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;
		if (nv_device(priv)->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(priv, addr + 0x10);
		status = nv_rd32(priv, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(priv, addr + 0x20);
			pc = nv_rd32(priv, addr + 0x24);
			oplow = nv_rd32(priv, addr + 0x70);
			ophigh = nv_rd32(priv, addr + 0x74);
			nv_error(priv, "TRAP_MP_EXEC - "
					"TP %d MP %d:", tpid, i);
			nvkm_bitfield_print(nv50_mp_exec_errors, status);
			pr_cont(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(priv, addr + 0x10, mp10);
		nv_wr32(priv, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		nv_error(priv, "TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

static void
nv50_priv_tp_trap(struct nv50_gr_priv *priv, int type, u32 ustatus_old,
		  u32 ustatus_new, int display, const char *name)
{
	int tps = 0;
	u32 units = nv_rd32(priv, 0x1540);
	int i, r;
	u32 ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (nv_device(priv)->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			if (display) {
				nv_error(priv, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					nv_error(priv, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(priv, r));
				if (ustatus) {
					nv_error(priv, "%s - TP%d:", name, i);
					nvkm_bitfield_print(nv50_tex_traps,
							       ustatus);
					pr_cont("\n");
					ustatus = 0;
				}
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x04030000) {
				nv50_priv_mp_trap(priv, i, display);
				ustatus &= ~0x04030000;
			}
			if (ustatus && display) {
				nv_error(priv, "%s - TP%d:", name, i);
				nvkm_bitfield_print(nv50_mpc_traps, ustatus);
				pr_cont("\n");
				ustatus = 0;
			}
			break;
		case 8: /* PROP error */
			if (display)
				nv50_priv_prop_trap(
						priv, ustatus_addr, ustatus, i);
			ustatus = 0;
			break;
		}
		if (ustatus) {
			if (display)
				nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(priv, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		nv_warn(priv, "%s - No TPs claiming errors?\n", name);
}

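/* per-unit trap dispatch: each bit set in 0x400108 names a unit that
 * trapped; once a unit has been decoded its ustatus register is written
 * with 0xc0000000 to re-arm it and its bit is acked back into 0x400108.
 */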
static int
nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display,
		     int chid, u64 inst, struct nvkm_object *engctx)
{
	u32 status = nv_rd32(priv, 0x400108);
	u32 ustatus;

	if (!status && display) {
		nv_error(priv, "TRAP: no units reporting traps?\n");
		return 1;
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
		}

		nv_wr32(priv, 0x400500, 0x00000000);

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			u32 addr = nv_rd32(priv, 0x400808);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 datal = nv_rd32(priv, 0x40080c);
			u32 datah = nv_rd32(priv, 0x400810);
			u32 class = nv_rd32(priv, 0x400814);
			u32 r848 = nv_rd32(priv, 0x400848);

			nv_error(priv, "TRAP DISPATCH_FAULT\n");
			if (display && (addr & 0x80000000)) {
				nv_error(priv,
					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
					 chid, inst,
					 nvkm_client_name(engctx), subc,
					 class, mthd, datah, datal, addr, r848);
			} else
			if (display) {
				nv_error(priv, "no stuck command?\n");
			}

			nv_wr32(priv, 0x400808, 0);
			nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
			nv_wr32(priv, 0x400848, 0);
			ustatus &= ~0x00000001;
		}

		if (ustatus & 0x00000002) {
			u32 addr = nv_rd32(priv, 0x40084c);
			u32 subc = (addr & 0x00070000) >> 16;
			u32 mthd = (addr & 0x00001ffc);
			u32 data = nv_rd32(priv, 0x40085c);
			u32 class = nv_rd32(priv, 0x400814);

			nv_error(priv, "TRAP DISPATCH_QUERY\n");
			if (display && (addr & 0x80000000)) {
				nv_error(priv,
					 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
					 chid, inst,
					 nvkm_client_name(engctx), subc,
					 class, mthd, data, addr);
			} else
			if (display) {
				nv_error(priv, "no stuck command?\n");
			}

			nv_wr32(priv, 0x40084c, 0);
			ustatus &= ~0x00000002;
		}

		if (ustatus && display) {
			nv_error(priv, "TRAP_DISPATCH (unknown "
				      "0x%08x)\n", ustatus);
		}

		nv_wr32(priv, 0x400804, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x001);
		status &= ~0x001;
		if (!status)
			return 0;
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_M2MF");
			nvkm_bitfield_print(nv50_gr_trap_m2mf, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
				nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
				nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(priv, 0x400040, 2);
		nv_wr32(priv, 0x400040, 0);
		nv_wr32(priv, 0x406800, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_VFETCH");
			nvkm_bitfield_print(nv50_gr_trap_vfetch, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
				nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
				nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
		}

		nv_wr32(priv, 0x400c04, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_STRMOUT");
			nvkm_bitfield_print(nv50_gr_trap_strmout, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
				nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
				nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));

		}

		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(priv, 0x400040, 0x80);
		nv_wr32(priv, 0x400040, 0);
		nv_wr32(priv, 0x401800, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
		if (display) {
			nv_error(priv, "TRAP_CCACHE");
			nvkm_bitfield_print(nv50_gr_trap_ccache, ustatus);
			pr_cont("\n");
			nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
				     " %08x %08x %08x\n",
				nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
				nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
				nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
				nv_rd32(priv, 0x40501c));

		}

		nv_wr32(priv, 0x405018, 0xc0000000);
		nv_wr32(priv, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
		if (display)
			nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
		nv_wr32(priv, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
				    "TRAP_TEXTURE");
		nv_wr32(priv, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
				    "TRAP_MP");
		nv_wr32(priv, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* PROP:  Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
				    "TRAP_PROP");
		nv_wr32(priv, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			nv_error(priv, "TRAP: unknown 0x%08x\n", status);
		nv_wr32(priv, 0x400108, status);
	}

	return 1;
}

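/* top-level PGRAPH interrupt handler: 0x400100 is the interrupt status,
 * 0x40032c holds the current channel instance, and 0x400704/0x400708/
 * 0x400814 supply the offending subchannel, method, data and class used
 * in the messages below.
 */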
static void
nv50_gr_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_fifo *pfifo = nvkm_fifo(subdev);
	struct nvkm_engine *engine = nv_engine(subdev);
	struct nvkm_object *engctx;
	struct nvkm_handle *handle = NULL;
	struct nv50_gr_priv *priv = (void *)subdev;
	u32 stat = nv_rd32(priv, 0x400100);
	u32 inst = nv_rd32(priv, 0x40032c) & 0x0fffffff;
	u32 addr = nv_rd32(priv, 0x400704);
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00001ffc);
	u32 data = nv_rd32(priv, 0x400708);
	u32 class = nv_rd32(priv, 0x400814);
	u32 show = stat, show_bitfield = stat;
	int chid;

	engctx = nvkm_engctx_get(engine, inst);
	chid   = pfifo->chid(pfifo, engctx);

	if (stat & 0x00000010) {
		handle = nvkm_handle_get_class(engctx, class);
		if (handle && !nv_call(handle->object, mthd, data))
			show &= ~0x00000010;
		nvkm_handle_put(handle);
	}

	if (show & 0x00100000) {
		u32 ecode = nv_rd32(priv, 0x400110);
		nv_error(priv, "DATA_ERROR ");
		nvkm_enum_print(nv50_data_error_names, ecode);
		pr_cont("\n");
		show_bitfield &= ~0x00100000;
	}

	if (stat & 0x00200000) {
		if (!nv50_gr_trap_handler(priv, show, chid, (u64)inst << 12,
					  engctx))
			show &= ~0x00200000;
		show_bitfield &= ~0x00200000;
	}

	nv_wr32(priv, 0x400100, stat);
	nv_wr32(priv, 0x400500, 0x00010001);

	if (show) {
		show &= show_bitfield;
		if (show) {
			nv_error(priv, "%s", "");
			nvkm_bitfield_print(nv50_gr_intr_name, show);
			pr_cont("\n");
		}
		nv_error(priv,
			 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
			 chid, (u64)inst << 12, nvkm_client_name(engctx),
			 subc, class, mthd, data);
	}

	if (nv_rd32(priv, 0x400824) & (1 << 31))
		nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));

	nvkm_engctx_put(engctx);
}

static int
nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv50_gr_priv *priv;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00201000;
	nv_subdev(priv)->intr = nv50_gr_intr;
	nv_engine(priv)->cclass = &nv50_gr_cclass;

	priv->base.units = nv50_gr_units;

	switch (nv_device(priv)->chipset) {
	case 0x50:
		nv_engine(priv)->sclass = nv50_gr_sclass;
		break;
	case 0x84:
	case 0x86:
	case 0x92:
	case 0x94:
	case 0x96:
	case 0x98:
		nv_engine(priv)->sclass = g84_gr_sclass;
		break;
	case 0xa0:
	case 0xaa:
	case 0xac:
		nv_engine(priv)->sclass = gt200_gr_sclass;
		break;
	case 0xa3:
	case 0xa5:
	case 0xa8:
		nv_engine(priv)->sclass = gt215_gr_sclass;
		break;
	case 0xaf:
		nv_engine(priv)->sclass = mcp89_gr_sclass;
		break;
	}

	/* unfortunate hw bug workaround... */
	if (nv_device(priv)->chipset != 0x50 &&
	    nv_device(priv)->chipset != 0xac)
		nv_engine(priv)->tlb_flush = g84_gr_tlb_flush;

	spin_lock_init(&priv->lock);
	return 0;
}

static int
nv50_gr_init(struct nvkm_object *object)
{
	struct nv50_gr_priv *priv = (void *)object;
	int ret, units, i;

	ret = nvkm_gr_init(&priv->base);
	if (ret)
		return ret;

	/* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
	nv_wr32(priv, 0x40008c, 0x00000004);

	/* reset/enable traps and interrupts */
	nv_wr32(priv, 0x400804, 0xc0000000);
	nv_wr32(priv, 0x406800, 0xc0000000);
	nv_wr32(priv, 0x400c04, 0xc0000000);
	nv_wr32(priv, 0x401800, 0xc0000000);
	nv_wr32(priv, 0x405018, 0xc0000000);
	nv_wr32(priv, 0x402000, 0xc0000000);

	units = nv_rd32(priv, 0x001540);
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		if (nv_device(priv)->chipset < 0xa0) {
			nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
			nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
			nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
		} else {
			nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
			nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
			nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
		}
	}

	nv_wr32(priv, 0x400108, 0xffffffff);
	nv_wr32(priv, 0x400138, 0xffffffff);
	nv_wr32(priv, 0x400100, 0xffffffff);
	nv_wr32(priv, 0x40013c, 0xffffffff);
	nv_wr32(priv, 0x400500, 0x00010001);

	/* upload context program, initialise ctxctl defaults */
	ret = nv50_grctx_init(nv_device(priv), &priv->size);
	if (ret)
		return ret;

	nv_wr32(priv, 0x400824, 0x00000000);
	nv_wr32(priv, 0x400828, 0x00000000);
	nv_wr32(priv, 0x40082c, 0x00000000);
	nv_wr32(priv, 0x400830, 0x00000000);
	nv_wr32(priv, 0x40032c, 0x00000000);
	nv_wr32(priv, 0x400330, 0x00000000);

	/* some unknown zcull magic */
	switch (nv_device(priv)->chipset & 0xf0) {
	case 0x50:
	case 0x80:
	case 0x90:
		nv_wr32(priv, 0x402ca8, 0x00000800);
		break;
	case 0xa0:
	default:
		if (nv_device(priv)->chipset == 0xa0 ||
		    nv_device(priv)->chipset == 0xaa ||
		    nv_device(priv)->chipset == 0xac) {
			nv_wr32(priv, 0x402ca8, 0x00000802);
		} else {
			nv_wr32(priv, 0x402cc0, 0x00000000);
			nv_wr32(priv, 0x402ca8, 0x00000002);
		}

		break;
	}

	/* zero out zcull regions */
	for (i = 0; i < 8; i++) {
		nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
		nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
		nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
		nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
	}
	return 0;
}

struct nvkm_oclass
nv50_gr_oclass = {
	.handle = NV_ENGINE(GR, 0x50),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv50_gr_ctor,
		.dtor = _nvkm_gr_dtor,
		.init = nv50_gr_init,
		.fini = _nvkm_gr_fini,
	},
};