/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/device.h>
#include <core/engctx.h>
#include <core/ramht.h>
#include <subdev/fb.h>
#include <subdev/instmem/nv04.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

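/*
 * RAMFC layout for NV40-family PFIFO channels.  Each entry maps a field of
 * the per-channel RAMFC context to the PFIFO register it shadows while the
 * channel is resident in CACHE1; the columns follow struct ramfc_desc in
 * nv04.h ({ bits, ctxs, ctxp, regs, regp }).
 */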
static struct ramfc_desc
nv40_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 32,  0, 0x08,  0, NV10_PFIFO_CACHE1_REF_CNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 28,  0, 0x18,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{  2, 28, 0x18, 28, 0x002058 },
	{ 32,  0, 0x1c,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x20,  0, NV04_PFIFO_CACHE1_PULL1 },
	{ 32,  0, 0x24,  0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
	{ 32,  0, 0x28,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
	{ 32,  0, 0x2c,  0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
	{ 32,  0, 0x30,  0, NV10_PFIFO_CACHE1_SEMAPHORE },
	{ 32,  0, 0x34,  0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
	{ 32,  0, 0x38,  0, NV40_PFIFO_GRCTX_INSTANCE },
	{ 17,  0, 0x3c,  0, NV04_PFIFO_DMA_TIMESLICE },
	{ 32,  0, 0x40,  0, 0x0032e4 },
	{ 32,  0, 0x44,  0, 0x0032e8 },
	{ 32,  0, 0x4c,  0, 0x002088 },
	{ 32,  0, 0x50,  0, 0x003300 },
	{ 32,  0, 0x54,  0, 0x00330c },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

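/*
 * Bind an object to the channel by inserting a RAMHT entry: the context
 * word packs the object's instance address (in 16-byte units), the engine
 * it belongs to, and the channel id in bits 23 and up.
 */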
static int
nv40_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00100000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00200000;
		break;
	default:
		return -EINVAL;
	}

	context |= chid << 23;

	mutex_lock(&nv_subdev(priv)->mutex);
	ret = nvkm_ramht_insert(priv->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(priv)->mutex);
	return ret;
}

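/*
 * Attach an engine context to the channel.  The context's instance address
 * is written to the channel's RAMFC slot, and also to the live PFIFO
 * register if the channel is currently resident (0x003204); bit 0 of
 * 0x002500 is cleared around the update.
 */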
static int
nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0;
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&priv->base.lock, flags);
	nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);

	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
		nv_wr32(priv, reg, nv_engctx(engctx)->addr);
	nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);

	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return 0;
}

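/*
 * Detach an engine context: the inverse of attach, clearing the RAMFC slot
 * and, if the channel is currently resident, the live PFIFO register.
 */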
static int
nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			 struct nvkm_object *engctx)
{
	struct nv04_fifo_priv *priv = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	unsigned long flags;
	u32 reg, ctx;

	switch (nv_engidx(engctx->engine)) {
	case NVDEV_ENGINE_SW:
		return 0;
	case NVDEV_ENGINE_GR:
		reg = 0x32e0;
		ctx = 0x38;
		break;
	case NVDEV_ENGINE_MPEG:
		reg = 0x330c;
		ctx = 0x54;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&priv->base.lock, flags);
	nv_mask(priv, 0x002500, 0x00000001, 0x00000000);

	if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
		nv_wr32(priv, reg, 0x00000000);
	nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);

	nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
	spin_unlock_irqrestore(&priv->base.lock, flags);
	return 0;
}

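/*
 * Channel constructor: unpack the nv03_channel_dma_v0 args, create the
 * channel (DMAOBJ/SW/GR/MPEG engines) and fill in its 128-byte RAMFC entry
 * with the DMA put/get pointers, push buffer instance, fetch parameters
 * and timeslice value.
 */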
static int
nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo_priv *priv = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nv_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				 "offset %016llx\n", args->v0.version,
			 args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x1000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->context_attach = nv40_fifo_context_attach;
	nv_parent(chan)->context_detach = nv40_fifo_context_detach;
	nv_parent(chan)->object_attach = nv40_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	chan->ramfc = chan->base.chid * 128;

	nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
	nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
			     NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			     NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
	return 0;
}

static struct nvkm_ofuncs
nv40_fifo_ofuncs = {
	.ctor = nv40_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv40_fifo_sclass[] = {
	{ NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

static struct nvkm_oclass
nv40_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

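/*
 * PFIFO constructor: 32 channels (0-31), reusing the RAMHT/RAMRO/RAMFC
 * objects reserved by the NV04 instmem subdev and the NV40 RAMFC layout
 * above.
 */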
static int
nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem_priv *imem = nv04_instmem(parent);
	struct nv04_fifo_priv *priv;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &priv->ramht);
	nvkm_gpuobj_ref(imem->ramro, &priv->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc);

	nv_subdev(priv)->unit = 0x00000100;
	nv_subdev(priv)->intr = nv04_fifo_intr;
	nv_engine(priv)->cclass = &nv40_fifo_cclass;
	nv_engine(priv)->sclass = nv40_fifo_sclass;
	priv->base.pause = nv04_fifo_pause;
	priv->base.start = nv04_fifo_start;
	priv->ramfc_desc = nv40_ramfc;
	return 0;
}

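/*
 * PFIFO init: program the RAMHT, RAMRO and RAMFC base addresses, unmask
 * interrupts and enable cache pushing/pulling.  The RAMFC configuration
 * (0x002220/0x002230) is chipset-dependent.
 */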
static int
nv40_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo_priv *priv = (void *)object;
	struct nvkm_fb *pfb = nvkm_fb(object);
	int ret;

	ret = nvkm_fifo_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x002040, 0x000000ff);
	nv_wr32(priv, 0x002044, 0x2101ffff);
	nv_wr32(priv, 0x002058, 0x00000001);

	nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((priv->ramht->bits - 9) << 16) |
					(priv->ramht->gpuobj.addr >> 8));
	nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);

	switch (nv_device(priv)->chipset) {
	case 0x47:
	case 0x49:
	case 0x4b:
		nv_wr32(priv, 0x002230, 0x00000001);
		/* fall through */
	case 0x40:
	case 0x41:
	case 0x42:
	case 0x43:
	case 0x45:
	case 0x48:
		nv_wr32(priv, 0x002220, 0x00030002);
		break;
	default:
		nv_wr32(priv, 0x002230, 0x00000000);
		nv_wr32(priv, 0x002220, ((pfb->ram->size - 512 * 1024 +
					  priv->ramfc->addr) >> 16) |
					0x00030000);
		break;
	}

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);

	nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
	nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
	nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
	nv_wr32(priv, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv40_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x40),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv40_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv40_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};