This source file includes the following definitions.
- nvkm_falcon_v1_load_imem
- nvkm_falcon_v1_load_emem
- nvkm_falcon_v1_load_dmem
- nvkm_falcon_v1_read_emem
- nvkm_falcon_v1_read_dmem
- nvkm_falcon_v1_bind_context
- nvkm_falcon_v1_set_start_addr
- nvkm_falcon_v1_start
- nvkm_falcon_v1_wait_for_halt
- nvkm_falcon_v1_clear_interrupt
- falcon_v1_wait_idle
- nvkm_falcon_v1_enable
- nvkm_falcon_v1_disable
- nvkm_falcon_v1_new
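
Before the listing, a minimal usage sketch of how a subdev might create one of these falcons and boot a ucode image through it. This is a hedged sketch, not code from this file: it assumes the wrapper helpers from falcon/base.c (nvkm_falcon_get(), nvkm_falcon_put(), nvkm_falcon_load_imem(), nvkm_falcon_set_start_addr(), nvkm_falcon_start(), nvkm_falcon_wait_for_halt(), nvkm_falcon_del()), and the function name, base address, image and timeout are hypothetical placeholders.

/* hedged usage sketch -- not part of v1.c, see note above */
static int
example_boot_falcon(struct nvkm_subdev *subdev, void *img, u32 size)
{
        struct nvkm_falcon *falcon;
        int ret;

        /* hypothetical base address of the falcon's register window */
        ret = nvkm_falcon_v1_new(subdev, "example", 0x84000, &falcon);
        if (ret)
                return ret;

        ret = nvkm_falcon_get(falcon, subdev); /* acquire and enable */
        if (ret)
                goto done;

        /* upload code to IMEM offset 0: tag 0, port 0, non-secure */
        nvkm_falcon_load_imem(falcon, img, 0x0, size, 0, 0, false);
        nvkm_falcon_set_start_addr(falcon, 0x0);
        nvkm_falcon_start(falcon);
        ret = nvkm_falcon_wait_for_halt(falcon, 100);

        nvkm_falcon_put(falcon, subdev);
done:
        nvkm_falcon_del(&falcon);
        return ret;
}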
#include "priv.h"

#include <core/gpuobj.h>
#include <core/memory.h>
#include <subdev/timer.h>

static void
nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
                         u32 size, u16 tag, u8 port, bool secure)
{
        u8 rem = size % 4;
        u32 reg;
        int i;

        size -= rem;

        reg = start | BIT(24) | (secure ? BIT(28) : 0);
        nvkm_falcon_wr32(falcon, 0x180 + (port * 16), reg);
        for (i = 0; i < size / 4; i++) {
                /* write new tag every 256B */
                if ((i & 0x3f) == 0)
                        nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
                nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
        }

        /*
         * If size is not a multiple of 4, mask the last word to ensure
         * garbage does not get written.
         */
        if (rem) {
                u32 extra = ((u32 *)data)[i];

                /* write new tag every 256B */
                if ((i & 0x3f) == 0)
                        nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
                nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
                                 extra & (BIT(rem * 8) - 1));
                ++i;
        }

        /* code must be padded to a 0x40-word boundary */
        for (; i & 0x3f; i++)
                nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}

static void
nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
                         u32 size, u8 port)
{
        u8 rem = size % 4;
        int i;

        size -= rem;

        nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
        for (i = 0; i < size / 4; i++)
                nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);

        /*
         * If size is not a multiple of 4, mask the last word to ensure
         * garbage does not get written.
         */
        if (rem) {
                u32 extra = ((u32 *)data)[i];

                nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
                                 extra & (BIT(rem * 8) - 1));
        }
}

/* DMEM addresses at or above this offset are redirected to EMEM */
static const u32 EMEM_START_ADDR = 0x1000000;

static void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
                         u32 size, u8 port)
{
        u8 rem = size % 4;
        int i;

        if (start >= EMEM_START_ADDR && falcon->has_emem)
                return nvkm_falcon_v1_load_emem(falcon, data,
                                                start - EMEM_START_ADDR, size,
                                                port);

        size -= rem;

        nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
        for (i = 0; i < size / 4; i++)
                nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);

        /*
         * If size is not a multiple of 4, mask the last word to ensure
         * garbage does not get written.
         */
        if (rem) {
                u32 extra = ((u32 *)data)[i];

                nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
                                 extra & (BIT(rem * 8) - 1));
        }
}

static void
nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
                         u8 port, void *data)
{
        u8 rem = size % 4;
        int i;

        size -= rem;

        nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
        for (i = 0; i < size / 4; i++)
                ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

        /*
         * If size is not a multiple of 4, read the last word and copy
         * only the remaining bytes.
         */
        if (rem) {
                u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));

                for (i = size; i < size + rem; i++) {
                        ((u8 *)data)[i] = (u8)(extra & 0xff);
                        extra >>= 8;
                }
        }
}

static void
nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
                         u8 port, void *data)
{
        u8 rem = size % 4;
        int i;

        if (start >= EMEM_START_ADDR && falcon->has_emem)
                return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
                                                size, port, data);

        size -= rem;

        nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
        for (i = 0; i < size / 4; i++)
                ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

        /*
         * If size is not a multiple of 4, read the last word and copy
         * only the remaining bytes.
         */
        if (rem) {
                u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));

                for (i = size; i < size + rem; i++) {
                        ((u8 *)data)[i] = (u8)(extra & 0xff);
                        extra >>= 8;
                }
        }
}

static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx)
{
        struct nvkm_device *device = falcon->owner->device;
        u32 inst_loc;
        u32 fbif;

        /* disable instance block binding */
        if (ctx == NULL) {
                nvkm_falcon_wr32(falcon, 0x10c, 0x0);
                return;
        }

        switch (falcon->owner->index) {
        case NVKM_ENGINE_NVENC0:
        case NVKM_ENGINE_NVENC1:
        case NVKM_ENGINE_NVENC2:
                fbif = 0x800;
                break;
        case NVKM_SUBDEV_PMU:
                fbif = 0xe00;
                break;
        default:
                fbif = 0x600;
                break;
        }

        nvkm_falcon_wr32(falcon, 0x10c, 0x1);

        /* setup apertures - virtual */
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
        /* setup apertures - physical */
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
        nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);

        /* set instance block memory target */
        switch (nvkm_memory_target(ctx)) {
        case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
        case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
        case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
        default:
                WARN_ON(1);
                return;
        }

        /* enable context */
        nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
        nvkm_falcon_wr32(falcon, 0x054,
                         ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) |
                         (inst_loc << 28) | (1 << 30));

        nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
        nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);

        /*
         * For GSP and SEC2 only: wait for the context-switch interrupt
         * to assert and the ctxsw state to report completion, clear the
         * interrupt, then wait for the ctxsw state bits to go idle.
         * Without this sequence, starting the falcon's RTOS has proven
         * unreliable.
         */
        switch (falcon->owner->index) {
        case NVKM_SUBDEV_GSP:
        case NVKM_ENGINE_SEC2:
                nvkm_msec(device, 10,
                        u32 irqstat = nvkm_falcon_rd32(falcon, 0x008);
                        u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
                        if ((irqstat & 0x00000008) &&
                            (flcn0dc & 0x00007000) == 0x00005000)
                                break;
                );

                nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008);
                nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002);

                nvkm_msec(device, 10,
                        u32 flcn0dc = nvkm_falcon_rd32(falcon, 0x0dc);
                        if ((flcn0dc & 0x00007000) == 0x00000000)
                                break;
                );
                break;
        default:
                break;
        }
}

static void
nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr)
{
        nvkm_falcon_wr32(falcon, 0x104, start_addr);
}

static void
nvkm_falcon_v1_start(struct nvkm_falcon *falcon)
{
        u32 reg = nvkm_falcon_rd32(falcon, 0x100);

        /* set STARTCPU through the CPUCTL alias if aliasing is enabled */
        if (reg & BIT(6))
                nvkm_falcon_wr32(falcon, 0x130, 0x2);
        else
                nvkm_falcon_wr32(falcon, 0x100, 0x2);
}

static int
nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10);
        if (ret < 0)
                return ret;

        return 0;
}

static int
nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        /* clear interrupt(s) */
        nvkm_falcon_mask(falcon, 0x004, mask, mask);

        /* wait until interrupts are cleared */
        ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0);
        if (ret < 0)
                return ret;

        return 0;
}

static int
falcon_v1_wait_idle(struct nvkm_falcon *falcon)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0);
        if (ret < 0)
                return ret;

        return 0;
}

static int
nvkm_falcon_v1_enable(struct nvkm_falcon *falcon)
{
        struct nvkm_device *device = falcon->owner->device;
        int ret;

        ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0);
        if (ret < 0) {
                nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n");
                return ret;
        }

        ret = falcon_v1_wait_idle(falcon);
        if (ret)
                return ret;

        /* enable IRQs */
        nvkm_falcon_wr32(falcon, 0x010, 0xff);

        return 0;
}

static void
nvkm_falcon_v1_disable(struct nvkm_falcon *falcon)
{
        /* disable IRQs and wait for any previous code to complete */
        nvkm_falcon_wr32(falcon, 0x014, 0xff);
        falcon_v1_wait_idle(falcon);
}

static const struct nvkm_falcon_func
nvkm_falcon_v1 = {
        .load_imem = nvkm_falcon_v1_load_imem,
        .load_dmem = nvkm_falcon_v1_load_dmem,
        .read_dmem = nvkm_falcon_v1_read_dmem,
        .bind_context = nvkm_falcon_v1_bind_context,
        .start = nvkm_falcon_v1_start,
        .wait_for_halt = nvkm_falcon_v1_wait_for_halt,
        .clear_interrupt = nvkm_falcon_v1_clear_interrupt,
        .enable = nvkm_falcon_v1_enable,
        .disable = nvkm_falcon_v1_disable,
        .set_start_addr = nvkm_falcon_v1_set_start_addr,
};

int
nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
                   struct nvkm_falcon **pfalcon)
{
        struct nvkm_falcon *falcon;

        if (!(falcon = *pfalcon = kzalloc(sizeof(*falcon), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_falcon_ctor(&nvkm_falcon_v1, owner, name, addr, falcon);
        return 0;
}
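
A note on the IMEM tag arithmetic in nvkm_falcon_v1_load_imem(): tags cover 256-byte pages (0x40 words of 4 bytes each), which is why a new tag is written whenever (i & 0x3f) == 0 and why the final loop pads with zero words up to a 0x40-word boundary. For example, a 1 KiB image is 256 words and consumes four tags, written at i = 0x00, 0x40, 0x80 and 0xc0.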