This source file includes the following definitions:
- nv50_vmm_pgt_pte
- nv50_vmm_pgt_sgl
- nv50_vmm_pgt_dma
- nv50_vmm_pgt_mem
- nv50_vmm_pgt_unmap
- nv50_vmm_pde
- nv50_vmm_pgd_pde
- nv50_vmm_flush
- nv50_vmm_valid
- nv50_vmm_part
- nv50_vmm_join
- nv50_vmm_new

#include "vmm.h"

#include <subdev/fb.h>
#include <subdev/timer.h>
#include <engine/gr.h>

#include <nvif/if500d.h>
#include <nvif/unpack.h>
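
/* nv50_vmm_pgt_pte() below writes a run of PTEs.  Judging by the
 * "(log2blk << 7)" term, NV50-family PTEs carry a contiguous-block hint
 * (log2 of the run length in pages): the loop greedily picks the largest
 * power-of-two run, at most 128 pages, that both fits in the remaining
 * count and is naturally aligned at the current index, then stamps the
 * same value across the whole run.
 *
 * Hypothetical worked example (not from the source): ptei=0, ptes=130
 * splits into a 128-page run (log2blk=7) followed by a 2-page run
 * (log2blk=1), whereas ptei=1 would have to start with a single page,
 * because IS_ALIGNED(1, pten) fails for every pten > 1.
 */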
static inline void
nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
        u64 next = addr + map->type, data;
        u32 pten;
        int log2blk;

        map->type += ptes * map->ctag;

        while (ptes) {
                for (log2blk = 7; log2blk >= 0; log2blk--) {
                        pten = 1 << log2blk;
                        if (ptes >= pten && IS_ALIGNED(ptei, pten))
                                break;
                }

                data  = next | (log2blk << 7);
                next += pten * map->next;
                ptes -= pten;

                while (pten--)
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
        }
}

static void
nv50_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}
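
/* Fast path for DMA mappings: when the VMM page size equals the CPU's
 * PAGE_SHIFT, each entry of map->dma corresponds to exactly one PTE, so
 * the addresses can be written directly instead of going through the
 * run-detection logic in nv50_vmm_pgt_pte().
 */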
static void
nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        if (map->page->shift == PAGE_SHIFT) {
                VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes);
                nvkm_kmap(pt->memory);
                while (ptes--) {
                        const u64 data = *map->dma++ + map->type;
                        VMM_WO064(pt, vmm, ptei++ * 8, data);
                        map->type += map->ctag;
                }
                nvkm_done(pt->memory);
                return;
        }

        VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

static void
nv50_vmm_pgt_mem(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
                 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
{
        VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte);
}

static void
nv50_vmm_pgt_unmap(struct nvkm_vmm *vmm,
                   struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
{
        VMM_FO064(pt, vmm, ptei * 8, 0ULL, ptes);
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgt = {
        .unmap = nv50_vmm_pgt_unmap,
        .mem = nv50_vmm_pgt_mem,
        .dma = nv50_vmm_pgt_dma,
        .sgl = nv50_vmm_pgt_sgl,
};
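
/* Build the page-directory entry for a page table.  Reading off the
 * constants below: bits 1:0 select the page size (1 = 64KiB large pages,
 * 3 = 4KiB small pages), bits 6:5 encode how much of the full 1MiB
 * small-page table was actually allocated, bits 3:2 give the memory
 * target, and the high bits hold the table's address.  The
 * 0xdeadcafe00000000 default is presumably a recognizable poison value
 * for PDEs that have no page table.
 */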
static bool
nv50_vmm_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgt, u64 *pdata)
{
        struct nvkm_mmu_pt *pt;
        u64 data = 0xdeadcafe00000000ULL;
        if (pgt && (pt = pgt->pt[0])) {
                switch (pgt->page) {
                case 16: data = 0x00000001; break;
                case 12: data = 0x00000003;
                        switch (nvkm_memory_size(pt->memory)) {
                        case 0x100000: data |= 0x00000000; break;
                        case 0x040000: data |= 0x00000020; break;
                        case 0x020000: data |= 0x00000040; break;
                        case 0x010000: data |= 0x00000060; break;
                        default:
                                WARN_ON(1);
                                return false;
                        }
                        break;
                default:
                        WARN_ON(1);
                        return false;
                }

                switch (nvkm_memory_target(pt->memory)) {
                case NVKM_MEM_TARGET_VRAM: data |= 0x00000000; break;
                case NVKM_MEM_TARGET_HOST: data |= 0x00000008; break;
                case NVKM_MEM_TARGET_NCOH: data |= 0x0000000c; break;
                default:
                        WARN_ON(1);
                        return false;
                }

                data |= pt->addr;
        }
        *pdata = data;
        return true;
}
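
/* NV50-family channels each hold their own copy of the page directory in
 * their instance block; there is no single shared directory in memory.
 * Updating a PDE therefore means broadcasting the new value to every
 * instance block currently joined to this VMM.
 */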
static void
nv50_vmm_pgd_pde(struct nvkm_vmm *vmm, struct nvkm_vmm_pt *pgd, u32 pdei)
{
        struct nvkm_vmm_join *join;
        u32 pdeo = vmm->mmu->func->vmm.pd_offset + (pdei * 8);
        u64 data;

        if (!nv50_vmm_pde(vmm, pgd->pde[pdei], &data))
                return;

        list_for_each_entry(join, &vmm->join, head) {
                nvkm_kmap(join->inst);
                nvkm_wo64(join->inst, pdeo, data);
                nvkm_done(join->inst);
        }
}

static const struct nvkm_vmm_desc_func
nv50_vmm_pgd = {
        .pde = nv50_vmm_pgd_pde,
};
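
/* A quick consistency check on the descriptors (arithmetic, not from the
 * source): each PDE spans 1 << 29 = 512MiB, matching .page_block in
 * nv50_vmm below.  The 4KiB layout reaches that with 2^17 PTEs * 4KiB,
 * the 64KiB layout with 2^13 PTEs * 64KiB, and the 11-bit PGD (2048
 * entries) then covers the full 40-bit, 1TiB address space.
 */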
const struct nvkm_vmm_desc
nv50_vmm_desc_12[] = {
        { PGT, 17, 8, 0x1000, &nv50_vmm_pgt },
        { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
        {}
};

const struct nvkm_vmm_desc
nv50_vmm_desc_16[] = {
        { PGT, 13, 8, 0x1000, &nv50_vmm_pgt },
        { PGD, 11, 0, 0x0000, &nv50_vmm_pgd },
        {}
};
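
/* TLB invalidation is driven through register 0x100c80: the engine ID is
 * shifted into the upper half of the word, and bit 0 triggers the flush
 * and reads back as a busy flag, which is polled for up to 2ms.
 */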
void
nv50_vmm_flush(struct nvkm_vmm *vmm, int level)
{
        struct nvkm_subdev *subdev = &vmm->mmu->subdev;
        struct nvkm_device *device = subdev->device;
        int i, id;

        mutex_lock(&subdev->mutex);
        for (i = 0; i < NVKM_SUBDEV_NR; i++) {
                if (!atomic_read(&vmm->engref[i]))
                        continue;

                /* Prefer GR's own TLB flush method when the engine is
                 * present; fall back to the generic register poke only
                 * if it reports -ENODEV.
                 */
                if (i == NVKM_ENGINE_GR && device->gr) {
                        int ret = nvkm_gr_tlb_flush(device->gr);
                        if (ret != -ENODEV)
                                continue;
                }

                switch (i) {
                case NVKM_ENGINE_GR    : id = 0x00; break;
                case NVKM_ENGINE_VP    :
                case NVKM_ENGINE_MSPDEC: id = 0x01; break;
                case NVKM_SUBDEV_BAR   : id = 0x06; break;
                case NVKM_ENGINE_MSPPP :
                case NVKM_ENGINE_MPEG  : id = 0x08; break;
                case NVKM_ENGINE_BSP   :
                case NVKM_ENGINE_MSVLD : id = 0x09; break;
                case NVKM_ENGINE_CIPHER:
                case NVKM_ENGINE_SEC   : id = 0x0a; break;
                case NVKM_ENGINE_CE0   : id = 0x0d; break;
                default:
                        continue;
                }

                nvkm_wr32(device, 0x100c80, (id << 16) | 1);
                if (nvkm_msec(device, 2000,
                        if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
                                break;
                ) < 0)
                        nvkm_error(subdev, "%s mmu invalidate timeout\n",
                                   nvkm_subdev_name[i]);
        }
        mutex_unlock(&subdev->mutex);
}
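
/* nv50_vmm_valid() assembles the per-mapping PTE template in map->type.
 * From the shifts at the end of the function: bit 0 = valid, bit 3 =
 * read-only, bits 5:4 = aperture, bit 6 = privileged, the storage kind
 * sits at bits 46:40, and the compression fields occupy bits 47 and up
 * when comp is requested.
 */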
int
nv50_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
               struct nvkm_vmm_map *map)
{
        const struct nvkm_vmm_page *page = map->page;
        union {
                struct nv50_vmm_map_vn vn;
                struct nv50_vmm_map_v0 v0;
        } *args = argv;
        struct nvkm_device *device = vmm->mmu->subdev.device;
        struct nvkm_ram *ram = device->fb->ram;
        struct nvkm_memory *memory = map->memory;
        u8 aper, kind, comp, priv, ro;
        int kindn, ret = -ENOSYS;
        const u8 *kindm;

        map->type = map->ctag = 0;
        map->next = 1 << page->shift;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                ro   = !!args->v0.ro;
                priv = !!args->v0.priv;
                kind = args->v0.kind & 0x7f;
                comp = args->v0.comp & 0x03;
        } else
        if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) {
                ro   = 0;
                priv = 0;
                kind = 0x00;
                comp = 0;
        } else {
                VMM_DEBUG(vmm, "args");
                return ret;
        }

        switch (nvkm_memory_target(memory)) {
        case NVKM_MEM_TARGET_VRAM:
                if (ram->stolen) {
                        map->type |= ram->stolen;
                        aper = 3;
                } else {
                        aper = 0;
                }
                break;
        case NVKM_MEM_TARGET_HOST:
                aper = 2;
                break;
        case NVKM_MEM_TARGET_NCOH:
                aper = 3;
                break;
        default:
                WARN_ON(1);
                return -EINVAL;
        }

        kindm = vmm->mmu->func->kind(vmm->mmu, &kindn);
        if (kind >= kindn || kindm[kind] == 0x7f) {
                VMM_DEBUG(vmm, "kind %02x", kind);
                return -EINVAL;
        }

        if (map->mem && map->mem->type != kindm[kind]) {
                VMM_DEBUG(vmm, "kind %02x bankswz: %d %d", kind,
                          kindm[kind], map->mem->type);
                return -EINVAL;
        }

        if (comp) {
                u32 tags = (nvkm_memory_size(memory) >> 16) * comp;
                if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
                        VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
                        return -EINVAL;
                }

                ret = nvkm_memory_tags_get(memory, device, tags, NULL,
                                           &map->tags);
                if (ret) {
                        VMM_DEBUG(vmm, "comp %d", ret);
                        return ret;
                }

                if (map->tags->mn) {
                        u32 tags = map->tags->mn->offset + (map->offset >> 16);
                        map->ctag |= (u64)comp << 49;
                        map->type |= (u64)comp << 47;
                        map->type |= (u64)tags << 49;
                        map->next |= map->ctag;
                }
        }

        map->type |= BIT(0);
        map->type |= (u64)ro << 3;
        map->type |= (u64)aper << 4;
        map->type |= (u64)priv << 6;
        map->type |= (u64)kind << 40;
        return 0;
}
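
/* nv50_vmm_part() and nv50_vmm_join() below maintain the list of instance
 * blocks that share this VMM's page directory; part simply removes an
 * instance block from the broadcast list.
 */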
void
nv50_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        struct nvkm_vmm_join *join;

        list_for_each_entry(join, &vmm->join, head) {
                if (join->inst == inst) {
                        list_del(&join->head);
                        kfree(join);
                        break;
                }
        }
}
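
/* Joining copies the current page directory into the new instance block,
 * one PDE per 512MiB of address space (hence the ">> 29" index math), so
 * the channel starts out consistent with mappings made before it joined.
 */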
int
nv50_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
        const u32 pd_offset = vmm->mmu->func->vmm.pd_offset;
        struct nvkm_vmm_join *join;
        int ret = 0;
        u64 data;
        u32 pdei;

        if (!(join = kmalloc(sizeof(*join), GFP_KERNEL)))
                return -ENOMEM;
        join->inst = inst;
        list_add_tail(&join->head, &vmm->join);

        nvkm_kmap(join->inst);
        for (pdei = vmm->start >> 29; pdei <= (vmm->limit - 1) >> 29; pdei++) {
                if (!nv50_vmm_pde(vmm, vmm->pd->pde[pdei], &data)) {
                        ret = -EINVAL;
                        break;
                }
                nvkm_wo64(join->inst, pd_offset + (pdei * 8), data);
        }
        nvkm_done(join->inst);
        return ret;
}
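
/* The NVKM_VMM_PAGE_* capability flags read positionally; by their
 * spelling, 64KiB pages appear to be VRAM-only and compression-capable
 * (xVxC), while 4KiB pages may also target host memory but not
 * compression (xVHx).
 */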
static const struct nvkm_vmm_func
nv50_vmm = {
        .join = nv50_vmm_join,
        .part = nv50_vmm_part,
        .valid = nv50_vmm_valid,
        .flush = nv50_vmm_flush,
        .page_block = 1 << 29,
        .page = {
                { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
                { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
                {}
        }
};

int
nv50_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
             void *argv, u32 argc, struct lock_class_key *key, const char *name,
             struct nvkm_vmm **pvmm)
{
        return nv04_vmm_new_(&nv50_vmm, mmu, 0, managed, addr, size,
                             argv, argc, key, name, pvmm);
}