This source file includes the following definitions:
- nouveau_ivmm_find
- nouveau_svmm_bind
- nouveau_svmm_part
- nouveau_svmm_join
- nouveau_svmm_invalidate
- nouveau_svmm_sync_cpu_device_pagetables
- nouveau_svmm_release
- nouveau_svmm_fini
- nouveau_svmm_init
- nouveau_svm_fault_replay
- nouveau_svm_fault_cancel
- nouveau_svm_fault_cancel_fault
- nouveau_svm_fault_cmp
- nouveau_svm_fault_cache
- nouveau_range_done
- nouveau_range_fault
- nouveau_svm_fault
- nouveau_svm_fault_buffer_fini
- nouveau_svm_fault_buffer_init
- nouveau_svm_fault_buffer_dtor
- nouveau_svm_fault_buffer_ctor
- nouveau_svm_resume
- nouveau_svm_suspend
- nouveau_svm_fini
- nouveau_svm_init
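
For orientation, below is a minimal userspace sketch of how a client might exercise these entry points through the DRM ioctl interface. The struct fields and the NOUVEAU_SVM_BIND_* command/target encodings are the ones checked by nouveau_svmm_init() and nouveau_svmm_bind() in this file; the DRM_IOCTL_NOUVEAU_SVM_INIT/_BIND request macros, the <drm/nouveau_drm.h> header path, the zero "unmanaged" window, and the 4 KiB page-size assumption are illustrative assumptions, not taken from this file.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/nouveau_drm.h>                  /* assumed uAPI header */

    /* Hypothetical helper: enable SVM for this client, then ask the driver to
     * migrate one already-mapped range to GPU VRAM.  Error handling trimmed. */
    static int svm_migrate_to_vram(int drm_fd, uint64_t addr, uint64_t npages)
    {
        struct drm_nouveau_svm_init init = {
            .unmanaged_addr = 0,                  /* placeholder window kept out of SVM */
            .unmanaged_size = 0,
        };
        struct drm_nouveau_svm_bind bind = {
            .header = (NOUVEAU_SVM_BIND_COMMAND__MIGRATE
                           << NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
                      (NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
                           << NOUVEAU_SVM_BIND_TARGET_SHIFT),
            .va_start = addr,                     /* page-aligned by the kernel */
            .va_end   = addr + (npages << 12),    /* assumes 4 KiB pages */
            .npages   = npages,
            .stride   = 0,                        /* non-zero stride is rejected */
        };

        if (ioctl(drm_fd, DRM_IOCTL_NOUVEAU_SVM_INIT, &init))
            return -1;
        return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &bind);
    }

As nouveau_svmm_bind() shows, the kernel then walks each VMA intersecting [va_start, va_end) and hands it to nouveau_dmem_migrate_vma().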
22 #include "nouveau_svm.h"
23 #include "nouveau_drv.h"
24 #include "nouveau_chan.h"
25 #include "nouveau_dmem.h"
26
27 #include <nvif/notify.h>
28 #include <nvif/object.h>
29 #include <nvif/vmm.h>
30
31 #include <nvif/class.h>
32 #include <nvif/clb069.h>
33 #include <nvif/ifc00d.h>
34
35 #include <linux/sched/mm.h>
36 #include <linux/sort.h>
37 #include <linux/hmm.h>
38
39 struct nouveau_svm {
40 struct nouveau_drm *drm;
41 struct mutex mutex;
42 struct list_head inst;
43
44 struct nouveau_svm_fault_buffer {
45 int id;
46 struct nvif_object object;
47 u32 entries;
48 u32 getaddr;
49 u32 putaddr;
50 u32 get;
51 u32 put;
52 struct nvif_notify notify;
53
54 struct nouveau_svm_fault {
55 u64 inst;
56 u64 addr;
57 u64 time;
58 u32 engine;
59 u8 gpc;
60 u8 hub;
61 u8 access;
62 u8 client;
63 u8 fault;
64 struct nouveau_svmm *svmm;
65 } **fault;
66 int fault_nr;
67 } buffer[1];
68 };
69
70 #define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
71 #define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
72
73 struct nouveau_ivmm {
74 struct nouveau_svmm *svmm;
75 u64 inst;
76 struct list_head head;
77 };
78
79 static struct nouveau_ivmm *
80 nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
81 {
82 struct nouveau_ivmm *ivmm;
83 list_for_each_entry(ivmm, &svm->inst, head) {
84 if (ivmm->inst == inst)
85 return ivmm;
86 }
87 return NULL;
88 }
89
90 struct nouveau_svmm {
91 struct nouveau_vmm *vmm;
92 struct {
93 unsigned long start;
94 unsigned long limit;
95 } unmanaged;
96
97 struct mutex mutex;
98
99 struct mm_struct *mm;
100 struct hmm_mirror mirror;
101 };
102
103 #define SVMM_DBG(s,f,a...) \
104 NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
105 #define SVMM_ERR(s,f,a...) \
106 NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
107
108 int
109 nouveau_svmm_bind(struct drm_device *dev, void *data,
110 struct drm_file *file_priv)
111 {
112 struct nouveau_cli *cli = nouveau_cli(file_priv);
113 struct drm_nouveau_svm_bind *args = data;
114 unsigned target, cmd, priority;
115 unsigned long addr, end, size;
116 struct mm_struct *mm;
117
118 args->va_start &= PAGE_MASK;
119 args->va_end &= PAGE_MASK;
120
121
122 if (args->reserved0 || args->reserved1)
123 return -EINVAL;
124 if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
125 return -EINVAL;
126 if (args->va_start >= args->va_end)
127 return -EINVAL;
128 if (!args->npages)
129 return -EINVAL;
130
131 cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
132 cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
133 switch (cmd) {
134 case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
135 break;
136 default:
137 return -EINVAL;
138 }
139
140 priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
141 priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
142
143
144 target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
145 target &= NOUVEAU_SVM_BIND_TARGET_MASK;
146 switch (target) {
147 case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
148 break;
149 default:
150 return -EINVAL;
151 }
152
153
154
155
156
157
158 if (args->stride)
159 return -EINVAL;
160
161 size = ((unsigned long)args->npages) << PAGE_SHIFT;
162 if ((args->va_start + size) <= args->va_start)
163 return -EINVAL;
164 if ((args->va_start + size) > args->va_end)
165 return -EINVAL;
166
167
168
169
170
171
172
173 mm = get_task_mm(current);
174 down_read(&mm->mmap_sem);
175
176 if (!cli->svm.svmm) {
177 up_read(&mm->mmap_sem);
mmput(mm);
178 return -EINVAL;
179 }
180
181 for (addr = args->va_start, end = args->va_start + size; addr < end;) {
182 struct vm_area_struct *vma;
183 unsigned long next;
184
185 vma = find_vma_intersection(mm, addr, end);
186 if (!vma)
187 break;
188
189 addr = max(addr, vma->vm_start);
190 next = min(vma->vm_end, end);
191
192 nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
193 addr = next;
194 }
195
196
197
198
199
200
201 args->result = 0;
202
203 up_read(&mm->mmap_sem);
204 mmput(mm);
205
206 return 0;
207 }
208
209
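/*
 * nouveau_svmm_part()/nouveau_svmm_join() below unlink/link a channel
 * instance with its SVMM on the svm->inst list; the fault handler uses this
 * list (via nouveau_ivmm_find()) to translate the instance address reported
 * by the fault buffer back to the owning address space.
 */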
210 void
211 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
212 {
213 struct nouveau_ivmm *ivmm;
214 if (svmm) {
215 mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
216 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
217 if (ivmm) {
218 list_del(&ivmm->head);
219 kfree(ivmm);
220 }
221 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
222 }
223 }
224
225
226 int
227 nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
228 {
229 struct nouveau_ivmm *ivmm;
230 if (svmm) {
231 if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
232 return -ENOMEM;
233 ivmm->svmm = svmm;
234 ivmm->inst = inst;
235
236 mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
237 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
238 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
239 }
240 return 0;
241 }
242
243
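/*
 * Drop GPU page-table mappings for an address range via the PFNCLR method.
 * The client's "super" flag is raised around the call, as the SVM PFNMAP/
 * PFNCLR methods are issued with supervisor privileges.
 */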
244 static void
245 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
246 {
247 if (limit > start) {
248 bool super = svmm->vmm->vmm.object.client->super;
249 svmm->vmm->vmm.object.client->super = true;
250 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
251 &(struct nvif_vmm_pfnclr_v0) {
252 .addr = start,
253 .size = limit - start,
254 }, sizeof(struct nvif_vmm_pfnclr_v0));
255 svmm->vmm->vmm.object.client->super = super;
256 }
257 }
258
259 static int
260 nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
261 const struct mmu_notifier_range *update)
262 {
263 struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror);
264 unsigned long start = update->start;
265 unsigned long limit = update->end;
266
267 if (!mmu_notifier_range_blockable(update))
268 return -EAGAIN;
269
270 SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
271
272 mutex_lock(&svmm->mutex);
273 if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
274 if (start < svmm->unmanaged.start) {
275 nouveau_svmm_invalidate(svmm, start,
276 svmm->unmanaged.limit);
277 }
278 start = svmm->unmanaged.limit;
279 }
280
281 nouveau_svmm_invalidate(svmm, start, limit);
282 mutex_unlock(&svmm->mutex);
283 return 0;
284 }
285
286 static void
287 nouveau_svmm_release(struct hmm_mirror *mirror)
288 {
289 }
290
291 static const struct hmm_mirror_ops
292 nouveau_svmm = {
293 .sync_cpu_device_pagetables = nouveau_svmm_sync_cpu_device_pagetables,
294 .release = nouveau_svmm_release,
295 };
296
297 void
298 nouveau_svmm_fini(struct nouveau_svmm **psvmm)
299 {
300 struct nouveau_svmm *svmm = *psvmm;
301 if (svmm) {
302 hmm_mirror_unregister(&svmm->mirror);
303 kfree(*psvmm);
304 *psvmm = NULL;
305 }
306 }
307
308 int
309 nouveau_svmm_init(struct drm_device *dev, void *data,
310 struct drm_file *file_priv)
311 {
312 struct nouveau_cli *cli = nouveau_cli(file_priv);
313 struct nouveau_svmm *svmm;
314 struct drm_nouveau_svm_init *args = data;
315 int ret;
316
317
318 if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
319 return -ENOMEM;
320 svmm->vmm = &cli->svm;
321 svmm->unmanaged.start = args->unmanaged_addr;
322 svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
323 mutex_init(&svmm->mutex);
324
325
326 mutex_lock(&cli->mutex);
327 if (cli->svm.cli) {
328 ret = -EBUSY;
329 goto done;
330 }
331
332
333
334
335
336
337
338 ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
339 args->unmanaged_addr, args->unmanaged_size,
340 &(struct gp100_vmm_v0) {
341 .fault_replay = true,
342 }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
343 if (ret)
344 goto done;
345
346
347 svmm->mm = get_task_mm(current);
348 down_write(&svmm->mm->mmap_sem);
349 svmm->mirror.ops = &nouveau_svmm;
350 ret = hmm_mirror_register(&svmm->mirror, svmm->mm);
351 if (ret == 0) {
352 cli->svm.svmm = svmm;
353 cli->svm.cli = cli;
354 }
355 up_write(&svmm->mm->mmap_sem);
356 mmput(svmm->mm);
357
358 done:
359 if (ret)
360 nouveau_svmm_fini(&svmm);
361 mutex_unlock(&cli->mutex);
362 return ret;
363 }
364
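/*
 * Encoding tables handed to HMM: hmm_range_fault() writes entries for the
 * faulted range directly in NVIF_VMM_PFNMAP_V0 format (into args.phys[] in
 * nouveau_svm_fault() below), using these flag/value translations and
 * NVIF_VMM_PFNMAP_V0_ADDR_SHIFT as the pfn shift.
 */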
365 static const u64
366 nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
367 [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
368 [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
369 [HMM_PFN_DEVICE_PRIVATE] = NVIF_VMM_PFNMAP_V0_VRAM,
370 };
371
372 static const u64
373 nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
374 [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V,
375 [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE,
376 [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
377 };
378
379
380 static void
381 nouveau_svm_fault_replay(struct nouveau_svm *svm)
382 {
383 SVM_DBG(svm, "replay");
384 WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
385 GP100_VMM_VN_FAULT_REPLAY,
386 &(struct gp100_vmm_fault_replay_vn) {},
387 sizeof(struct gp100_vmm_fault_replay_vn)));
388 }
389
390
391
392
393
394
395 static void
396 nouveau_svm_fault_cancel(struct nouveau_svm *svm,
397 u64 inst, u8 hub, u8 gpc, u8 client)
398 {
399 SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
400 WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
401 GP100_VMM_VN_FAULT_CANCEL,
402 &(struct gp100_vmm_fault_cancel_v0) {
403 .hub = hub,
404 .gpc = gpc,
405 .client = client,
406 .inst = inst,
407 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
408 }
409
410 static void
411 nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
412 struct nouveau_svm_fault *fault)
413 {
414 nouveau_svm_fault_cancel(svm, fault->inst,
415 fault->hub,
416 fault->gpc,
417 fault->client);
418 }
419
420 static int
421 nouveau_svm_fault_cmp(const void *a, const void *b)
422 {
423 const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
424 const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
425 /* Sort by instance, then address, so faults can be grouped per-VMM. */
426 if (fa->inst != fb->inst)
427 return fa->inst < fb->inst ? -1 : 1;
428 if (fa->addr != fb->addr)
429 return fa->addr < fb->addr ? -1 : 1;
430
431 return (fa->access == 0 || fa->access == 3) -
432 (fb->access == 0 || fb->access == 3);
433 }
434
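/*
 * Decode one 32-byte hardware fault-buffer entry into a nouveau_svm_fault:
 *   +0x00/+0x04 instance, +0x08/+0x0c address, +0x10/+0x14 timestamp,
 *   +0x18 engine, +0x1c info (bit 31 valid, bits 28:24 gpc, bit 20 hub,
 *   bits 19:16 access, bits 14:8 client, bits 4:0 fault type).
 */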
435 static void
436 nouveau_svm_fault_cache(struct nouveau_svm *svm,
437 struct nouveau_svm_fault_buffer *buffer, u32 offset)
438 {
439 struct nvif_object *memory = &buffer->object;
440 const u32 instlo = nvif_rd32(memory, offset + 0x00);
441 const u32 insthi = nvif_rd32(memory, offset + 0x04);
442 const u32 addrlo = nvif_rd32(memory, offset + 0x08);
443 const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
444 const u32 timelo = nvif_rd32(memory, offset + 0x10);
445 const u32 timehi = nvif_rd32(memory, offset + 0x14);
446 const u32 engine = nvif_rd32(memory, offset + 0x18);
447 const u32 info = nvif_rd32(memory, offset + 0x1c);
448 const u64 inst = (u64)insthi << 32 | instlo;
449 const u8 gpc = (info & 0x1f000000) >> 24;
450 const u8 hub = (info & 0x00100000) >> 20;
451 const u8 client = (info & 0x00007f00) >> 8;
452 struct nouveau_svm_fault *fault;
453
454
455 if (WARN_ON(!(info & 0x80000000)))
456 return;
457
458 nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);
459
460 if (!buffer->fault[buffer->fault_nr]) {
461 fault = kmalloc(sizeof(*fault), GFP_KERNEL);
462 if (WARN_ON(!fault)) {
463 nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
464 return;
465 }
466 buffer->fault[buffer->fault_nr] = fault;
467 }
468
469 fault = buffer->fault[buffer->fault_nr++];
470 fault->inst = inst;
471 fault->addr = (u64)addrhi << 32 | addrlo;
472 fault->time = (u64)timehi << 32 | timelo;
473 fault->engine = engine;
474 fault->gpc = gpc;
475 fault->hub = hub;
476 fault->access = (info & 0x000f0000) >> 16;
477 fault->client = client;
478 fault->fault = (info & 0x0000001f);
479
480 SVM_DBG(svm, "fault %016llx %016llx %02x",
481 fault->inst, fault->addr, fault->access);
482 }
483
484 static inline bool
485 nouveau_range_done(struct hmm_range *range)
486 {
487 bool ret = hmm_range_valid(range);
488
489 hmm_range_unregister(range);
490 return ret;
491 }
492
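/*
 * Fault in a range of CPU page tables via HMM and wait for the snapshot to
 * become valid.  Called with svmm->mm->mmap_sem held for read; every error
 * path here drops that lock, while success returns with it still held so
 * the caller can push the PFNs to the GPU before unlocking.
 */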
493 static int
494 nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
495 {
496 long ret;
497
498 range->default_flags = 0;
499 range->pfn_flags_mask = -1UL;
500
501 ret = hmm_range_register(range, &svmm->mirror);
502 if (ret) {
503 up_read(&svmm->mm->mmap_sem);
504 return (int)ret;
505 }
506
507 if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
508 up_read(&svmm->mm->mmap_sem);
509 return -EBUSY;
510 }
511
512 ret = hmm_range_fault(range, 0);
513 if (ret <= 0) {
514 if (ret == 0)
515 ret = -EBUSY;
516 up_read(&svmm->mm->mmap_sem);
517 hmm_range_unregister(range);
518 return ret;
519 }
520 return 0;
521 }
522
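/*
 * Top-level fault-buffer notify handler: drain pending entries from the
 * hardware buffer, sort them by instance/address, resolve each instance to
 * its SVMM, then for each contiguous window fault the CPU page tables with
 * HMM and push the resulting PFNs to the GPU VMM with PFNMAP, cancelling
 * faults that cannot be serviced and replaying the rest.
 */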
523 static int
524 nouveau_svm_fault(struct nvif_notify *notify)
525 {
526 struct nouveau_svm_fault_buffer *buffer =
527 container_of(notify, typeof(*buffer), notify);
528 struct nouveau_svm *svm =
529 container_of(buffer, typeof(*svm), buffer[buffer->id]);
530 struct nvif_object *device = &svm->drm->client.device.object;
531 struct nouveau_svmm *svmm;
532 struct {
533 struct {
534 struct nvif_ioctl_v0 i;
535 struct nvif_ioctl_mthd_v0 m;
536 struct nvif_vmm_pfnmap_v0 p;
537 } i;
538 u64 phys[16];
539 } args;
540 struct hmm_range range;
541 struct vm_area_struct *vma;
542 u64 inst, start, limit;
543 int fi, fn, pi, fill;
544 int replay = 0, ret;
545
546
547
548
549 SVM_DBG(svm, "fault handler");
550 if (buffer->get == buffer->put) {
551 buffer->put = nvif_rd32(device, buffer->putaddr);
552 buffer->get = nvif_rd32(device, buffer->getaddr);
553 if (buffer->get == buffer->put)
554 return NVIF_NOTIFY_KEEP;
555 }
556 buffer->fault_nr = 0;
557
558 SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
559 while (buffer->get != buffer->put) {
560 nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
561 if (++buffer->get == buffer->entries)
562 buffer->get = 0;
563 }
564 nvif_wr32(device, buffer->getaddr, buffer->get);
565 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
566
567
568
569
570
571 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
572 nouveau_svm_fault_cmp, NULL);
573
574
575 mutex_lock(&svm->mutex);
576 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
577 if (!svmm || buffer->fault[fi]->inst != inst) {
578 struct nouveau_ivmm *ivmm =
579 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
580 svmm = ivmm ? ivmm->svmm : NULL;
581 inst = buffer->fault[fi]->inst;
582 SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
583 }
584 buffer->fault[fi]->svmm = svmm;
585 }
586 mutex_unlock(&svm->mutex);
587
588
589 args.i.i.version = 0;
590 args.i.i.type = NVIF_IOCTL_V0_MTHD;
591 args.i.m.version = 0;
592 args.i.m.method = NVIF_VMM_V0_PFNMAP;
593 args.i.p.version = 0;
594
595 for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
596
597 if (!(svmm = buffer->fault[fi]->svmm)) {
598 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
599 continue;
600 }
601 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
602
603
604
605
606 start = buffer->fault[fi]->addr;
607 limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
608 if (start < svmm->unmanaged.limit)
609 limit = min_t(u64, limit, svmm->unmanaged.start);
610 else
611 if (limit > svmm->unmanaged.start)
612 start = max_t(u64, start, svmm->unmanaged.limit);
613 SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
614
615
616
617
618 down_read(&svmm->mm->mmap_sem);
619 vma = find_vma_intersection(svmm->mm, start, limit);
620 if (!vma) {
621 SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
622 up_read(&svmm->mm->mmap_sem);
623 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
624 continue;
625 }
626 start = max_t(u64, start, vma->vm_start);
627 limit = min_t(u64, limit, vma->vm_end);
628 SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
629
630 if (buffer->fault[fi]->addr != start) {
631 SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
632 up_read(&svmm->mm->mmap_sem);
633 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
634 continue;
635 }
636
637
638
639
640
641 args.i.p.page = PAGE_SHIFT;
642 args.i.p.addr = start;
643 for (fn = fi, pi = 0;;) {
644
645
646
647
648 if (buffer->fault[fn]->access != 0 &&
649 buffer->fault[fn]->access != 3 ) {
650 args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
651 NVIF_VMM_PFNMAP_V0_W;
652 } else {
653 args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
654 }
655 args.i.p.size = pi << PAGE_SHIFT;
656
657
658
659
660
661
662
663
664 while (++fn < buffer->fault_nr &&
665 buffer->fault[fn]->svmm == svmm &&
666 buffer->fault[fn ]->addr ==
667 buffer->fault[fn - 1]->addr);
668
669
670
671
672 if (fn >= buffer->fault_nr ||
673 buffer->fault[fn]->svmm != svmm ||
674 buffer->fault[fn]->addr >= limit)
675 break;
676
677
678 fill = (buffer->fault[fn ]->addr -
679 buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
680 while (--fill)
681 args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
682 }
683
684 SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
685 args.i.p.addr,
686 args.i.p.addr + args.i.p.size, fn - fi);
687
688
689 range.start = args.i.p.addr;
690 range.end = args.i.p.addr + args.i.p.size;
691 range.pfns = args.phys;
692 range.flags = nouveau_svm_pfn_flags;
693 range.values = nouveau_svm_pfn_values;
694 range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
695 again:
696 ret = nouveau_range_fault(svmm, &range);
697 if (ret == 0) {
698 mutex_lock(&svmm->mutex);
699 if (!nouveau_range_done(&range)) {
700 mutex_unlock(&svmm->mutex);
701 goto again;
702 }
703
704 nouveau_dmem_convert_pfn(svm->drm, &range);
705
706 svmm->vmm->vmm.object.client->super = true;
707 ret = nvif_object_ioctl(&svmm->vmm->vmm.object,
708 &args, sizeof(args.i) +
709 pi * sizeof(args.phys[0]),
710 NULL);
711 svmm->vmm->vmm.object.client->super = false;
712 mutex_unlock(&svmm->mutex);
713 up_read(&svmm->mm->mmap_sem);
714 }
715
716
717
718
719
720
721 while (fi < fn) {
722 struct nouveau_svm_fault *fault = buffer->fault[fi++];
723 pi = (fault->addr - range.start) >> PAGE_SHIFT;
724 if (ret ||
725 !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) ||
726 (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) &&
727 fault->access != 0 && fault->access != 3)) {
728 nouveau_svm_fault_cancel_fault(svm, fault);
729 continue;
730 }
731 replay++;
732 }
733 }
734
735
736 if (replay)
737 nouveau_svm_fault_replay(svm);
738 return NVIF_NOTIFY_KEEP;
739 }
740
741 static void
742 nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
743 {
744 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
745 nvif_notify_put(&buffer->notify);
746 }
747
748 static int
749 nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
750 {
751 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
752 struct nvif_object *device = &svm->drm->client.device.object;
753 buffer->get = nvif_rd32(device, buffer->getaddr);
754 buffer->put = nvif_rd32(device, buffer->putaddr);
755 SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
756 return nvif_notify_get(&buffer->notify);
757 }
758
759 static void
760 nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
761 {
762 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
763 int i;
764
765 if (buffer->fault) {
766 for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
767 kfree(buffer->fault[i]);
768 kvfree(buffer->fault);
769 }
770
771 nouveau_svm_fault_buffer_fini(svm, id);
772
773 nvif_notify_fini(&buffer->notify);
774 nvif_object_fini(&buffer->object);
775 }
776
777 static int
778 nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
779 {
780 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
781 struct nouveau_drm *drm = svm->drm;
782 struct nvif_object *device = &drm->client.device.object;
783 struct nvif_clb069_v0 args = {};
784 int ret;
785
786 buffer->id = id;
787
788 ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
789 &buffer->object);
790 if (ret < 0) {
791 SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
792 return ret;
793 }
794
795 nvif_object_map(&buffer->object, NULL, 0);
796 buffer->entries = args.entries;
797 buffer->getaddr = args.get;
798 buffer->putaddr = args.put;
799
800 ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
801 NVB069_V0_NTFY_FAULT, NULL, 0, 0,
802 &buffer->notify);
803 if (ret)
804 return ret;
805
806 buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
807 if (!buffer->fault)
808 return -ENOMEM;
809
810 return nouveau_svm_fault_buffer_init(svm, id);
811 }
812
813 void
814 nouveau_svm_resume(struct nouveau_drm *drm)
815 {
816 struct nouveau_svm *svm = drm->svm;
817 if (svm)
818 nouveau_svm_fault_buffer_init(svm, 0);
819 }
820
821 void
822 nouveau_svm_suspend(struct nouveau_drm *drm)
823 {
824 struct nouveau_svm *svm = drm->svm;
825 if (svm)
826 nouveau_svm_fault_buffer_fini(svm, 0);
827 }
828
829 void
830 nouveau_svm_fini(struct nouveau_drm *drm)
831 {
832 struct nouveau_svm *svm = drm->svm;
833 if (svm) {
834 nouveau_svm_fault_buffer_dtor(svm, 0);
835 kfree(drm->svm);
836 drm->svm = NULL;
837 }
838 }
839
840 void
841 nouveau_svm_init(struct nouveau_drm *drm)
842 {
843 static const struct nvif_mclass buffers[] = {
844 { VOLTA_FAULT_BUFFER_A, 0 },
845 { MAXWELL_FAULT_BUFFER_A, 0 },
846 {}
847 };
848 struct nouveau_svm *svm;
849 int ret;
850
851
852
853
854
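/* Nothing newer than Pascal is supported by this fault-handling path. */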
855 if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
856 return;
857
858 if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
859 return;
860
861 drm->svm->drm = drm;
862 mutex_init(&drm->svm->mutex);
863 INIT_LIST_HEAD(&drm->svm->inst);
864
865 ret = nvif_mclass(&drm->client.device.object, buffers);
866 if (ret < 0) {
867 SVM_DBG(svm, "No supported fault buffer class");
868 nouveau_svm_fini(drm);
869 return;
870 }
871
872 ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
873 if (ret) {
874 nouveau_svm_fini(drm);
875 return;
876 }
877
878 SVM_DBG(svm, "Initialised");
879 }