This source file includes the following definitions:
- amdgpu_bo_subtract_pin_size
- amdgpu_bo_destroy
- amdgpu_bo_is_amdgpu_bo
- amdgpu_bo_placement_from_domain
- amdgpu_bo_create_reserved
- amdgpu_bo_create_kernel
- amdgpu_bo_create_kernel_at
- amdgpu_bo_free_kernel
- amdgpu_bo_validate_size
- amdgpu_bo_support_uswc
- amdgpu_bo_do_create
- amdgpu_bo_create_shadow
- amdgpu_bo_create
- amdgpu_bo_validate
- amdgpu_bo_restore_shadow
- amdgpu_bo_kmap
- amdgpu_bo_kptr
- amdgpu_bo_kunmap
- amdgpu_bo_ref
- amdgpu_bo_unref
- amdgpu_bo_pin_restricted
- amdgpu_bo_pin
- amdgpu_bo_unpin
- amdgpu_bo_evict_vram
- amdgpu_bo_init
- amdgpu_bo_late_init
- amdgpu_bo_fini
- amdgpu_bo_fbdev_mmap
- amdgpu_bo_set_tiling_flags
- amdgpu_bo_get_tiling_flags
- amdgpu_bo_set_metadata
- amdgpu_bo_get_metadata
- amdgpu_bo_move_notify
- amdgpu_bo_release_notify
- amdgpu_bo_fault_reserve_notify
- amdgpu_bo_fence
- amdgpu_bo_sync_wait
- amdgpu_bo_gpu_offset
- amdgpu_bo_get_preferred_pin_domain
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
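/**
 * amdgpu_bo_subtract_pin_size - update pinned-size accounting for a BO
 * @bo: &amdgpu_bo buffer object
 *
 * Subtracts the buffer's size from the device-wide VRAM or GTT pin-size
 * counters, depending on where the buffer currently resides.
 */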
static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}
}
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
	struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

	if (bo->pin_count > 0)
		amdgpu_bo_subtract_pin_size(bo);

	amdgpu_bo_kunmap(bo);

	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);

	if (!list_empty(&bo->shadow_list)) {
		mutex_lock(&adev->shadow_list_lock);
		list_del_init(&bo->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}
	amdgpu_bo_unref(&bo->parent);

	kfree(bo->metadata);
	kfree(bo);
}
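/**
 * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 * @bo: buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is an &amdgpu_bo.
 *
 * Returns:
 * true if the object belongs to &amdgpu_bo, false if not.
 */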
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_bo_destroy)
		return true;
	return false;
}
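/**
 * amdgpu_bo_placement_from_domain - set buffer's placement
 * @abo: &amdgpu_bo buffer object whose placement is to be set
 * @domain: requested domain
 *
 * Sets buffer's placement according to the requested domain and the buffer's
 * flags.
 */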
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct ttm_placement *placement = &abo->placement;
	struct ttm_place *places = abo->placements;
	u64 flags = abo->flags;
	u32 c = 0;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;

		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;

		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			places[c].lpfn = visible_pfn;
		else
			places[c].flags |= TTM_PL_FLAG_TOPDOWN;

		if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
			places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_TT;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_SYSTEM;
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
			places[c].flags |= TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED;
		else
			places[c].flags |= TTM_PL_FLAG_CACHED;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
		c++;
	}

	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
		c++;
	}

	if (!c) {
		places[c].fpfn = 0;
		places[c].lpfn = 0;
		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
		c++;
	}

	BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);

	placement->num_placement = c;
	placement->placement = places;

	placement->num_busy_placement = c;
	placement->busy_placement = places;
}
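/**
 * amdgpu_bo_create_reserved - create reserved BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use, and returns it still
 * reserved.
 *
 * Note: a new BO is only created if @bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */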
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr)
{
	struct amdgpu_bo_param bp;
	bool free = false;
	int r;

	if (!size) {
		amdgpu_bo_unref(bo_ptr);
		return 0;
	}

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = align;
	bp.domain = domain;
	bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
		: AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (!*bo_ptr) {
		r = amdgpu_bo_create(adev, &bp, bo_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
				r);
			return r;
		}
		free = true;
	}

	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
		goto error_free;
	}

	r = amdgpu_bo_pin(*bo_ptr, domain);
	if (r) {
		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
		goto error_unreserve;
	}

	r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
		goto error_unpin;
	}

	if (gpu_addr)
		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r) {
			dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
			goto error_unpin;
		}
	}

	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);

error_free:
	if (free)
		amdgpu_bo_unref(bo_ptr);

	return r;
}
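/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use; unlike
 * amdgpu_bo_create_reserved() the BO is returned unreserved.
 *
 * Note: a new BO is only created if @bo_ptr points to NULL.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 *
 * A minimal usage sketch (illustrative only; the my_* identifiers are
 * placeholders, not part of the driver):
 *
 *	struct amdgpu_bo *my_bo = NULL;
 *	u64 my_gpu_addr;
 *	void *my_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &my_bo,
 *				    &my_gpu_addr, &my_cpu_ptr);
 *	if (r)
 *		return r;
 *	... use my_cpu_ptr and my_gpu_addr ...
 *	amdgpu_bo_free_kernel(&my_bo, &my_gpu_addr, &my_cpu_ptr);
 */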
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
				      gpu_addr, cpu_addr);

	if (r)
		return r;

	if (*bo_ptr)
		amdgpu_bo_unreserve(*bo_ptr);

	return 0;
}
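/**
 * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 *
 * @adev: amdgpu device object
 * @offset: offset of the BO
 * @size: size of the BO
 * @domain: where to place it
 * @bo_ptr: used to initialize BOs in structures
 * @cpu_addr: optional CPU address mapping
 *
 * Creates a kernel BO at a specific offset in the address space of the
 * domain.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */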
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	unsigned int i;
	int r;

	offset &= PAGE_MASK;
	size = ALIGN(size, PAGE_SIZE);

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
				      NULL, NULL);
	if (r)
		return r;

	/*
	 * Remove the original mem node and create a new one at the requested
	 * position.
	 */
	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
	}

	ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
			     &(*bo_ptr)->tbo.mem, &ctx);
	if (r)
		goto error;

	if (cpu_addr) {
		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
		if (r)
			goto error;
	}

	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error:
	amdgpu_bo_unreserve(*bo_ptr);
	amdgpu_bo_unref(bo_ptr);
	return r;
}
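/**
 * amdgpu_bo_free_kernel - free BO for kernel use
 *
 * @bo: amdgpu BO to free
 * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 *
 * Unmaps, unpins and unreferences a kernel BO, then clears the stored
 * addresses.
 */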
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr)
{
	if (*bo == NULL)
		return;

	if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
		if (cpu_addr)
			amdgpu_bo_kunmap(*bo);

		amdgpu_bo_unpin(*bo);
		amdgpu_bo_unreserve(*bo);
	}
	amdgpu_bo_unref(bo);

	if (gpu_addr)
		*gpu_addr = 0;

	if (cpu_addr)
		*cpu_addr = NULL;
}
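/* Check whether the requested size can fit in the given domain at all. */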
static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
				    unsigned long size, u32 domain)
{
	struct ttm_mem_type_manager *man = NULL;

	/*
	 * If GTT is part of the requested domains the check must succeed to
	 * allow falling back to GTT.
	 */
	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		man = &adev->mman.bdev.man[TTM_PL_TT];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		man = &adev->mman.bdev.man[TTM_PL_VRAM];

		if (size < (man->size << PAGE_SHIFT))
			return true;
		else
			goto fail;
	}

	/* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
	return true;

fail:
	DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
		  man->size << PAGE_SHIFT);
	return false;
}
bool amdgpu_bo_support_uswc(u64 bo_flags)
{

#ifdef CONFIG_X86_32
	/* Write-combined CPU mappings of GTT are known to be problematic on
	 * 32-bit x86, so never report USWC support there.
	 */
	return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Without PAT, write-combining cannot be set up for these mappings,
	 * so warn and report no USWC support rather than run slowly.
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	return false;
#else
	/* For architectures that don't support WC memory, mask out the WC
	 * flag from the BO.
	 */
	if (!drm_arch_can_wc_memory())
		return false;

	return true;
#endif
}
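/* Common creation path, shared by amdgpu_bo_create() and shadow creation. */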
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
			       struct amdgpu_bo_param *bp,
			       struct amdgpu_bo **bo_ptr)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = (bp->type != ttm_bo_type_kernel),
		.no_wait_gpu = false,
		.resv = bp->resv,
		.flags = bp->type != ttm_bo_type_kernel ?
			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
	};
	struct amdgpu_bo *bo;
	unsigned long page_align, size = bp->size;
	size_t acc_size;
	int r;

	/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		/* GWS and OA don't need any alignment. */
		page_align = bp->byte_align;
		size <<= PAGE_SHIFT;
	} else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
		/* Both size and alignment must be a multiple of 4. */
		page_align = ALIGN(bp->byte_align, 4);
		size = ALIGN(size, 4) << PAGE_SHIFT;
	} else {
		/* Memory should be aligned at least to a page size. */
		page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
		size = ALIGN(size, PAGE_SIZE);
	}

	if (!amdgpu_bo_validate_size(adev, size, bp->domain))
		return -ENOMEM;

	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
	INIT_LIST_HEAD(&bo->shadow_list);
	bo->vm_bo = NULL;
	bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
		bp->domain;
	bo->allowed_domains = bo->preferred_domains;
	if (bp->type != ttm_bo_type_kernel &&
	    bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = bp->flags;

	if (!amdgpu_bo_support_uswc(bo->flags))
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	bo->tbo.bdev = &adev->mman.bdev;
	if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
			  AMDGPU_GEM_DOMAIN_GDS))
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	else
		amdgpu_bo_placement_from_domain(bo, bp->domain);
	if (bp->type == ttm_bo_type_kernel)
		bo->tbo.priority = 1;

	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
				 &bo->placement, page_align, &ctx, acc_size,
				 NULL, bp->resv, &amdgpu_bo_destroy);
	if (unlikely(r != 0))
		return r;

	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
					     ctx.bytes_moved);
	else
		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
		struct dma_fence *fence;

		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_fence_put(fence);
	}
	if (!bp->resv)
		amdgpu_bo_unreserve(bo);
	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	/* Treat CPU_ACCESS_REQUIRED only as a placement hint for user BOs. */
	if (bp->type == ttm_bo_type_device)
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	return 0;

fail_unreserve:
	if (!bp->resv)
		dma_resv_unlock(bo->tbo.base.resv);
	amdgpu_bo_unref(&bo);
	return r;
}
static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
				   unsigned long size,
				   struct amdgpu_bo *bo)
{
	struct amdgpu_bo_param bp;
	int r;

	if (bo->shadow)
		return 0;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		AMDGPU_GEM_CREATE_SHADOW;
	bp.type = ttm_bo_type_kernel;
	bp.resv = bo->tbo.base.resv;

	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
	if (!r) {
		bo->shadow->parent = amdgpu_bo_ref(bo);
		mutex_lock(&adev->shadow_list_lock);
		list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
		mutex_unlock(&adev->shadow_list_lock);
	}

	return r;
}
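/**
 * amdgpu_bo_create - create an &amdgpu_bo buffer object
 * @adev: amdgpu device object
 * @bp: parameters to be used for the buffer object
 * @bo_ptr: pointer to the buffer object pointer
 *
 * Creates an &amdgpu_bo buffer object; additionally creates a shadow object
 * in GTT if AMDGPU_GEM_CREATE_SHADOW is requested and the device is not an
 * APU.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */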
int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr)
{
	u64 flags = bp->flags;
	int r;

	bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
	r = amdgpu_bo_do_create(adev, bp, bo_ptr);
	if (r)
		return r;

	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
		if (!bp->resv)
			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
					      NULL));

		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);

		if (!bp->resv)
			dma_resv_unlock((*bo_ptr)->tbo.base.resv);

		if (r)
			amdgpu_bo_unref(bo_ptr);
	}

	return r;
}
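/**
 * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 * @bo: pointer to the buffer object
 *
 * Re-validates an unpinned BO against its preferred domains, falling back to
 * its allowed domains when no space is available. Calls ttm_bo_validate() to
 * make sure the buffer is resident where it needs to be.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */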
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	uint32_t domain;
	int r;

	if (bo->pin_count)
		return 0;

	domain = bo->preferred_domains;

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}
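/**
 * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 * @shadow: &amdgpu_bo shadow to be restored
 * @fence: dma_fence associated with the operation
 *
 * Copies a buffer object's shadow content back into the parent object, for
 * example to recover VRAM contents after a GPU reset.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */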
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t shadow_addr, parent_addr;

	shadow_addr = amdgpu_bo_gpu_offset(shadow);
	parent_addr = amdgpu_bo_gpu_offset(shadow->parent);

	return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
				  amdgpu_bo_size(shadow), NULL, fence,
				  true, false);
}
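/**
 * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be mapped
 * @ptr: kernel virtual address to be returned
 *
 * Waits for any pending exclusive fence, then calls ttm_bo_kmap() and stores
 * the kernel virtual address of the mapping in @ptr.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */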
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	void *kptr;
	long r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)
			*ptr = kptr;
		return 0;
	}

	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
				      MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	if (ptr)
		*ptr = amdgpu_bo_kptr(bo);

	return 0;
}
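/**
 * amdgpu_bo_kptr - return the kernel virtual address of the buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Calls ttm_kmap_obj_virtual() to get the kernel virtual address.
 *
 * Returns:
 * the virtual address of a buffer object area, or NULL if not mapped.
 */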
void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
{
	bool is_iomem;

	return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
}
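/**
 * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unmapped
 *
 * Unmaps a kernel map set up by amdgpu_bo_kmap().
 */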
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kmap.bo)
		ttm_bo_kunmap(&bo->kmap);
}
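/**
 * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Returns:
 * a refcounted pointer to the &amdgpu_bo buffer object, or NULL if @bo is
 * NULL.
 */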
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}
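/**
 * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object
 *
 * Drops a reference and clears the pointer to the buffer object.
 */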
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}
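/**
 * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 * @min_offset: the start of requested address range
 * @max_offset: the end of requested address range
 *
 * Pins the buffer object according to the requested domain and address
 * range. Pinning locks the pages in memory and keeps them at a fixed offset,
 * which is required when a buffer cannot be moved, for example while a
 * display buffer is being scanned out.
 *
 * Pinning an already pinned BO only increments the pin count, provided the
 * BO already resides in the requested domain.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */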
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	/* A shared bo cannot be migrated to VRAM */
	if (bo->prime_shared_count) {
		if (domain & AMDGPU_GEM_DOMAIN_GTT)
			domain = AMDGPU_GEM_DOMAIN_GTT;
		else
			return -EINVAL;
	}

	/* This assumes only APU display buffers are pinned with (VRAM|GTT).
	 * See function amdgpu_display_supported_domains()
	 */
	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

	if (bo->pin_count) {
		uint32_t mem_type = bo->tbo.mem.mem_type;

		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
			return -EINVAL;

		bo->pin_count++;

		if (max_offset != 0) {
			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}

	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	/* force to pin into visible video ram */
	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned fpfn, lpfn;

		fpfn = min_offset >> PAGE_SHIFT;
		lpfn = max_offset >> PAGE_SHIFT;

		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r)) {
		dev_err(adev->dev, "%p pin failed\n", bo);
		goto error;
	}

	bo->pin_count = 1;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
			     &adev->visible_pin_size);
	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
		atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
	}

error:
	return r;
}
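/**
 * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be pinned
 * @domain: domain to be pinned to
 *
 * A simple wrapper to amdgpu_bo_pin_restricted() without an address range.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */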
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
}
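/**
 * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 * @bo: &amdgpu_bo buffer object to be unpinned
 *
 * Decreases the pin count; when it reaches zero, the buffer becomes
 * evictable again and is re-validated without the NO_EVICT flag.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */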
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (WARN_ON_ONCE(!bo->pin_count)) {
		dev_warn(adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	amdgpu_bo_subtract_pin_size(bo);

	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r))
		dev_err(adev->dev, "%p validate failed for unpin\n", bo);

	return r;
}
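/**
 * amdgpu_bo_evict_vram - evict VRAM buffers
 * @adev: amdgpu device object
 *
 * Evicts all VRAM buffers on the lru list of the memory type, mainly used
 * for evicting VRAM contents at suspend time.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */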
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* On APUs the VRAM carve-out is system memory, so eviction is only
	 * needed when hibernation support is compiled in.
	 */
#ifndef CONFIG_HIBERNATION
	if (adev->flags & AMD_IS_APU) {
		/* Useless to evict on APU chips */
		return 0;
	}
#endif
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
	"UNKNOWN",
	"GDDR1",
	"DDR2",
	"GDDR3",
	"GDDR4",
	"GDDR5",
	"HBM",
	"DDR3",
	"DDR4",
	"GDDR6",
};
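/**
 * amdgpu_bo_init - initialize memory manager
 * @adev: amdgpu device object
 *
 * Sets up write-combined CPU access to the VRAM aperture and calls
 * amdgpu_ttm_init() to initialize the memory manager.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */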
int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(adev->gmc.aper_base,
				   adev->gmc.aper_size);

	/* Add an MTRR for the VRAM */
	adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
					       adev->gmc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->gmc.mc_vram_size >> 20,
		 (unsigned long long)adev->gmc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %s\n",
		 adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
	return amdgpu_ttm_init(adev);
}
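/**
 * amdgpu_bo_late_init - late init
 * @adev: amdgpu device object
 *
 * Calls amdgpu_ttm_late_init() to free resources used earlier during
 * initialization.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */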
int amdgpu_bo_late_init(struct amdgpu_device *adev)
{
	amdgpu_ttm_late_init(adev);

	return 0;
}
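/**
 * amdgpu_bo_fini - tear down memory manager
 * @adev: amdgpu device object
 *
 * Reverses amdgpu_bo_init() to tear down the memory manager.
 */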
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->gmc.vram_mtrr);
	arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
}
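/**
 * amdgpu_bo_fbdev_mmap - mmap fbdev memory
 * @bo: &amdgpu_bo buffer object
 * @vma: vma as input from the fbdev mmap method
 *
 * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */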
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}
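/**
 * amdgpu_bo_set_tiling_flags - set tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: new flags
 *
 * Sets the buffer object's tiling flags. Used by GEM ioctl or kernel driver
 * to set the tiling flags on a buffer.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */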
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (adev->family <= AMDGPU_FAMILY_CZ &&
	    AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}
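/**
 * amdgpu_bo_get_tiling_flags - get tiling flags
 * @bo: &amdgpu_bo buffer object
 * @tiling_flags: returned flags
 *
 * Returns the buffer object's tiling flags. The caller must hold the BO's
 * reservation lock.
 */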
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}
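/**
 * amdgpu_bo_set_metadata - set metadata
 * @bo: &amdgpu_bo buffer object
 * @metadata: new metadata
 * @metadata_size: size of the new metadata
 * @flags: flags of the new metadata
 *
 * Sets the buffer object's metadata, its size and flags. Passing a size of
 * zero frees any existing metadata. Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */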
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}
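/**
 * amdgpu_bo_get_metadata - get metadata
 * @bo: &amdgpu_bo buffer object
 * @buffer: returned metadata
 * @buffer_size: size of the buffer
 * @metadata_size: size of the returned metadata
 * @flags: flags of the returned metadata
 *
 * Gets the buffer object's metadata, its size and flags. @buffer_size shall
 * not be less than the metadata size. Used via GEM ioctl.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */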
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}
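/**
 * amdgpu_bo_move_notify - notification about a memory move
 * @bo: pointer to a buffer object
 * @evict: if this move is evicting the buffer from the graphics address space
 * @new_mem: new information of the buffer object
 *
 * Marks the corresponding &amdgpu_bo buffer object as invalid and performs
 * bookkeeping. TTM driver callback which is called when ttm moves a buffer.
 */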
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);
	amdgpu_vm_bo_invalidate(adev, abo, evict);

	amdgpu_bo_kunmap(abo);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	/* nothing to trace if there is no new placement */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
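/**
 * amdgpu_bo_release_notify - notification about a BO being released
 * @bo: pointer to a buffer object
 *
 * Wipes VRAM buffers whose contents should not persist after release, and
 * drops the KFD memory-limit reservation if one exists.
 */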
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *abo;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return;

	abo = ttm_to_amdgpu_bo(bo);

	if (abo->kfd_bo)
		amdgpu_amdkfd_unreserve_memory_limit(abo);

	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
		return;

	dma_resv_lock(bo->base.resv, NULL);

	r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
	if (!WARN_ON(r)) {
		amdgpu_bo_fence(abo, fence, false);
		dma_fence_put(fence);
	}

	dma_resv_unlock(bo->base.resv);
}
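/**
 * amdgpu_bo_fault_reserve_notify - notification about a memory fault
 * @bo: pointer to a buffer object
 *
 * Notifies the driver we are taking a fault on this BO and have reserved it,
 * also performs bookkeeping. TTM driver callback for dealing with vm faults.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */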
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo *abo;
	unsigned long offset, size;
	int r;

	if (!amdgpu_bo_is_amdgpu_bo(bo))
		return 0;

	abo = ttm_to_amdgpu_bo(bo);

	/* Remember that this BO was accessed by the CPU */
	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->gmc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (abo->pin_count > 0)
		return -EINVAL;

	/* The memory is not visible, try to move it to visible VRAM or GTT */
	atomic64_inc(&adev->num_vram_cpu_page_faults);
	amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
					AMDGPU_GEM_DOMAIN_GTT);

	/* Avoid costly evictions; only set GTT as a busy placement */
	abo->placement.num_busy_placement = 1;
	abo->placement.busy_placement = &abo->placements[1];

	r = ttm_bo_validate(bo, &abo->placement, &ctx);
	if (unlikely(r != 0))
		return r;

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen, the BO was just validated */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    (offset + size) > adev->gmc.visible_vram_size)
		return -EINVAL;

	return 0;
}
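/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */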
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
}
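/**
 * amdgpu_bo_sync_wait - wait for all fences on a BO's reservation object
 * @bo: buffer object to wait for
 * @owner: fence owner
 * @intr: whether the wait is interruptible
 *
 * Collects all fences from the BO's reservation object into an &amdgpu_sync
 * object and waits for them.
 *
 * Returns:
 * 0 for success or a negative error code on failure.
 */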
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_sync sync;
	int r;

	amdgpu_sync_create(&sync);
	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
	r = amdgpu_sync_wait(&sync, intr);
	amdgpu_sync_free(&sync);

	return r;
}
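/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; the WARN_ON_ONCE checks below enforce this for debugging.
 *
 * Returns:
 * current GPU offset of the object.
 */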
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

	return amdgpu_gmc_sign_extend(bo->tbo.offset);
}
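/**
 * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
 * @adev: amdgpu device object
 * @domain: allowed memory domains
 *
 * Returns:
 * Which of the allowed domains is preferred for pinning the BO for scanout.
 */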
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain)
{
	if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
			domain = AMDGPU_GEM_DOMAIN_GTT;
	}
	return domain;
}