This source file includes the following definitions.
- radeon_update_memory_usage
- radeon_ttm_bo_destroy
- radeon_ttm_bo_is_radeon_bo
- radeon_ttm_placement_from_domain
- radeon_bo_create
- radeon_bo_kmap
- radeon_bo_kunmap
- radeon_bo_ref
- radeon_bo_unref
- radeon_bo_pin_restricted
- radeon_bo_pin
- radeon_bo_unpin
- radeon_bo_evict_vram
- radeon_bo_force_delete
- radeon_bo_init
- radeon_bo_fini
- radeon_bo_get_threshold_for_moves
- radeon_bo_list_validate
- radeon_bo_get_surface_reg
- radeon_bo_clear_surface_reg
- radeon_bo_set_tiling_flags
- radeon_bo_get_tiling_flags
- radeon_bo_check_tiling
- radeon_bo_move_notify
- radeon_bo_fault_reserve_notify
- radeon_bo_wait
- radeon_bo_fence
#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * of these functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of
		 * the CPU-accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}
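
/*
 * Illustrative usage sketch (hypothetical helper, for illustration only):
 * callers typically rebuild the placement list for a domain mask and then
 * let TTM migrate the buffer, the same pattern followed by
 * radeon_bo_pin_restricted() and radeon_bo_fault_reserve_notify() below.
 */
static int __maybe_unused radeon_bo_example_move_to_domain(struct radeon_bo *rbo,
							   u32 domain)
{
	struct ttm_operation_ctx ctx = { false, false };

	/* refresh rbo->placement for the requested domain */
	radeon_ttm_placement_from_domain(rbo, domain);
	/* ask TTM to (re)validate the BO against the new placement */
	return ttm_bo_validate(&rbo->tbo, &rbo->placement, &ctx);
}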

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs on RV6xx
	 * parts, so drop the WC/UC flags there.
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* Write-combined CPU mappings of GTT are known to be problematic
	 * on 32-bit x86, so drop them there as well.
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work (no PAT);
	 * it would only slow things down.
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support write-combined memory,
	 * mask out the WC flag from the BO.
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, acc_size,
			sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
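
/*
 * Illustrative usage sketch (hypothetical helper, for illustration only):
 * the usual pattern for a kernel-owned buffer is create, reserve, pin and
 * kmap. It assumes the radeon_bo_reserve()/radeon_bo_unreserve() inlines
 * from radeon_object.h; error unwinding is kept minimal.
 */
static int __maybe_unused radeon_bo_example_create_kernel(struct radeon_device *rdev,
							  unsigned long size,
							  struct radeon_bo **bo,
							  u64 *gpu_addr,
							  void **cpu_addr)
{
	int r;

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
	if (r)
		return r;

	r = radeon_bo_reserve(*bo, false);
	if (r)
		goto error_unref;

	r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
	if (r)
		goto error_unreserve;

	r = radeon_bo_kmap(*bo, cpu_addr);
	if (r)
		goto error_unpin;

	radeon_bo_unreserve(*bo);
	return 0;

error_unpin:
	radeon_bo_unpin(*bo);
error_unreserve:
	radeon_bo_unreserve(*bo);
error_unref:
	radeon_bo_unref(bo);
	return r;
}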

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_get(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_put(tbo);
	*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}
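
/*
 * Illustrative teardown sketch (hypothetical helper, for illustration only):
 * the reverse of the create/pin/kmap pattern shown earlier. The unmap and
 * unpin are skipped if the reservation cannot be taken, but the reference
 * is always dropped.
 */
static void __maybe_unused radeon_bo_example_free_kernel(struct radeon_bo **bo)
{
	if (*bo == NULL)
		return;

	if (radeon_bo_reserve(*bo, false) == 0) {
		radeon_bo_kunmap(*bo);
		radeon_bo_unpin(*bo);
		radeon_bo_unreserve(*bo);
	}
	radeon_bo_unref(bo);
}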

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* Unless hibernation support is built in, evicting VRAM is pointless
	 * on IGP chips without sideport memory: their VRAM is carved out of
	 * system RAM anyway.
	 */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->tbo.base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve the VRAM aperture for write-combined mappings */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM aperture */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM may move per command submission. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* Heuristic: the per-submission move budget shrinks as VRAM fills.
	 * It is half of the remaining headroom below the halfway point of
	 * real VRAM, with a 1 MB floor so small buffers can always be moved.
	 */
	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
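
/*
 * Worked example (illustrative numbers, not taken from the source): with
 * 2048 MB of real VRAM and 512 MB currently in use, half_vram is 1024 MB,
 * the headroom below it is 512 MB, and the returned threshold is 256 MB.
 * Once usage reaches 1024 MB the headroom is treated as zero and the
 * function returns the 1 MB floor.
 */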

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move
			 * it if we have already moved too many buffers for
			 * this submission.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account; buffer moves are never disallowed
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 &&
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}
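
/*
 * Illustrative sketch (hypothetical helper with made-up values): tiling
 * parameters are packed into the single tiling_flags word with the same
 * RADEON_TILING_* shift/mask pairs decoded above, here assuming the
 * RADEON_TILING_MACRO flag from radeon_drm.h and a 2x1 bank geometry.
 */
static int __maybe_unused radeon_bo_example_set_tiling(struct radeon_bo *bo,
						       uint32_t pitch)
{
	uint32_t flags = RADEON_TILING_MACRO;

	/* values 0/1/2/4/8 pass the validation in radeon_bo_set_tiling_flags() */
	flags |= (2 & RADEON_TILING_EG_BANKW_MASK) << RADEON_TILING_EG_BANKW_SHIFT;
	flags |= (1 & RADEON_TILING_EG_BANKH_MASK) << RADEON_TILING_EG_BANKH_SHIFT;

	return radeon_bo_set_tiling_flags(bo, flags, pitch);
}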

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* the memory is not CPU-visible: force it into the visible window */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
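
/*
 * Illustrative sketch (hypothetical helper, for illustration only): polling
 * a BO for idleness without blocking. A zero return from radeon_bo_wait()
 * with no_wait set means the BO could be reserved and had no pending fences.
 */
static bool __maybe_unused radeon_bo_example_is_idle(struct radeon_bo *bo)
{
	/* a nonzero return (e.g. -EBUSY) means still busy or contended */
	return radeon_bo_wait(bo, NULL, true) == 0;
}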

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;

	if (shared)
		dma_resv_add_shared_fence(resv, &fence->base);
	else
		dma_resv_add_excl_fence(resv, &fence->base);
}