1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <drm/drmP.h>
29 #include <drm/radeon_drm.h>
30 #include "radeon.h"
31 #include "radeon_trace.h"
32
33 /*
34 * GPUVM
35 * GPUVM is similar to the legacy gart on older asics, however
36 * rather than there being a single global gart table
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix
39 * vram pages and system memory pages and system memory pages
40 * can be mapped as snooped (cached system pages) or unsnooped
41 * (uncached system pages).
42 * Each VM has an ID associated with it and there is a page table
43 * associated with each VMID. When execting a command buffer,
44 * the kernel tells the the ring what VMID to use for that command
45 * buffer. VMIDs are allocated dynamically as commands are submitted.
46 * The userspace drivers maintain their own address space and the kernel
47 * sets up their pages tables accordingly when they submit their
48 * command buffers and a VMID is assigned.
49 * Cayman/Trinity support up to 8 active VMs at any given time;
50 * SI supports 16.
51 */
52
53 /**
54 * radeon_vm_num_pde - return the number of page directory entries
55 *
56 * @rdev: radeon_device pointer
57 *
58 * Calculate the number of page directory entries (cayman+).
59 */
radeon_vm_num_pdes(struct radeon_device * rdev)60 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61 {
62 return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
63 }
64
65 /**
66 * radeon_vm_directory_size - returns the size of the page directory in bytes
67 *
68 * @rdev: radeon_device pointer
69 *
70 * Calculate the size of the page directory in bytes (cayman+).
71 */
radeon_vm_directory_size(struct radeon_device * rdev)72 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
73 {
74 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
75 }
76
77 /**
78 * radeon_vm_manager_init - init the vm manager
79 *
80 * @rdev: radeon_device pointer
81 *
82 * Init the vm manager (cayman+).
83 * Returns 0 for success, error for failure.
84 */
radeon_vm_manager_init(struct radeon_device * rdev)85 int radeon_vm_manager_init(struct radeon_device *rdev)
86 {
87 int r;
88
89 if (!rdev->vm_manager.enabled) {
90 r = radeon_asic_vm_init(rdev);
91 if (r)
92 return r;
93
94 rdev->vm_manager.enabled = true;
95 }
96 return 0;
97 }
98
99 /**
100 * radeon_vm_manager_fini - tear down the vm manager
101 *
102 * @rdev: radeon_device pointer
103 *
104 * Tear down the VM manager (cayman+).
105 */
radeon_vm_manager_fini(struct radeon_device * rdev)106 void radeon_vm_manager_fini(struct radeon_device *rdev)
107 {
108 int i;
109
110 if (!rdev->vm_manager.enabled)
111 return;
112
113 for (i = 0; i < RADEON_NUM_VM; ++i)
114 radeon_fence_unref(&rdev->vm_manager.active[i]);
115 radeon_asic_vm_fini(rdev);
116 rdev->vm_manager.enabled = false;
117 }
118
119 /**
120 * radeon_vm_get_bos - add the vm BOs to a validation list
121 *
122 * @vm: vm providing the BOs
123 * @head: head of validation list
124 *
125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+).
127 */
radeon_vm_get_bos(struct radeon_device * rdev,struct radeon_vm * vm,struct list_head * head)128 struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm,
130 struct list_head *head)
131 {
132 struct radeon_bo_list *list;
133 unsigned i, idx;
134
135 list = drm_malloc_ab(vm->max_pde_used + 2,
136 sizeof(struct radeon_bo_list));
137 if (!list)
138 return NULL;
139
140 /* add the vm page table to the list */
141 list[0].robj = vm->page_directory;
142 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
143 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].tv.bo = &vm->page_directory->tbo;
145 list[0].tv.shared = true;
146 list[0].tiling_flags = 0;
147 list_add(&list[0].tv.head, head);
148
149 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
150 if (!vm->page_tables[i].bo)
151 continue;
152
153 list[idx].robj = vm->page_tables[i].bo;
154 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
155 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
156 list[idx].tv.bo = &list[idx].robj->tbo;
157 list[idx].tv.shared = true;
158 list[idx].tiling_flags = 0;
159 list_add(&list[idx++].tv.head, head);
160 }
161
162 return list;
163 }
164
165 /**
166 * radeon_vm_grab_id - allocate the next free VMID
167 *
168 * @rdev: radeon_device pointer
169 * @vm: vm to allocate id for
170 * @ring: ring we want to submit job to
171 *
172 * Allocate an id for the vm (cayman+).
173 * Returns the fence we need to sync to (if any).
174 *
175 * Global and local mutex must be locked!
176 */
radeon_vm_grab_id(struct radeon_device * rdev,struct radeon_vm * vm,int ring)177 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
178 struct radeon_vm *vm, int ring)
179 {
180 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
181 struct radeon_vm_id *vm_id = &vm->ids[ring];
182
183 unsigned choices[2] = {};
184 unsigned i;
185
186 /* check if the id is still valid */
187 if (vm_id->id && vm_id->last_id_use &&
188 vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
189 return NULL;
190
191 /* we definately need to flush */
192 vm_id->pd_gpu_addr = ~0ll;
193
194 /* skip over VMID 0, since it is the system VM */
195 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
196 struct radeon_fence *fence = rdev->vm_manager.active[i];
197
198 if (fence == NULL) {
199 /* found a free one */
200 vm_id->id = i;
201 trace_radeon_vm_grab_id(i, ring);
202 return NULL;
203 }
204
205 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
206 best[fence->ring] = fence;
207 choices[fence->ring == ring ? 0 : 1] = i;
208 }
209 }
210
211 for (i = 0; i < 2; ++i) {
212 if (choices[i]) {
213 vm_id->id = choices[i];
214 trace_radeon_vm_grab_id(choices[i], ring);
215 return rdev->vm_manager.active[choices[i]];
216 }
217 }
218
219 /* should never happen */
220 BUG();
221 return NULL;
222 }
223
224 /**
225 * radeon_vm_flush - hardware flush the vm
226 *
227 * @rdev: radeon_device pointer
228 * @vm: vm we want to flush
229 * @ring: ring to use for flush
230 * @updates: last vm update that is waited for
231 *
232 * Flush the vm (cayman+).
233 *
234 * Global and local mutex must be locked!
235 */
radeon_vm_flush(struct radeon_device * rdev,struct radeon_vm * vm,int ring,struct radeon_fence * updates)236 void radeon_vm_flush(struct radeon_device *rdev,
237 struct radeon_vm *vm,
238 int ring, struct radeon_fence *updates)
239 {
240 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
241 struct radeon_vm_id *vm_id = &vm->ids[ring];
242
243 if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
244 radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
245
246 trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
247 radeon_fence_unref(&vm_id->flushed_updates);
248 vm_id->flushed_updates = radeon_fence_ref(updates);
249 vm_id->pd_gpu_addr = pd_addr;
250 radeon_ring_vm_flush(rdev, &rdev->ring[ring],
251 vm_id->id, vm_id->pd_gpu_addr);
252
253 }
254 }
255
256 /**
257 * radeon_vm_fence - remember fence for vm
258 *
259 * @rdev: radeon_device pointer
260 * @vm: vm we want to fence
261 * @fence: fence to remember
262 *
263 * Fence the vm (cayman+).
264 * Set the fence used to protect page table and id.
265 *
266 * Global and local mutex must be locked!
267 */
radeon_vm_fence(struct radeon_device * rdev,struct radeon_vm * vm,struct radeon_fence * fence)268 void radeon_vm_fence(struct radeon_device *rdev,
269 struct radeon_vm *vm,
270 struct radeon_fence *fence)
271 {
272 unsigned vm_id = vm->ids[fence->ring].id;
273
274 radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
275 rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
276
277 radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
278 vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
279 }
280
281 /**
282 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
283 *
284 * @vm: requested vm
285 * @bo: requested buffer object
286 *
287 * Find @bo inside the requested vm (cayman+).
288 * Search inside the @bos vm list for the requested vm
289 * Returns the found bo_va or NULL if none is found
290 *
291 * Object has to be reserved!
292 */
radeon_vm_bo_find(struct radeon_vm * vm,struct radeon_bo * bo)293 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
294 struct radeon_bo *bo)
295 {
296 struct radeon_bo_va *bo_va;
297
298 list_for_each_entry(bo_va, &bo->va, bo_list) {
299 if (bo_va->vm == vm) {
300 return bo_va;
301 }
302 }
303 return NULL;
304 }
305
306 /**
307 * radeon_vm_bo_add - add a bo to a specific vm
308 *
309 * @rdev: radeon_device pointer
310 * @vm: requested vm
311 * @bo: radeon buffer object
312 *
313 * Add @bo into the requested vm (cayman+).
314 * Add @bo to the list of bos associated with the vm
315 * Returns newly added bo_va or NULL for failure
316 *
317 * Object has to be reserved!
318 */
radeon_vm_bo_add(struct radeon_device * rdev,struct radeon_vm * vm,struct radeon_bo * bo)319 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
320 struct radeon_vm *vm,
321 struct radeon_bo *bo)
322 {
323 struct radeon_bo_va *bo_va;
324
325 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
326 if (bo_va == NULL) {
327 return NULL;
328 }
329 bo_va->vm = vm;
330 bo_va->bo = bo;
331 bo_va->it.start = 0;
332 bo_va->it.last = 0;
333 bo_va->flags = 0;
334 bo_va->addr = 0;
335 bo_va->ref_count = 1;
336 INIT_LIST_HEAD(&bo_va->bo_list);
337 INIT_LIST_HEAD(&bo_va->vm_status);
338
339 mutex_lock(&vm->mutex);
340 list_add_tail(&bo_va->bo_list, &bo->va);
341 mutex_unlock(&vm->mutex);
342
343 return bo_va;
344 }
345
346 /**
347 * radeon_vm_set_pages - helper to call the right asic function
348 *
349 * @rdev: radeon_device pointer
350 * @ib: indirect buffer to fill with commands
351 * @pe: addr of the page entry
352 * @addr: dst addr to write into pe
353 * @count: number of page entries to update
354 * @incr: increase next addr by incr bytes
355 * @flags: hw access flags
356 *
357 * Traces the parameters and calls the right asic functions
358 * to setup the page table using the DMA.
359 */
radeon_vm_set_pages(struct radeon_device * rdev,struct radeon_ib * ib,uint64_t pe,uint64_t addr,unsigned count,uint32_t incr,uint32_t flags)360 static void radeon_vm_set_pages(struct radeon_device *rdev,
361 struct radeon_ib *ib,
362 uint64_t pe,
363 uint64_t addr, unsigned count,
364 uint32_t incr, uint32_t flags)
365 {
366 trace_radeon_vm_set_page(pe, addr, count, incr, flags);
367
368 if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
369 uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
370 radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
371
372 } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
373 radeon_asic_vm_write_pages(rdev, ib, pe, addr,
374 count, incr, flags);
375
376 } else {
377 radeon_asic_vm_set_pages(rdev, ib, pe, addr,
378 count, incr, flags);
379 }
380 }
381
382 /**
383 * radeon_vm_clear_bo - initially clear the page dir/table
384 *
385 * @rdev: radeon_device pointer
386 * @bo: bo to clear
387 */
radeon_vm_clear_bo(struct radeon_device * rdev,struct radeon_bo * bo)388 static int radeon_vm_clear_bo(struct radeon_device *rdev,
389 struct radeon_bo *bo)
390 {
391 struct radeon_ib ib;
392 unsigned entries;
393 uint64_t addr;
394 int r;
395
396 r = radeon_bo_reserve(bo, false);
397 if (r)
398 return r;
399
400 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
401 if (r)
402 goto error_unreserve;
403
404 addr = radeon_bo_gpu_offset(bo);
405 entries = radeon_bo_size(bo) / 8;
406
407 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
408 if (r)
409 goto error_unreserve;
410
411 ib.length_dw = 0;
412
413 radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
414 radeon_asic_vm_pad_ib(rdev, &ib);
415 WARN_ON(ib.length_dw > 64);
416
417 r = radeon_ib_schedule(rdev, &ib, NULL, false);
418 if (r)
419 goto error_free;
420
421 ib.fence->is_vm_update = true;
422 radeon_bo_fence(bo, ib.fence, false);
423
424 error_free:
425 radeon_ib_free(rdev, &ib);
426
427 error_unreserve:
428 radeon_bo_unreserve(bo);
429 return r;
430 }
431
432 /**
433 * radeon_vm_bo_set_addr - set bos virtual address inside a vm
434 *
435 * @rdev: radeon_device pointer
436 * @bo_va: bo_va to store the address
437 * @soffset: requested offset of the buffer in the VM address space
438 * @flags: attributes of pages (read/write/valid/etc.)
439 *
440 * Set offset of @bo_va (cayman+).
441 * Validate and set the offset requested within the vm address space.
442 * Returns 0 for success, error for failure.
443 *
444 * Object has to be reserved and gets unreserved by this function!
445 */
radeon_vm_bo_set_addr(struct radeon_device * rdev,struct radeon_bo_va * bo_va,uint64_t soffset,uint32_t flags)446 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
447 struct radeon_bo_va *bo_va,
448 uint64_t soffset,
449 uint32_t flags)
450 {
451 uint64_t size = radeon_bo_size(bo_va->bo);
452 struct radeon_vm *vm = bo_va->vm;
453 unsigned last_pfn, pt_idx;
454 uint64_t eoffset;
455 int r;
456
457 if (soffset) {
458 /* make sure object fit at this offset */
459 eoffset = soffset + size - 1;
460 if (soffset >= eoffset) {
461 r = -EINVAL;
462 goto error_unreserve;
463 }
464
465 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
466 if (last_pfn >= rdev->vm_manager.max_pfn) {
467 dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
468 last_pfn, rdev->vm_manager.max_pfn);
469 r = -EINVAL;
470 goto error_unreserve;
471 }
472
473 } else {
474 eoffset = last_pfn = 0;
475 }
476
477 mutex_lock(&vm->mutex);
478 soffset /= RADEON_GPU_PAGE_SIZE;
479 eoffset /= RADEON_GPU_PAGE_SIZE;
480 if (soffset || eoffset) {
481 struct interval_tree_node *it;
482 it = interval_tree_iter_first(&vm->va, soffset, eoffset);
483 if (it && it != &bo_va->it) {
484 struct radeon_bo_va *tmp;
485 tmp = container_of(it, struct radeon_bo_va, it);
486 /* bo and tmp overlap, invalid offset */
487 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
488 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
489 soffset, tmp->bo, tmp->it.start, tmp->it.last);
490 mutex_unlock(&vm->mutex);
491 r = -EINVAL;
492 goto error_unreserve;
493 }
494 }
495
496 if (bo_va->it.start || bo_va->it.last) {
497 if (bo_va->addr) {
498 /* add a clone of the bo_va to clear the old address */
499 struct radeon_bo_va *tmp;
500 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
501 if (!tmp) {
502 mutex_unlock(&vm->mutex);
503 r = -ENOMEM;
504 goto error_unreserve;
505 }
506 tmp->it.start = bo_va->it.start;
507 tmp->it.last = bo_va->it.last;
508 tmp->vm = vm;
509 tmp->addr = bo_va->addr;
510 tmp->bo = radeon_bo_ref(bo_va->bo);
511 spin_lock(&vm->status_lock);
512 list_add(&tmp->vm_status, &vm->freed);
513 spin_unlock(&vm->status_lock);
514
515 bo_va->addr = 0;
516 }
517
518 interval_tree_remove(&bo_va->it, &vm->va);
519 bo_va->it.start = 0;
520 bo_va->it.last = 0;
521 }
522
523 if (soffset || eoffset) {
524 bo_va->it.start = soffset;
525 bo_va->it.last = eoffset;
526 interval_tree_insert(&bo_va->it, &vm->va);
527 }
528
529 bo_va->flags = flags;
530 bo_va->addr = 0;
531
532 soffset >>= radeon_vm_block_size;
533 eoffset >>= radeon_vm_block_size;
534
535 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
536
537 if (eoffset > vm->max_pde_used)
538 vm->max_pde_used = eoffset;
539
540 radeon_bo_unreserve(bo_va->bo);
541
542 /* walk over the address space and allocate the page tables */
543 for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
544 struct radeon_bo *pt;
545
546 if (vm->page_tables[pt_idx].bo)
547 continue;
548
549 /* drop mutex to allocate and clear page table */
550 mutex_unlock(&vm->mutex);
551
552 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
553 RADEON_GPU_PAGE_SIZE, true,
554 RADEON_GEM_DOMAIN_VRAM, 0,
555 NULL, NULL, &pt);
556 if (r)
557 return r;
558
559 r = radeon_vm_clear_bo(rdev, pt);
560 if (r) {
561 radeon_bo_unref(&pt);
562 return r;
563 }
564
565 /* aquire mutex again */
566 mutex_lock(&vm->mutex);
567 if (vm->page_tables[pt_idx].bo) {
568 /* someone else allocated the pt in the meantime */
569 mutex_unlock(&vm->mutex);
570 radeon_bo_unref(&pt);
571 mutex_lock(&vm->mutex);
572 continue;
573 }
574
575 vm->page_tables[pt_idx].addr = 0;
576 vm->page_tables[pt_idx].bo = pt;
577 }
578
579 mutex_unlock(&vm->mutex);
580 return 0;
581
582 error_unreserve:
583 radeon_bo_unreserve(bo_va->bo);
584 return r;
585 }
586
587 /**
588 * radeon_vm_map_gart - get the physical address of a gart page
589 *
590 * @rdev: radeon_device pointer
591 * @addr: the unmapped addr
592 *
593 * Look up the physical address of the page that the pte resolves
594 * to (cayman+).
595 * Returns the physical address of the page.
596 */
radeon_vm_map_gart(struct radeon_device * rdev,uint64_t addr)597 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
598 {
599 uint64_t result;
600
601 /* page table offset */
602 result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
603 result &= ~RADEON_GPU_PAGE_MASK;
604
605 return result;
606 }
607
608 /**
609 * radeon_vm_page_flags - translate page flags to what the hw uses
610 *
611 * @flags: flags comming from userspace
612 *
613 * Translate the flags the userspace ABI uses to hw flags.
614 */
radeon_vm_page_flags(uint32_t flags)615 static uint32_t radeon_vm_page_flags(uint32_t flags)
616 {
617 uint32_t hw_flags = 0;
618 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
619 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
620 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
621 if (flags & RADEON_VM_PAGE_SYSTEM) {
622 hw_flags |= R600_PTE_SYSTEM;
623 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
624 }
625 return hw_flags;
626 }
627
628 /**
629 * radeon_vm_update_pdes - make sure that page directory is valid
630 *
631 * @rdev: radeon_device pointer
632 * @vm: requested vm
633 * @start: start of GPU address range
634 * @end: end of GPU address range
635 *
636 * Allocates new page tables if necessary
637 * and updates the page directory (cayman+).
638 * Returns 0 for success, error for failure.
639 *
640 * Global and local mutex must be locked!
641 */
radeon_vm_update_page_directory(struct radeon_device * rdev,struct radeon_vm * vm)642 int radeon_vm_update_page_directory(struct radeon_device *rdev,
643 struct radeon_vm *vm)
644 {
645 struct radeon_bo *pd = vm->page_directory;
646 uint64_t pd_addr = radeon_bo_gpu_offset(pd);
647 uint32_t incr = RADEON_VM_PTE_COUNT * 8;
648 uint64_t last_pde = ~0, last_pt = ~0;
649 unsigned count = 0, pt_idx, ndw;
650 struct radeon_ib ib;
651 int r;
652
653 /* padding, etc. */
654 ndw = 64;
655
656 /* assume the worst case */
657 ndw += vm->max_pde_used * 6;
658
659 /* update too big for an IB */
660 if (ndw > 0xfffff)
661 return -ENOMEM;
662
663 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
664 if (r)
665 return r;
666 ib.length_dw = 0;
667
668 /* walk over the address space and update the page directory */
669 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
670 struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
671 uint64_t pde, pt;
672
673 if (bo == NULL)
674 continue;
675
676 pt = radeon_bo_gpu_offset(bo);
677 if (vm->page_tables[pt_idx].addr == pt)
678 continue;
679 vm->page_tables[pt_idx].addr = pt;
680
681 pde = pd_addr + pt_idx * 8;
682 if (((last_pde + 8 * count) != pde) ||
683 ((last_pt + incr * count) != pt)) {
684
685 if (count) {
686 radeon_vm_set_pages(rdev, &ib, last_pde,
687 last_pt, count, incr,
688 R600_PTE_VALID);
689 }
690
691 count = 1;
692 last_pde = pde;
693 last_pt = pt;
694 } else {
695 ++count;
696 }
697 }
698
699 if (count)
700 radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
701 incr, R600_PTE_VALID);
702
703 if (ib.length_dw != 0) {
704 radeon_asic_vm_pad_ib(rdev, &ib);
705
706 radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
707 WARN_ON(ib.length_dw > ndw);
708 r = radeon_ib_schedule(rdev, &ib, NULL, false);
709 if (r) {
710 radeon_ib_free(rdev, &ib);
711 return r;
712 }
713 ib.fence->is_vm_update = true;
714 radeon_bo_fence(pd, ib.fence, false);
715 }
716 radeon_ib_free(rdev, &ib);
717
718 return 0;
719 }
720
721 /**
722 * radeon_vm_frag_ptes - add fragment information to PTEs
723 *
724 * @rdev: radeon_device pointer
725 * @ib: IB for the update
726 * @pe_start: first PTE to handle
727 * @pe_end: last PTE to handle
728 * @addr: addr those PTEs should point to
729 * @flags: hw mapping flags
730 *
731 * Global and local mutex must be locked!
732 */
radeon_vm_frag_ptes(struct radeon_device * rdev,struct radeon_ib * ib,uint64_t pe_start,uint64_t pe_end,uint64_t addr,uint32_t flags)733 static void radeon_vm_frag_ptes(struct radeon_device *rdev,
734 struct radeon_ib *ib,
735 uint64_t pe_start, uint64_t pe_end,
736 uint64_t addr, uint32_t flags)
737 {
738 /**
739 * The MC L1 TLB supports variable sized pages, based on a fragment
740 * field in the PTE. When this field is set to a non-zero value, page
741 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
742 * flags are considered valid for all PTEs within the fragment range
743 * and corresponding mappings are assumed to be physically contiguous.
744 *
745 * The L1 TLB can store a single PTE for the whole fragment,
746 * significantly increasing the space available for translation
747 * caching. This leads to large improvements in throughput when the
748 * TLB is under pressure.
749 *
750 * The L2 TLB distributes small and large fragments into two
751 * asymmetric partitions. The large fragment cache is significantly
752 * larger. Thus, we try to use large fragments wherever possible.
753 * Userspace can support this by aligning virtual base address and
754 * allocation size to the fragment size.
755 */
756
757 /* NI is optimized for 256KB fragments, SI and newer for 64KB */
758 uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
759 (rdev->family == CHIP_ARUBA)) ?
760 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
761 uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
762 (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
763
764 uint64_t frag_start = ALIGN(pe_start, frag_align);
765 uint64_t frag_end = pe_end & ~(frag_align - 1);
766
767 unsigned count;
768
769 /* system pages are non continuously */
770 if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
771 (frag_start >= frag_end)) {
772
773 count = (pe_end - pe_start) / 8;
774 radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
775 RADEON_GPU_PAGE_SIZE, flags);
776 return;
777 }
778
779 /* handle the 4K area at the beginning */
780 if (pe_start != frag_start) {
781 count = (frag_start - pe_start) / 8;
782 radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
783 RADEON_GPU_PAGE_SIZE, flags);
784 addr += RADEON_GPU_PAGE_SIZE * count;
785 }
786
787 /* handle the area in the middle */
788 count = (frag_end - frag_start) / 8;
789 radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
790 RADEON_GPU_PAGE_SIZE, flags | frag_flags);
791
792 /* handle the 4K area at the end */
793 if (frag_end != pe_end) {
794 addr += RADEON_GPU_PAGE_SIZE * count;
795 count = (pe_end - frag_end) / 8;
796 radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
797 RADEON_GPU_PAGE_SIZE, flags);
798 }
799 }
800
801 /**
802 * radeon_vm_update_ptes - make sure that page tables are valid
803 *
804 * @rdev: radeon_device pointer
805 * @vm: requested vm
806 * @start: start of GPU address range
807 * @end: end of GPU address range
808 * @dst: destination address to map to
809 * @flags: mapping flags
810 *
811 * Update the page tables in the range @start - @end (cayman+).
812 *
813 * Global and local mutex must be locked!
814 */
radeon_vm_update_ptes(struct radeon_device * rdev,struct radeon_vm * vm,struct radeon_ib * ib,uint64_t start,uint64_t end,uint64_t dst,uint32_t flags)815 static int radeon_vm_update_ptes(struct radeon_device *rdev,
816 struct radeon_vm *vm,
817 struct radeon_ib *ib,
818 uint64_t start, uint64_t end,
819 uint64_t dst, uint32_t flags)
820 {
821 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
822 uint64_t last_pte = ~0, last_dst = ~0;
823 unsigned count = 0;
824 uint64_t addr;
825
826 /* walk over the address space and update the page tables */
827 for (addr = start; addr < end; ) {
828 uint64_t pt_idx = addr >> radeon_vm_block_size;
829 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
830 unsigned nptes;
831 uint64_t pte;
832 int r;
833
834 radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
835 r = reservation_object_reserve_shared(pt->tbo.resv);
836 if (r)
837 return r;
838
839 if ((addr & ~mask) == (end & ~mask))
840 nptes = end - addr;
841 else
842 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
843
844 pte = radeon_bo_gpu_offset(pt);
845 pte += (addr & mask) * 8;
846
847 if ((last_pte + 8 * count) != pte) {
848
849 if (count) {
850 radeon_vm_frag_ptes(rdev, ib, last_pte,
851 last_pte + 8 * count,
852 last_dst, flags);
853 }
854
855 count = nptes;
856 last_pte = pte;
857 last_dst = dst;
858 } else {
859 count += nptes;
860 }
861
862 addr += nptes;
863 dst += nptes * RADEON_GPU_PAGE_SIZE;
864 }
865
866 if (count) {
867 radeon_vm_frag_ptes(rdev, ib, last_pte,
868 last_pte + 8 * count,
869 last_dst, flags);
870 }
871
872 return 0;
873 }
874
875 /**
876 * radeon_vm_fence_pts - fence page tables after an update
877 *
878 * @vm: requested vm
879 * @start: start of GPU address range
880 * @end: end of GPU address range
881 * @fence: fence to use
882 *
883 * Fence the page tables in the range @start - @end (cayman+).
884 *
885 * Global and local mutex must be locked!
886 */
radeon_vm_fence_pts(struct radeon_vm * vm,uint64_t start,uint64_t end,struct radeon_fence * fence)887 static void radeon_vm_fence_pts(struct radeon_vm *vm,
888 uint64_t start, uint64_t end,
889 struct radeon_fence *fence)
890 {
891 unsigned i;
892
893 start >>= radeon_vm_block_size;
894 end = (end - 1) >> radeon_vm_block_size;
895
896 for (i = start; i <= end; ++i)
897 radeon_bo_fence(vm->page_tables[i].bo, fence, true);
898 }
899
900 /**
901 * radeon_vm_bo_update - map a bo into the vm page table
902 *
903 * @rdev: radeon_device pointer
904 * @vm: requested vm
905 * @bo: radeon buffer object
906 * @mem: ttm mem
907 *
908 * Fill in the page table entries for @bo (cayman+).
909 * Returns 0 for success, -EINVAL for failure.
910 *
911 * Object have to be reserved and mutex must be locked!
912 */
radeon_vm_bo_update(struct radeon_device * rdev,struct radeon_bo_va * bo_va,struct ttm_mem_reg * mem)913 int radeon_vm_bo_update(struct radeon_device *rdev,
914 struct radeon_bo_va *bo_va,
915 struct ttm_mem_reg *mem)
916 {
917 struct radeon_vm *vm = bo_va->vm;
918 struct radeon_ib ib;
919 unsigned nptes, ncmds, ndw;
920 uint64_t addr;
921 uint32_t flags;
922 int r;
923
924 if (!bo_va->it.start) {
925 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
926 bo_va->bo, vm);
927 return -EINVAL;
928 }
929
930 spin_lock(&vm->status_lock);
931 list_del_init(&bo_va->vm_status);
932 spin_unlock(&vm->status_lock);
933
934 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
935 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
936 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
937 if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
938 bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
939
940 if (mem) {
941 addr = mem->start << PAGE_SHIFT;
942 if (mem->mem_type != TTM_PL_SYSTEM) {
943 bo_va->flags |= RADEON_VM_PAGE_VALID;
944 }
945 if (mem->mem_type == TTM_PL_TT) {
946 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
947 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
948 bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
949
950 } else {
951 addr += rdev->vm_manager.vram_base_offset;
952 }
953 } else {
954 addr = 0;
955 }
956
957 if (addr == bo_va->addr)
958 return 0;
959 bo_va->addr = addr;
960
961 trace_radeon_vm_bo_update(bo_va);
962
963 nptes = bo_va->it.last - bo_va->it.start + 1;
964
965 /* reserve space for one command every (1 << BLOCK_SIZE) entries
966 or 2k dwords (whatever is smaller) */
967 ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
968
969 /* padding, etc. */
970 ndw = 64;
971
972 flags = radeon_vm_page_flags(bo_va->flags);
973 if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
974 /* only copy commands needed */
975 ndw += ncmds * 7;
976
977 } else if (flags & R600_PTE_SYSTEM) {
978 /* header for write data commands */
979 ndw += ncmds * 4;
980
981 /* body of write data command */
982 ndw += nptes * 2;
983
984 } else {
985 /* set page commands needed */
986 ndw += ncmds * 10;
987
988 /* two extra commands for begin/end of fragment */
989 ndw += 2 * 10;
990 }
991
992 /* update too big for an IB */
993 if (ndw > 0xfffff)
994 return -ENOMEM;
995
996 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
997 if (r)
998 return r;
999 ib.length_dw = 0;
1000
1001 if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
1002 unsigned i;
1003
1004 for (i = 0; i < RADEON_NUM_RINGS; ++i)
1005 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
1006 }
1007
1008 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
1009 bo_va->it.last + 1, addr,
1010 radeon_vm_page_flags(bo_va->flags));
1011 if (r) {
1012 radeon_ib_free(rdev, &ib);
1013 return r;
1014 }
1015
1016 radeon_asic_vm_pad_ib(rdev, &ib);
1017 WARN_ON(ib.length_dw > ndw);
1018
1019 r = radeon_ib_schedule(rdev, &ib, NULL, false);
1020 if (r) {
1021 radeon_ib_free(rdev, &ib);
1022 return r;
1023 }
1024 ib.fence->is_vm_update = true;
1025 radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
1026 radeon_fence_unref(&bo_va->last_pt_update);
1027 bo_va->last_pt_update = radeon_fence_ref(ib.fence);
1028 radeon_ib_free(rdev, &ib);
1029
1030 return 0;
1031 }
1032
1033 /**
1034 * radeon_vm_clear_freed - clear freed BOs in the PT
1035 *
1036 * @rdev: radeon_device pointer
1037 * @vm: requested vm
1038 *
1039 * Make sure all freed BOs are cleared in the PT.
1040 * Returns 0 for success.
1041 *
1042 * PTs have to be reserved and mutex must be locked!
1043 */
radeon_vm_clear_freed(struct radeon_device * rdev,struct radeon_vm * vm)1044 int radeon_vm_clear_freed(struct radeon_device *rdev,
1045 struct radeon_vm *vm)
1046 {
1047 struct radeon_bo_va *bo_va;
1048 int r;
1049
1050 spin_lock(&vm->status_lock);
1051 while (!list_empty(&vm->freed)) {
1052 bo_va = list_first_entry(&vm->freed,
1053 struct radeon_bo_va, vm_status);
1054 spin_unlock(&vm->status_lock);
1055
1056 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1057 radeon_bo_unref(&bo_va->bo);
1058 radeon_fence_unref(&bo_va->last_pt_update);
1059 kfree(bo_va);
1060 if (r)
1061 return r;
1062
1063 spin_lock(&vm->status_lock);
1064 }
1065 spin_unlock(&vm->status_lock);
1066 return 0;
1067
1068 }
1069
1070 /**
1071 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
1072 *
1073 * @rdev: radeon_device pointer
1074 * @vm: requested vm
1075 *
1076 * Make sure all invalidated BOs are cleared in the PT.
1077 * Returns 0 for success.
1078 *
1079 * PTs have to be reserved and mutex must be locked!
1080 */
radeon_vm_clear_invalids(struct radeon_device * rdev,struct radeon_vm * vm)1081 int radeon_vm_clear_invalids(struct radeon_device *rdev,
1082 struct radeon_vm *vm)
1083 {
1084 struct radeon_bo_va *bo_va;
1085 int r;
1086
1087 spin_lock(&vm->status_lock);
1088 while (!list_empty(&vm->invalidated)) {
1089 bo_va = list_first_entry(&vm->invalidated,
1090 struct radeon_bo_va, vm_status);
1091 spin_unlock(&vm->status_lock);
1092
1093 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1094 if (r)
1095 return r;
1096
1097 spin_lock(&vm->status_lock);
1098 }
1099 spin_unlock(&vm->status_lock);
1100
1101 return 0;
1102 }
1103
1104 /**
1105 * radeon_vm_bo_rmv - remove a bo to a specific vm
1106 *
1107 * @rdev: radeon_device pointer
1108 * @bo_va: requested bo_va
1109 *
1110 * Remove @bo_va->bo from the requested vm (cayman+).
1111 *
1112 * Object have to be reserved!
1113 */
radeon_vm_bo_rmv(struct radeon_device * rdev,struct radeon_bo_va * bo_va)1114 void radeon_vm_bo_rmv(struct radeon_device *rdev,
1115 struct radeon_bo_va *bo_va)
1116 {
1117 struct radeon_vm *vm = bo_va->vm;
1118
1119 list_del(&bo_va->bo_list);
1120
1121 mutex_lock(&vm->mutex);
1122 if (bo_va->it.start || bo_va->it.last)
1123 interval_tree_remove(&bo_va->it, &vm->va);
1124 spin_lock(&vm->status_lock);
1125 list_del(&bo_va->vm_status);
1126
1127 if (bo_va->addr) {
1128 bo_va->bo = radeon_bo_ref(bo_va->bo);
1129 list_add(&bo_va->vm_status, &vm->freed);
1130 } else {
1131 radeon_fence_unref(&bo_va->last_pt_update);
1132 kfree(bo_va);
1133 }
1134 spin_unlock(&vm->status_lock);
1135
1136 mutex_unlock(&vm->mutex);
1137 }
1138
1139 /**
1140 * radeon_vm_bo_invalidate - mark the bo as invalid
1141 *
1142 * @rdev: radeon_device pointer
1143 * @vm: requested vm
1144 * @bo: radeon buffer object
1145 *
1146 * Mark @bo as invalid (cayman+).
1147 */
radeon_vm_bo_invalidate(struct radeon_device * rdev,struct radeon_bo * bo)1148 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1149 struct radeon_bo *bo)
1150 {
1151 struct radeon_bo_va *bo_va;
1152
1153 list_for_each_entry(bo_va, &bo->va, bo_list) {
1154 if (bo_va->addr) {
1155 spin_lock(&bo_va->vm->status_lock);
1156 list_del(&bo_va->vm_status);
1157 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1158 spin_unlock(&bo_va->vm->status_lock);
1159 }
1160 }
1161 }
1162
1163 /**
1164 * radeon_vm_init - initialize a vm instance
1165 *
1166 * @rdev: radeon_device pointer
1167 * @vm: requested vm
1168 *
1169 * Init @vm fields (cayman+).
1170 */
radeon_vm_init(struct radeon_device * rdev,struct radeon_vm * vm)1171 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1172 {
1173 const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
1174 RADEON_VM_PTE_COUNT * 8);
1175 unsigned pd_size, pd_entries, pts_size;
1176 int i, r;
1177
1178 vm->ib_bo_va = NULL;
1179 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1180 vm->ids[i].id = 0;
1181 vm->ids[i].flushed_updates = NULL;
1182 vm->ids[i].last_id_use = NULL;
1183 }
1184 mutex_init(&vm->mutex);
1185 vm->va = RB_ROOT;
1186 spin_lock_init(&vm->status_lock);
1187 INIT_LIST_HEAD(&vm->invalidated);
1188 INIT_LIST_HEAD(&vm->freed);
1189
1190 pd_size = radeon_vm_directory_size(rdev);
1191 pd_entries = radeon_vm_num_pdes(rdev);
1192
1193 /* allocate page table array */
1194 pts_size = pd_entries * sizeof(struct radeon_vm_pt);
1195 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1196 if (vm->page_tables == NULL) {
1197 DRM_ERROR("Cannot allocate memory for page table array\n");
1198 return -ENOMEM;
1199 }
1200
1201 r = radeon_bo_create(rdev, pd_size, align, true,
1202 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1203 NULL, &vm->page_directory);
1204 if (r)
1205 return r;
1206
1207 r = radeon_vm_clear_bo(rdev, vm->page_directory);
1208 if (r) {
1209 radeon_bo_unref(&vm->page_directory);
1210 vm->page_directory = NULL;
1211 return r;
1212 }
1213
1214 return 0;
1215 }
1216
1217 /**
1218 * radeon_vm_fini - tear down a vm instance
1219 *
1220 * @rdev: radeon_device pointer
1221 * @vm: requested vm
1222 *
1223 * Tear down @vm (cayman+).
1224 * Unbind the VM and remove all bos from the vm bo list
1225 */
radeon_vm_fini(struct radeon_device * rdev,struct radeon_vm * vm)1226 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1227 {
1228 struct radeon_bo_va *bo_va, *tmp;
1229 int i, r;
1230
1231 if (!RB_EMPTY_ROOT(&vm->va)) {
1232 dev_err(rdev->dev, "still active bo inside vm\n");
1233 }
1234 rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
1235 interval_tree_remove(&bo_va->it, &vm->va);
1236 r = radeon_bo_reserve(bo_va->bo, false);
1237 if (!r) {
1238 list_del_init(&bo_va->bo_list);
1239 radeon_bo_unreserve(bo_va->bo);
1240 radeon_fence_unref(&bo_va->last_pt_update);
1241 kfree(bo_va);
1242 }
1243 }
1244 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
1245 radeon_bo_unref(&bo_va->bo);
1246 radeon_fence_unref(&bo_va->last_pt_update);
1247 kfree(bo_va);
1248 }
1249
1250 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1251 radeon_bo_unref(&vm->page_tables[i].bo);
1252 kfree(vm->page_tables);
1253
1254 radeon_bo_unref(&vm->page_directory);
1255
1256 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1257 radeon_fence_unref(&vm->ids[i].flushed_updates);
1258 radeon_fence_unref(&vm->ids[i].last_id_use);
1259 }
1260
1261 mutex_destroy(&vm->mutex);
1262 }
1263