Searched refs: tbo (results 1 - 49 of 49), sorted by relevance

/linux-4.4.14/drivers/gpu/drm/virtio/
virtgpu_object.c
28 static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) virtio_gpu_ttm_bo_destroy() argument
33 bo = container_of(tbo, struct virtio_gpu_object, tbo); virtio_gpu_ttm_bo_destroy()
89 ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type, virtio_gpu_object_create()
110 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); virtio_gpu_object_kmap()
123 struct page **pages = bo->tbo.ttm->pages; virtio_gpu_object_get_sg_table()
124 int nr_pages = bo->tbo.num_pages; virtio_gpu_object_get_sg_table()
130 if (bo->tbo.ttm->state == tt_unpopulated) virtio_gpu_object_get_sg_table()
131 bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm); virtio_gpu_object_get_sg_table()
158 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); virtio_gpu_object_wait()
161 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); virtio_gpu_object_wait()
162 ttm_bo_unreserve(&bo->tbo); virtio_gpu_object_wait()
virtgpu_drv.h
63 struct ttm_buffer_object tbo; member in struct:virtio_gpu_object
377 ttm_bo_reference(&bo->tbo); virtio_gpu_object_ref()
383 struct ttm_buffer_object *tbo; virtio_gpu_object_unref() local
387 tbo = &((*bo)->tbo); virtio_gpu_object_unref()
388 ttm_bo_unref(&tbo); virtio_gpu_object_unref()
389 if (tbo == NULL) virtio_gpu_object_unref()
395 return drm_vma_node_offset_addr(&bo->tbo.vma_node); virtio_gpu_object_mmap_offset()
403 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); virtio_gpu_object_reserve()
417 ttm_bo_unreserve(&bo->tbo); virtio_gpu_object_unreserve()
virtgpu_ttm.c
382 static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo, virtio_gpu_bo_move_notify() argument
388 bo = container_of(tbo, struct virtio_gpu_object, tbo); virtio_gpu_bo_move_notify()
404 static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo) virtio_gpu_bo_swap_notify() argument
409 bo = container_of(tbo, struct virtio_gpu_object, tbo); virtio_gpu_bo_swap_notify()
virtgpu_ioctl.c
69 qobj = container_of(bo, struct virtio_gpu_object, tbo); list_for_each_entry()
86 qobj = container_of(bo, struct virtio_gpu_object, tbo); list_for_each_entry()
145 buflist[i].bo = &qobj->tbo; virtio_gpu_execbuffer()
277 mainbuf.bo = &qobj->tbo; virtio_gpu_resource_create_ioctl()
387 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, virtio_gpu_transfer_from_host_ioctl()
397 reservation_object_add_excl_fence(qobj->tbo.resv, virtio_gpu_transfer_from_host_ioctl()
431 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, virtio_gpu_transfer_to_host_ioctl()
446 reservation_object_add_excl_fence(qobj->tbo.resv, virtio_gpu_transfer_to_host_ioctl()
virtgpu_display.c
95 reservation_object_add_excl_fence(qobj->tbo.resv, virtio_gpu_crtc_cursor_set()
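
All of the virtio hits follow the same TTM convention: the driver object embeds a struct ttm_buffer_object named tbo, TTM callbacks recover the wrapper with container_of(), and waits are bracketed by reserve/unreserve. A minimal sketch of that pattern follows; the names my_bo, my_bo_ttm_destroy and my_bo_wait are illustrative, while the ttm_bo_* calls mirror the virtgpu_object.c hits above.

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>

struct my_bo {
	struct ttm_buffer_object tbo;	/* embedded TTM buffer object */
	struct ttm_bo_kmap_obj kmap;	/* CPU mapping, as in virtio_gpu_object */
	/* driver-private fields ... */
};

/* Destroy callback handed to ttm_bo_init(): TTM passes back only the
 * embedded tbo, so container_of() rebuilds the driver pointer. */
static void my_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct my_bo *bo = container_of(tbo, struct my_bo, tbo);

	kfree(bo);
}

/* Wait for pending GPU work on the BO, bracketed by reserve/unreserve,
 * mirroring virtio_gpu_object_wait() above. */
static int my_bo_wait(struct my_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (r)
		return r;
	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
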
/linux-4.4.14/drivers/gpu/drm/qxl/
qxl_object.c
30 static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) qxl_ttm_bo_destroy() argument
35 bo = container_of(tbo, struct qxl_bo, tbo); qxl_ttm_bo_destroy()
111 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, qxl_bo_create()
135 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); qxl_bo_kmap()
147 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; qxl_bo_kmap_atomic_page()
152 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) qxl_bo_kmap_atomic_page()
154 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) qxl_bo_kmap_atomic_page()
160 ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem); qxl_bo_kmap_atomic_page()
163 return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); qxl_bo_kmap_atomic_page()
189 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; qxl_bo_kunmap_atomic_page()
192 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) qxl_bo_kunmap_atomic_page()
194 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) qxl_bo_kunmap_atomic_page()
202 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); qxl_bo_kunmap_atomic_page()
236 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); qxl_bo_pin()
261 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); qxl_bo_unpin()
qxl_object.h
34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); qxl_bo_reserve()
47 ttm_bo_unreserve(&bo->tbo); qxl_bo_unreserve()
52 return bo->tbo.offset; qxl_bo_gpu_offset()
57 return bo->tbo.num_pages << PAGE_SHIFT; qxl_bo_size()
62 return drm_vma_node_offset_addr(&bo->tbo.vma_node); qxl_bo_mmap_offset()
70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); qxl_bo_wait()
80 *mem_type = bo->tbo.mem.mem_type; qxl_bo_wait()
82 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); qxl_bo_wait()
83 ttm_bo_unreserve(&bo->tbo); qxl_bo_wait()
qxl_gem.c
35 struct ttm_buffer_object *tbo; qxl_gem_object_free() local
41 tbo = &qobj->tbo; qxl_gem_object_free()
42 ttm_bo_unref(&tbo); qxl_gem_object_free()
qxl_drv.h
104 /* Protected by tbo.reserved */
107 struct ttm_buffer_object tbo; member in struct:qxl_bo
122 #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
377 /* TODO - need to hold one of the locks to read tbo.offset */ qxl_bo_physical_address()
378 return slot->high_bits | (bo->tbo.offset + offset); qxl_bo_physical_address()
qxl_debugfs.c
65 fobj = rcu_dereference(bo->tbo.resv->fence); qxl_debugfs_buffers_info()
qxl_release.c
219 if (entry->tv.bo == &bo->tbo) qxl_release_list_add()
228 entry->tv.bo = &bo->tbo; qxl_release_list_add()
240 ret = ttm_bo_validate(&bo->tbo, &bo->placement, qxl_release_validate_bo()
246 ret = reservation_object_reserve_shared(bo->tbo.resv); qxl_release_validate_bo()
qxl_ttm.c
204 qbo = container_of(bo, struct qxl_bo, tbo); qxl_evict_flags()
368 qbo = container_of(bo, struct qxl_bo, tbo); qxl_bo_move_notify()
qxl_cmd.c
517 /* TODO - need to hold one of the locks to read tbo.offset */ qxl_hw_surface_alloc()
520 cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset; qxl_hw_surface_alloc()
627 ret = ttm_bo_wait(&surf->tbo, true, true, !stall); qxl_reap_surf()
qxl_ioctl.c
329 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, qxl_update_area_ioctl()
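
The qxl_object.c pin/unpin hits show how pinning is built on top of the embedded tbo: the buffer is simply re-validated against a placement whose flags forbid eviction. The sketch below is not verbatim qxl code; only the ttm_bo_validate() call is taken from the hits, while the pin_count and placement fields and the my_bo_placement_no_evict() helper are assumed driver-side pieces added to the my_bo struct from the previous sketch.

/* Assumed additions to struct my_bo for this sketch:
 *   unsigned int pin_count;          - nesting pin counter
 *   struct ttm_placement placement;  - current placement description
 */
static void my_bo_placement_no_evict(struct my_bo *bo);	/* assumed helper: sets TTM_PL_FLAG_NO_EVICT */

static int my_bo_pin(struct my_bo *bo)
{
	int r;

	if (bo->pin_count) {
		bo->pin_count++;	/* already resident and non-evictable */
		return 0;
	}

	/* Mark the current placement non-evictable so the next validate
	 * effectively pins the buffer in place. */
	my_bo_placement_no_evict(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (r == 0)
		bo->pin_count = 1;
	return r;
}
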
/linux-4.4.14/drivers/gpu/drm/radeon/
radeon_prime.c
35 int npages = bo->tbo.num_pages; radeon_gem_prime_get_sg_table()
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); radeon_gem_prime_get_sg_table()
45 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, radeon_gem_prime_vmap()
116 return bo->tbo.resv; radeon_gem_prime_res_obj()
124 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) radeon_gem_prime_export()
radeon_object.c
54 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; radeon_update_memory_usage()
72 static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) radeon_ttm_bo_destroy() argument
76 bo = container_of(tbo, struct radeon_bo, tbo); radeon_ttm_bo_destroy()
78 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); radeon_ttm_bo_destroy()
260 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, radeon_bo_create()
285 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); radeon_bo_kmap()
311 ttm_bo_reference(&bo->tbo); radeon_bo_ref()
317 struct ttm_buffer_object *tbo; radeon_bo_unref() local
323 tbo = &((*bo)->tbo); radeon_bo_unref()
324 ttm_bo_unref(&tbo); radeon_bo_unref()
325 if (tbo == NULL) radeon_bo_unref()
334 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) radeon_bo_pin_restricted()
369 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); radeon_bo_pin_restricted()
404 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); radeon_bo_unpin()
406 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) radeon_bo_unpin()
542 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); list_for_each_entry()
565 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); list_for_each_entry()
599 lockdep_assert_held(&bo->tbo.resv->lock.base); radeon_bo_get_surface_reg()
631 ttm_bo_unmap_virtual(&old_object->tbo); radeon_bo_get_surface_reg()
641 bo->tbo.mem.start << PAGE_SHIFT, radeon_bo_get_surface_reg()
642 bo->tbo.num_pages << PAGE_SHIFT); radeon_bo_get_surface_reg()
725 lockdep_assert_held(&bo->tbo.resv->lock.base); radeon_bo_get_tiling_flags()
737 lockdep_assert_held(&bo->tbo.resv->lock.base); radeon_bo_check_tiling()
747 if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { radeon_bo_check_tiling()
770 rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_move_notify()
791 rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_fault_reserve_notify()
831 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); radeon_bo_wait()
835 *mem_type = bo->tbo.mem.mem_type; radeon_bo_wait()
837 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); radeon_bo_wait()
838 ttm_bo_unreserve(&bo->tbo); radeon_bo_wait()
853 struct reservation_object *resv = bo->tbo.resv; radeon_bo_fence()
radeon_object.h
68 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL); radeon_bo_reserve()
79 ttm_bo_unreserve(&bo->tbo); radeon_bo_unreserve()
93 return bo->tbo.offset; radeon_bo_gpu_offset()
98 return bo->tbo.num_pages << PAGE_SHIFT; radeon_bo_size()
103 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; radeon_bo_ngpu_pages()
108 return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; radeon_bo_gpu_page_alignment()
119 return drm_vma_node_offset_addr(&bo->tbo.vma_node); radeon_bo_mmap_offset()
radeon_gem.c
38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); radeon_gem_object_free()
114 r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ); radeon_gem_set_domain()
323 r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); radeon_gem_userptr_ioctl()
342 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); radeon_gem_userptr_ioctl()
412 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { radeon_mode_dumb_mmap()
444 r = reservation_object_test_signaled_rcu(robj->tbo.resv, true); radeon_gem_busy_ioctl()
450 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); radeon_gem_busy_ioctl()
473 ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ); radeon_gem_wait_idle_ioctl()
480 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); radeon_gem_wait_idle_ioctl()
551 tv.bo = &bo_va->bo->tbo; radeon_gem_va_update_vm()
577 r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem); radeon_gem_va_update_vm()
712 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) radeon_gem_op_ioctl()
781 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type); radeon_debugfs_gem_info()
radeon_mn.c
145 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) radeon_mn_invalidate_range_start()
154 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, radeon_mn_invalidate_range_start()
160 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); radeon_mn_invalidate_range_start()
radeon_benchmark.c
125 dobj->tbo.resv); radeon_benchmark_move()
136 dobj->tbo.resv); radeon_benchmark_move()
radeon_cs.c
154 if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { radeon_cs_parser_relocs()
167 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; radeon_cs_parser_relocs()
241 resv = reloc->robj->tbo.resv; radeon_cs_sync_rings()
385 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; cmp_size_smaller_first()
499 &rdev->ring_tmp_bo.bo->tbo.mem); radeon_bo_vm_update_pte()
513 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); radeon_bo_vm_update_pte()
radeon_test.c
122 vram_obj->tbo.resv); radeon_do_test_moves()
126 vram_obj->tbo.resv); radeon_do_test_moves()
173 vram_obj->tbo.resv); radeon_do_test_moves()
177 vram_obj->tbo.resv); radeon_do_test_moves()
radeon_vm.c
144 list[0].tv.bo = &vm->page_directory->tbo; radeon_vm_get_bos()
156 list[idx].tv.bo = &list[idx].robj->tbo; radeon_vm_get_bos()
399 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); radeon_vm_clear_bo()
703 radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true); radeon_vm_update_page_directory()
831 radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true); radeon_vm_update_ptes()
832 r = reservation_object_reserve_shared(pt->tbo.resv); radeon_vm_update_ptes()
943 if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) radeon_vm_bo_update()
radeon_trace.h
24 __entry->pages = bo->tbo.num_pages;
radeon_ttm.c
196 rbo = container_of(bo, struct radeon_bo, tbo); radeon_evict_flags()
236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); radeon_verify_access()
radeon_uvd.c
436 f = reservation_object_get_excl(bo->tbo.resv); radeon_uvd_cs_msg()
radeon_pm.c
152 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) radeon_unmap_vram_bos()
153 ttm_bo_unmap_virtual(&bo->tbo); radeon_unmap_vram_bos()
radeon.h
486 /* Protected by tbo.reserved */
490 struct ttm_buffer_object tbo; member in struct:radeon_bo
radeon_display.c
559 work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); radeon_crtc_page_flip()
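
Several radeon hits (radeon_gem_busy_ioctl, radeon_gem_wait_idle_ioctl, radeon_gem_set_domain) wait on a buffer without reserving it, by testing or blocking on the fences attached to tbo.resv. A minimal sketch of that pattern on the illustrative my_bo struct: the two reservation_object_* calls and the 30 second timeout are taken from the hits above, the surrounding function is assumed.

#include <linux/reservation.h>
#include <linux/jiffies.h>

static int my_bo_wait_idle(struct my_bo *bo, bool *busy)
{
	long r;

	/* Non-blocking check: are all fences on the BO already signalled? */
	if (busy)
		*busy = !reservation_object_test_signaled_rcu(bo->tbo.resv, true);

	/* Blocking, interruptible wait on all fences with a 30 second
	 * timeout, as in radeon_gem_wait_idle_ioctl(). */
	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, true,
						30 * HZ);
	if (r == 0)
		return -EBUSY;		/* timed out, BO still busy */
	return r < 0 ? r : 0;		/* error/signal, or success */
}
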
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_prime.c
35 int npages = bo->tbo.num_pages; amdgpu_gem_prime_get_sg_table()
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); amdgpu_gem_prime_get_sg_table()
45 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, amdgpu_gem_prime_vmap()
115 return bo->tbo.resv; amdgpu_gem_prime_res_obj()
124 if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) amdgpu_gem_prime_export()
amdgpu_object.c
92 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_ttm_bo_destroy() argument
96 bo = container_of(tbo, struct amdgpu_bo, tbo); amdgpu_ttm_bo_destroy()
98 amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL); amdgpu_ttm_bo_destroy()
274 r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, amdgpu_bo_create_restricted()
322 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); amdgpu_bo_kmap()
346 ttm_bo_reference(&bo->tbo); amdgpu_bo_ref()
352 struct ttm_buffer_object *tbo; amdgpu_bo_unref() local
357 tbo = &((*bo)->tbo); amdgpu_bo_unref()
358 ttm_bo_unref(&tbo); amdgpu_bo_unref()
359 if (tbo == NULL) amdgpu_bo_unref()
370 if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm)) amdgpu_bo_pin_restricted()
416 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); amdgpu_bo_pin_restricted()
451 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); amdgpu_bo_unpin()
453 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) amdgpu_bo_unpin()
515 return ttm_fbdev_mmap(vma, &bo->tbo); amdgpu_bo_fbdev_mmap()
529 lockdep_assert_held(&bo->tbo.resv->lock.base); amdgpu_bo_get_tiling_flags()
595 rbo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_bo_move_notify()
616 abo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_bo_fault_reserve_notify()
662 struct reservation_object *resv = bo->tbo.resv; amdgpu_bo_fence()
amdgpu_object.h
74 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0); amdgpu_bo_reserve()
85 ttm_bo_unreserve(&bo->tbo); amdgpu_bo_unreserve()
99 return bo->tbo.offset; amdgpu_bo_gpu_offset()
104 return bo->tbo.num_pages << PAGE_SHIFT; amdgpu_bo_size()
109 return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; amdgpu_bo_ngpu_pages()
114 return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; amdgpu_bo_gpu_page_alignment()
125 return drm_vma_node_offset_addr(&bo->tbo.vma_node); amdgpu_bo_mmap_offset()
amdgpu_gem.c
39 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg); amdgpu_gem_object_free()
255 r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); amdgpu_gem_userptr_ioctl()
274 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); amdgpu_gem_userptr_ioctl()
311 if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) || amdgpu_mode_dumb_mmap()
376 ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true); amdgpu_gem_wait_idle_ioctl()
378 ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout); amdgpu_gem_wait_idle_ioctl()
460 tv.bo = &bo_va->bo->tbo; amdgpu_gem_va_update_vm()
497 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); amdgpu_gem_va_update_vm()
561 tv.bo = &rbo->tbo; amdgpu_gem_va_ioctl()
566 tv_pd.bo = &fpriv->vm.page_directory->tbo; amdgpu_gem_va_ioctl()
633 info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; amdgpu_gem_op_ioctl()
642 if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) { amdgpu_gem_op_ioctl()
707 domain = amdgpu_mem_type_to_domain(rbo->tbo.mem.mem_type); amdgpu_debugfs_gem_info()
amdgpu_mn.c
145 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, amdgpu_mn_invalidate_range_start()
155 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, amdgpu_mn_invalidate_range_start()
161 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); amdgpu_mn_invalidate_range_start()
amdgpu_vm.c
104 list[0].tv.bo = &vm->page_directory->tbo; amdgpu_vm_get_bos()
116 list[idx].tv.bo = &list[idx].robj->tbo; amdgpu_vm_get_bos()
348 r = reservation_object_reserve_shared(bo->tbo.resv); amdgpu_vm_clear_bo()
352 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); amdgpu_vm_clear_bo()
499 amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM); amdgpu_vm_update_page_directory()
646 amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner); amdgpu_vm_update_ptes()
647 r = reservation_object_reserve_shared(pt->tbo.resv); amdgpu_vm_update_ptes()
839 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); amdgpu_vm_bo_update()
1072 struct reservation_object *resv = vm->page_directory->tbo.resv; amdgpu_vm_bo_map()
amdgpu_bo_list.c
114 if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) { amdgpu_bo_list_set()
119 entry->tv.bo = &entry->robj->tbo; amdgpu_bo_list_set()
amdgpu_cs.c
145 if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { amdgpu_cs_user_fence_chunk()
154 p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; amdgpu_cs_user_fence_chunk()
354 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); list_for_each_entry()
374 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); list_for_each_entry()
446 struct reservation_object *resv = e->robj->tbo.resv; amdgpu_cs_sync_rings()
462 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; cmp_size_smaller_first()
549 r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem); amdgpu_bo_vm_update_pte()
amdgpu_uvd.c
363 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); amdgpu_uvd_cs_pass1()
533 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, amdgpu_uvd_cs_msg()
853 tv.bo = &bo->tbo; amdgpu_uvd_send_msg()
867 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); amdgpu_uvd_send_msg()
amdgpu_trace.h
24 __entry->pages = bo->tbo.num_pages;
amdgpu_ttm.c
191 rbo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_evict_flags()
208 struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo); amdgpu_verify_access()
amdgpu.h
522 /* Protected by tbo.reserved */
526 struct ttm_buffer_object tbo; member in struct:amdgpu_bo
amdgpu_display.c
222 r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, amdgpu_crtc_page_flip()
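
The amdgpu_vm.c hits around amdgpu_vm_clear_bo() show the other half of the fencing picture: before queueing GPU work that will later attach a shared fence to a page-table BO, the code reserves a shared-fence slot on tbo.resv and re-validates the BO into its placement. Both calls in the sketch below appear verbatim in the hits; the surrounding function and the placement field (carried over from the earlier assumed my_bo sketch) are illustrative.

static int my_vm_prepare_bo(struct my_bo *bo)
{
	int r;

	/* Make room for one more shared fence on the reservation object,
	 * so adding the fence after submission cannot fail. */
	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	/* Make sure the BO sits in the placement the GPU job expects
	 * (interruptible, and wait for the GPU if needed). */
	return ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
}
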
/linux-4.4.14/drivers/gpu/drm/bochs/
bochs_mm.c
74 static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo) bochs_bo_ttm_destroy() argument
78 bo = container_of(tbo, struct bochs_bo, bo); bochs_bo_ttm_destroy()
436 struct ttm_buffer_object *tbo; bochs_bo_unref() local
441 tbo = &((*bo)->bo); bochs_bo_unref()
442 ttm_bo_unref(&tbo); bochs_bo_unref()
/linux-4.4.14/drivers/gpu/drm/cirrus/
cirrus_main.c
266 struct ttm_buffer_object *tbo; cirrus_bo_unref() local
271 tbo = &((*bo)->bo); cirrus_bo_unref()
272 ttm_bo_unref(&tbo); cirrus_bo_unref()
cirrus_ttm.c
95 static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo) cirrus_bo_ttm_destroy() argument
99 bo = container_of(tbo, struct cirrus_bo, bo); cirrus_bo_ttm_destroy()
/linux-4.4.14/drivers/gpu/drm/mgag200/
mgag200_main.c
329 struct ttm_buffer_object *tbo; mgag200_bo_unref() local
334 tbo = &((*bo)->bo); mgag200_bo_unref()
335 ttm_bo_unref(&tbo); mgag200_bo_unref()
mgag200_ttm.c
95 static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo) mgag200_bo_ttm_destroy() argument
99 bo = container_of(tbo, struct mgag200_bo, bo); mgag200_bo_ttm_destroy()
/linux-4.4.14/drivers/gpu/drm/ast/
ast_main.c
546 struct ttm_buffer_object *tbo; ast_bo_unref() local
551 tbo = &((*bo)->bo); ast_bo_unref()
552 ttm_bo_unref(&tbo); ast_bo_unref()
ast_ttm.c
95 static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo) ast_bo_ttm_destroy() argument
99 bo = container_of(tbo, struct ast_bo, bo); ast_bo_ttm_destroy()
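
The bochs, cirrus, mgag200 and ast hits are all the same two small pieces of boilerplate: a *_bo_ttm_destroy() callback doing the container_of() dance shown earlier, and a *_bo_unref() that drops the reference through the embedded TTM object. The unref idiom, reconstructed from the hits above (radeon_bo_unref() and amdgpu_bo_unref() have the same shape); only the my_bo naming is illustrative.

void my_bo_unref(struct my_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	/* ttm_bo_unref() takes a ** so it can clear the local pointer once
	 * the reference has been dropped; freeing, if this was the last
	 * reference, happens through the destroy callback. */
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;	/* don't leave the caller a dangling pointer */
}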

Completed in 1477 milliseconds