Lines matching refs:bo in drivers/gpu/drm/ttm/ttm_bo.c (Linux DRM TTM buffer-object core). Each entry shows the source line number, the matching line, and the enclosing function; "argument" marks lines where bo is a function parameter, "local" where it is a local variable.

85 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,  in ttm_bo_mem_space_debug()  argument
91 bo, bo->mem.num_pages, bo->mem.size >> 10, in ttm_bo_mem_space_debug()
92 bo->mem.size >> 20); in ttm_bo_mem_space_debug()
100 ttm_mem_type_debug(bo->bdev, mem_type); in ttm_bo_mem_space_debug()
138 struct ttm_buffer_object *bo = in ttm_bo_release_list() local
140 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release_list()
141 size_t acc_size = bo->acc_size; in ttm_bo_release_list()
143 BUG_ON(atomic_read(&bo->list_kref.refcount)); in ttm_bo_release_list()
144 BUG_ON(atomic_read(&bo->kref.refcount)); in ttm_bo_release_list()
145 BUG_ON(atomic_read(&bo->cpu_writers)); in ttm_bo_release_list()
146 BUG_ON(bo->mem.mm_node != NULL); in ttm_bo_release_list()
147 BUG_ON(!list_empty(&bo->lru)); in ttm_bo_release_list()
148 BUG_ON(!list_empty(&bo->ddestroy)); in ttm_bo_release_list()
150 if (bo->ttm) in ttm_bo_release_list()
151 ttm_tt_destroy(bo->ttm); in ttm_bo_release_list()
152 atomic_dec(&bo->glob->bo_count); in ttm_bo_release_list()
153 if (bo->resv == &bo->ttm_resv) in ttm_bo_release_list()
154 reservation_object_fini(&bo->ttm_resv); in ttm_bo_release_list()
155 mutex_destroy(&bo->wu_mutex); in ttm_bo_release_list()
156 if (bo->destroy) in ttm_bo_release_list()
157 bo->destroy(bo); in ttm_bo_release_list()
159 kfree(bo); in ttm_bo_release_list()
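
ttm_bo_release_list() above is the release callback for bo->list_kref: the BUG_ON()s assert that the final reference cannot drop while the object still sits on an LRU or delayed-destroy list. A minimal sketch of the same kref release idiom, with hypothetical mydrv_* names:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct mydrv_obj {
            struct kref list_kref;
            /* ... driver state ... */
    };

    /* Runs only when the last reference is dropped. */
    static void mydrv_obj_release(struct kref *kref)
    {
            struct mydrv_obj *obj =
                    container_of(kref, struct mydrv_obj, list_kref);

            kfree(obj);
    }

    static void mydrv_obj_put(struct mydrv_obj *obj)
    {
            kref_put(&obj->list_kref, mydrv_obj_release);
    }
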
164 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) in ttm_bo_add_to_lru() argument
166 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_to_lru()
169 lockdep_assert_held(&bo->resv->lock.base); in ttm_bo_add_to_lru()
171 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { in ttm_bo_add_to_lru()
173 BUG_ON(!list_empty(&bo->lru)); in ttm_bo_add_to_lru()
175 man = &bdev->man[bo->mem.mem_type]; in ttm_bo_add_to_lru()
176 list_add_tail(&bo->lru, &man->lru); in ttm_bo_add_to_lru()
177 kref_get(&bo->list_kref); in ttm_bo_add_to_lru()
179 if (bo->ttm != NULL) { in ttm_bo_add_to_lru()
180 list_add_tail(&bo->swap, &bo->glob->swap_lru); in ttm_bo_add_to_lru()
181 kref_get(&bo->list_kref); in ttm_bo_add_to_lru()
187 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) in ttm_bo_del_from_lru() argument
191 if (!list_empty(&bo->swap)) { in ttm_bo_del_from_lru()
192 list_del_init(&bo->swap); in ttm_bo_del_from_lru()
195 if (!list_empty(&bo->lru)) { in ttm_bo_del_from_lru()
196 list_del_init(&bo->lru); in ttm_bo_del_from_lru()
213 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, in ttm_bo_list_ref_sub() argument
216 kref_sub(&bo->list_kref, count, in ttm_bo_list_ref_sub()
220 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) in ttm_bo_del_sub_from_lru() argument
224 spin_lock(&bo->glob->lru_lock); in ttm_bo_del_sub_from_lru()
225 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_del_sub_from_lru()
226 spin_unlock(&bo->glob->lru_lock); in ttm_bo_del_sub_from_lru()
227 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_del_sub_from_lru()
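
The three helpers above implement a "one reference per list membership" rule: ttm_bo_add_to_lru() takes a list_kref for each list the BO joins (the per-memory-type LRU, plus the global swap LRU when a TTM is attached), and ttm_bo_del_from_lru() returns how many references the caller must drop once the lru_lock spinlock is released, since the release path may sleep. A sketch of the pattern, refining the mydrv_obj example above with hypothetical names:

    #include <linux/list.h>
    #include <linux/kref.h>

    struct mydrv_obj {
            struct list_head lru;
            struct kref list_kref;
    };

    static void mydrv_lru_add(struct mydrv_obj *obj, struct list_head *lru)
    {
            BUG_ON(!list_empty(&obj->lru));   /* must not be listed twice */
            list_add_tail(&obj->lru, lru);
            kref_get(&obj->list_kref);        /* the list holds a reference */
    }

    /* Called under the LRU spinlock; returns the number of references the
     * caller drops after unlocking, as ttm_bo_del_sub_from_lru() does. */
    static int mydrv_lru_del(struct mydrv_obj *obj)
    {
            int put_count = 0;

            if (!list_empty(&obj->lru)) {
                    list_del_init(&obj->lru);
                    ++put_count;
            }
            return put_count;
    }
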
234 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) in ttm_bo_add_ttm() argument
236 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_ttm()
237 struct ttm_bo_global *glob = bo->glob; in ttm_bo_add_ttm()
241 TTM_ASSERT_LOCKED(&bo->mutex); in ttm_bo_add_ttm()
242 bo->ttm = NULL; in ttm_bo_add_ttm()
247 switch (bo->type) { in ttm_bo_add_ttm()
252 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm()
254 if (unlikely(bo->ttm == NULL)) in ttm_bo_add_ttm()
258 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm()
261 if (unlikely(bo->ttm == NULL)) { in ttm_bo_add_ttm()
265 bo->ttm->sg = bo->sg; in ttm_bo_add_ttm()
276 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, in ttm_bo_handle_move_mem() argument
281 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_handle_move_mem()
282 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); in ttm_bo_handle_move_mem()
284 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; in ttm_bo_handle_move_mem()
289 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { in ttm_bo_handle_move_mem()
293 ttm_bo_unmap_virtual_locked(bo); in ttm_bo_handle_move_mem()
302 if (bo->ttm == NULL) { in ttm_bo_handle_move_mem()
304 ret = ttm_bo_add_ttm(bo, zero); in ttm_bo_handle_move_mem()
309 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); in ttm_bo_handle_move_mem()
314 ret = ttm_tt_bind(bo->ttm, mem); in ttm_bo_handle_move_mem()
319 if (bo->mem.mem_type == TTM_PL_SYSTEM) { in ttm_bo_handle_move_mem()
321 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
322 bo->mem = *mem; in ttm_bo_handle_move_mem()
329 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
333 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); in ttm_bo_handle_move_mem()
335 ret = bdev->driver->move(bo, evict, interruptible, in ttm_bo_handle_move_mem()
338 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); in ttm_bo_handle_move_mem()
343 *mem = bo->mem; in ttm_bo_handle_move_mem()
344 bo->mem = tmp_mem; in ttm_bo_handle_move_mem()
345 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
346 bo->mem = *mem; in ttm_bo_handle_move_mem()
354 if (bo->evicted) { in ttm_bo_handle_move_mem()
356 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); in ttm_bo_handle_move_mem()
360 bo->evicted = false; in ttm_bo_handle_move_mem()
363 if (bo->mem.mm_node) { in ttm_bo_handle_move_mem()
364 bo->offset = (bo->mem.start << PAGE_SHIFT) + in ttm_bo_handle_move_mem()
365 bdev->man[bo->mem.mem_type].gpu_offset; in ttm_bo_handle_move_mem()
366 bo->cur_placement = bo->mem.placement; in ttm_bo_handle_move_mem()
368 bo->offset = 0; in ttm_bo_handle_move_mem()
373 new_man = &bdev->man[bo->mem.mem_type]; in ttm_bo_handle_move_mem()
374 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { in ttm_bo_handle_move_mem()
375 ttm_tt_unbind(bo->ttm); in ttm_bo_handle_move_mem()
376 ttm_tt_destroy(bo->ttm); in ttm_bo_handle_move_mem()
377 bo->ttm = NULL; in ttm_bo_handle_move_mem()
391 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) in ttm_bo_cleanup_memtype_use() argument
393 if (bo->bdev->driver->move_notify) in ttm_bo_cleanup_memtype_use()
394 bo->bdev->driver->move_notify(bo, NULL); in ttm_bo_cleanup_memtype_use()
396 if (bo->ttm) { in ttm_bo_cleanup_memtype_use()
397 ttm_tt_unbind(bo->ttm); in ttm_bo_cleanup_memtype_use()
398 ttm_tt_destroy(bo->ttm); in ttm_bo_cleanup_memtype_use()
399 bo->ttm = NULL; in ttm_bo_cleanup_memtype_use()
401 ttm_bo_mem_put(bo, &bo->mem); in ttm_bo_cleanup_memtype_use()
403 ww_mutex_unlock (&bo->resv->lock); in ttm_bo_cleanup_memtype_use()
406 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) in ttm_bo_flush_all_fences() argument
412 fobj = reservation_object_get_list(bo->resv); in ttm_bo_flush_all_fences()
413 fence = reservation_object_get_excl(bo->resv); in ttm_bo_flush_all_fences()
419 reservation_object_held(bo->resv)); in ttm_bo_flush_all_fences()
426 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) in ttm_bo_cleanup_refs_or_queue() argument
428 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_cleanup_refs_or_queue()
429 struct ttm_bo_global *glob = bo->glob; in ttm_bo_cleanup_refs_or_queue()
434 ret = __ttm_bo_reserve(bo, false, true, false, NULL); in ttm_bo_cleanup_refs_or_queue()
437 if (!ttm_bo_wait(bo, false, false, true)) { in ttm_bo_cleanup_refs_or_queue()
438 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_cleanup_refs_or_queue()
441 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_cleanup_refs_or_queue()
443 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_cleanup_refs_or_queue()
447 ttm_bo_flush_all_fences(bo); in ttm_bo_cleanup_refs_or_queue()
454 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { in ttm_bo_cleanup_refs_or_queue()
455 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; in ttm_bo_cleanup_refs_or_queue()
456 ttm_bo_add_to_lru(bo); in ttm_bo_cleanup_refs_or_queue()
459 __ttm_bo_unreserve(bo); in ttm_bo_cleanup_refs_or_queue()
462 kref_get(&bo->list_kref); in ttm_bo_cleanup_refs_or_queue()
463 list_add_tail(&bo->ddestroy, &bdev->ddestroy); in ttm_bo_cleanup_refs_or_queue()
482 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, in ttm_bo_cleanup_refs_and_unlock() argument
486 struct ttm_bo_global *glob = bo->glob; in ttm_bo_cleanup_refs_and_unlock()
490 ret = ttm_bo_wait(bo, false, false, true); in ttm_bo_cleanup_refs_and_unlock()
494 ww_mutex_unlock(&bo->resv->lock); in ttm_bo_cleanup_refs_and_unlock()
497 lret = reservation_object_wait_timeout_rcu(bo->resv, in ttm_bo_cleanup_refs_and_unlock()
508 ret = __ttm_bo_reserve(bo, false, true, false, NULL); in ttm_bo_cleanup_refs_and_unlock()
527 ret = ttm_bo_wait(bo, false, false, true); in ttm_bo_cleanup_refs_and_unlock()
531 if (ret || unlikely(list_empty(&bo->ddestroy))) { in ttm_bo_cleanup_refs_and_unlock()
532 __ttm_bo_unreserve(bo); in ttm_bo_cleanup_refs_and_unlock()
537 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_cleanup_refs_and_unlock()
538 list_del_init(&bo->ddestroy); in ttm_bo_cleanup_refs_and_unlock()
542 ttm_bo_cleanup_memtype_use(bo); in ttm_bo_cleanup_refs_and_unlock()
544 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_cleanup_refs_and_unlock()
623 struct ttm_buffer_object *bo = in ttm_bo_release() local
625 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release()
626 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; in ttm_bo_release()
628 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); in ttm_bo_release()
630 ttm_mem_io_free_vm(bo); in ttm_bo_release()
632 ttm_bo_cleanup_refs_or_queue(bo); in ttm_bo_release()
633 kref_put(&bo->list_kref, ttm_bo_release_list); in ttm_bo_release()
638 struct ttm_buffer_object *bo = *p_bo; in ttm_bo_unref() local
641 kref_put(&bo->kref, ttm_bo_release); in ttm_bo_unref()
659 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, in ttm_bo_evict() argument
662 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_evict()
667 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); in ttm_bo_evict()
676 lockdep_assert_held(&bo->resv->lock.base); in ttm_bo_evict()
678 evict_mem = bo->mem; in ttm_bo_evict()
685 bdev->driver->evict_flags(bo, &placement); in ttm_bo_evict()
686 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, in ttm_bo_evict()
691 bo); in ttm_bo_evict()
692 ttm_bo_mem_space_debug(bo, &placement); in ttm_bo_evict()
697 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, in ttm_bo_evict()
702 ttm_bo_mem_put(bo, &evict_mem); in ttm_bo_evict()
705 bo->evicted = true; in ttm_bo_evict()
718 struct ttm_buffer_object *bo; in ttm_mem_evict_first() local
722 list_for_each_entry(bo, &man->lru, lru) { in ttm_mem_evict_first()
723 ret = __ttm_bo_reserve(bo, false, true, false, NULL); in ttm_mem_evict_first()
729 if (place->fpfn >= (bo->mem.start + bo->mem.size) || in ttm_mem_evict_first()
730 (place->lpfn && place->lpfn <= bo->mem.start)) { in ttm_mem_evict_first()
731 __ttm_bo_unreserve(bo); in ttm_mem_evict_first()
746 kref_get(&bo->list_kref); in ttm_mem_evict_first()
748 if (!list_empty(&bo->ddestroy)) { in ttm_mem_evict_first()
749 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, in ttm_mem_evict_first()
751 kref_put(&bo->list_kref, ttm_bo_release_list); in ttm_mem_evict_first()
755 put_count = ttm_bo_del_from_lru(bo); in ttm_mem_evict_first()
760 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_mem_evict_first()
762 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); in ttm_mem_evict_first()
763 ttm_bo_unreserve(bo); in ttm_mem_evict_first()
765 kref_put(&bo->list_kref, ttm_bo_release_list); in ttm_mem_evict_first()
769 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) in ttm_bo_mem_put() argument
771 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; in ttm_bo_mem_put()
782 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, in ttm_bo_mem_force_space() argument
789 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_force_space()
794 ret = (*man->func->get_node)(man, bo, place, mem); in ttm_bo_mem_force_space()
862 int ttm_bo_mem_space(struct ttm_buffer_object *bo, in ttm_bo_mem_space() argument
868 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_space()
892 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, in ttm_bo_mem_space()
906 ret = (*man->func->get_node)(man, bo, place, mem); in ttm_bo_mem_space()
935 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, in ttm_bo_mem_space()
951 ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, in ttm_bo_mem_space()
965 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, in ttm_bo_move_buffer() argument
973 lockdep_assert_held(&bo->resv->lock.base); in ttm_bo_move_buffer()
980 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); in ttm_bo_move_buffer()
983 mem.num_pages = bo->num_pages; in ttm_bo_move_buffer()
985 mem.page_alignment = bo->mem.page_alignment; in ttm_bo_move_buffer()
991 ret = ttm_bo_mem_space(bo, placement, &mem, in ttm_bo_move_buffer()
995 ret = ttm_bo_handle_move_mem(bo, &mem, false, in ttm_bo_move_buffer()
999 ttm_bo_mem_put(bo, &mem); in ttm_bo_move_buffer()
1038 int ttm_bo_validate(struct ttm_buffer_object *bo, in ttm_bo_validate() argument
1046 lockdep_assert_held(&bo->resv->lock.base); in ttm_bo_validate()
1050 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { in ttm_bo_validate()
1051 ret = ttm_bo_move_buffer(bo, placement, interruptible, in ttm_bo_validate()
1060 ttm_flag_masked(&bo->mem.placement, new_flags, in ttm_bo_validate()
1066 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in ttm_bo_validate()
1067 ret = ttm_bo_add_ttm(bo, true); in ttm_bo_validate()
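
ttm_bo_validate() must be called with the BO reserved (the lockdep assertion at line 1046). A minimal sketch of reserving a BO and validating it into VRAM; the placement flags are illustrative, not prescriptive:

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_placement.h>

    static int mydrv_move_to_vram(struct ttm_buffer_object *bo)
    {
            struct ttm_place place = {
                    .fpfn = 0,
                    .lpfn = 0,
                    .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
            };
            struct ttm_placement placement = {
                    .num_placement = 1,
                    .placement = &place,
                    .num_busy_placement = 1,
                    .busy_placement = &place,
            };
            int ret;

            ret = ttm_bo_reserve(bo, true, false, false, NULL);
            if (ret)
                    return ret;
            ret = ttm_bo_validate(bo, &placement, true, false);
            ttm_bo_unreserve(bo);
            return ret;
    }
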
1076 struct ttm_buffer_object *bo, in ttm_bo_init() argument
1097 (*destroy)(bo); in ttm_bo_init()
1099 kfree(bo); in ttm_bo_init()
1107 (*destroy)(bo); in ttm_bo_init()
1109 kfree(bo); in ttm_bo_init()
1113 bo->destroy = destroy; in ttm_bo_init()
1115 kref_init(&bo->kref); in ttm_bo_init()
1116 kref_init(&bo->list_kref); in ttm_bo_init()
1117 atomic_set(&bo->cpu_writers, 0); in ttm_bo_init()
1118 INIT_LIST_HEAD(&bo->lru); in ttm_bo_init()
1119 INIT_LIST_HEAD(&bo->ddestroy); in ttm_bo_init()
1120 INIT_LIST_HEAD(&bo->swap); in ttm_bo_init()
1121 INIT_LIST_HEAD(&bo->io_reserve_lru); in ttm_bo_init()
1122 mutex_init(&bo->wu_mutex); in ttm_bo_init()
1123 bo->bdev = bdev; in ttm_bo_init()
1124 bo->glob = bdev->glob; in ttm_bo_init()
1125 bo->type = type; in ttm_bo_init()
1126 bo->num_pages = num_pages; in ttm_bo_init()
1127 bo->mem.size = num_pages << PAGE_SHIFT; in ttm_bo_init()
1128 bo->mem.mem_type = TTM_PL_SYSTEM; in ttm_bo_init()
1129 bo->mem.num_pages = bo->num_pages; in ttm_bo_init()
1130 bo->mem.mm_node = NULL; in ttm_bo_init()
1131 bo->mem.page_alignment = page_alignment; in ttm_bo_init()
1132 bo->mem.bus.io_reserved_vm = false; in ttm_bo_init()
1133 bo->mem.bus.io_reserved_count = 0; in ttm_bo_init()
1134 bo->priv_flags = 0; in ttm_bo_init()
1135 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); in ttm_bo_init()
1136 bo->persistent_swap_storage = persistent_swap_storage; in ttm_bo_init()
1137 bo->acc_size = acc_size; in ttm_bo_init()
1138 bo->sg = sg; in ttm_bo_init()
1140 bo->resv = resv; in ttm_bo_init()
1141 lockdep_assert_held(&bo->resv->lock.base); in ttm_bo_init()
1143 bo->resv = &bo->ttm_resv; in ttm_bo_init()
1144 reservation_object_init(&bo->ttm_resv); in ttm_bo_init()
1146 atomic_inc(&bo->glob->bo_count); in ttm_bo_init()
1147 drm_vma_node_reset(&bo->vma_node); in ttm_bo_init()
1153 if (bo->type == ttm_bo_type_device || in ttm_bo_init()
1154 bo->type == ttm_bo_type_sg) in ttm_bo_init()
1155 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, in ttm_bo_init()
1156 bo->mem.num_pages); in ttm_bo_init()
1162 locked = ww_mutex_trylock(&bo->resv->lock); in ttm_bo_init()
1167 ret = ttm_bo_validate(bo, placement, interruptible, false); in ttm_bo_init()
1170 ttm_bo_unreserve(bo); in ttm_bo_init()
1173 ttm_bo_unref(&bo); in ttm_bo_init()
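
Drivers normally embed struct ttm_buffer_object in their own BO type and pass a destroy callback: on the early failure paths (lines 1097-1109) ttm_bo_init() itself calls destroy (or kfree()), and on later failures it drops the reference (line 1173), so the caller must not free the object again. A hedged sketch with hypothetical mydrv_* names:

    struct mydrv_bo {
            struct ttm_buffer_object tbo;
            /* ... driver state ... */
    };

    static void mydrv_bo_destroy(struct ttm_buffer_object *tbo)
    {
            kfree(container_of(tbo, struct mydrv_bo, tbo));
    }

    static int mydrv_bo_new(struct ttm_bo_device *bdev, unsigned long size,
                            struct ttm_placement *placement,
                            struct mydrv_bo **out)
    {
            struct mydrv_bo *mbo;
            size_t acc_size;
            int ret;

            mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
            if (!mbo)
                    return -ENOMEM;
            acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct mydrv_bo));
            ret = ttm_bo_init(bdev, &mbo->tbo, size, ttm_bo_type_device,
                              placement, 0 /* page_alignment */,
                              true /* interruptible */,
                              NULL /* persistent_swap_storage */, acc_size,
                              NULL /* sg */, NULL /* resv */,
                              mydrv_bo_destroy);
            if (ret == 0)  /* on failure ttm_bo_init() already cleaned up */
                    *out = mbo;
            return ret;
    }
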
1217 struct ttm_buffer_object *bo; in ttm_bo_create() local
1221 bo = kzalloc(sizeof(*bo), GFP_KERNEL); in ttm_bo_create()
1222 if (unlikely(bo == NULL)) in ttm_bo_create()
1226 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, in ttm_bo_create()
1230 *p_bo = bo; in ttm_bo_create()
1513 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) in ttm_bo_unmap_virtual_locked() argument
1515 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_unmap_virtual_locked()
1517 drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); in ttm_bo_unmap_virtual_locked()
1518 ttm_mem_io_free_vm(bo); in ttm_bo_unmap_virtual_locked()
1521 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) in ttm_bo_unmap_virtual() argument
1523 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_unmap_virtual()
1524 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; in ttm_bo_unmap_virtual()
1527 ttm_bo_unmap_virtual_locked(bo); in ttm_bo_unmap_virtual()
1534 int ttm_bo_wait(struct ttm_buffer_object *bo, in ttm_bo_wait() argument
1543 resv = bo->resv; in ttm_bo_wait()
1577 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); in ttm_bo_wait()
1582 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) in ttm_bo_synccpu_write_grab() argument
1590 ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); in ttm_bo_synccpu_write_grab()
1593 ret = ttm_bo_wait(bo, false, true, no_wait); in ttm_bo_synccpu_write_grab()
1595 atomic_inc(&bo->cpu_writers); in ttm_bo_synccpu_write_grab()
1596 ttm_bo_unreserve(bo); in ttm_bo_synccpu_write_grab()
1601 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) in ttm_bo_synccpu_write_release() argument
1603 atomic_dec(&bo->cpu_writers); in ttm_bo_synccpu_write_release()
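
ttm_bo_synccpu_write_grab()/ttm_bo_synccpu_write_release() bracket CPU writes: the grab waits for pending GPU access and bumps cpu_writers, so execbuf-style validation (ttm_eu_reserve_buffers()) refuses the buffer while the CPU write is in flight. A minimal usage sketch:

    static int mydrv_cpu_write(struct ttm_buffer_object *bo)
    {
            int ret;

            ret = ttm_bo_synccpu_write_grab(bo, false /* no_wait */);
            if (ret)
                    return ret;
            /* ... write the buffer through a CPU mapping ... */
            ttm_bo_synccpu_write_release(bo);
            return 0;
    }
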
1616 struct ttm_buffer_object *bo; in ttm_bo_swapout() local
1622 list_for_each_entry(bo, &glob->swap_lru, swap) { in ttm_bo_swapout()
1623 ret = __ttm_bo_reserve(bo, false, true, false, NULL); in ttm_bo_swapout()
1633 kref_get(&bo->list_kref); in ttm_bo_swapout()
1635 if (!list_empty(&bo->ddestroy)) { in ttm_bo_swapout()
1636 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); in ttm_bo_swapout()
1637 kref_put(&bo->list_kref, ttm_bo_release_list); in ttm_bo_swapout()
1641 put_count = ttm_bo_del_from_lru(bo); in ttm_bo_swapout()
1644 ttm_bo_list_ref_sub(bo, put_count, true); in ttm_bo_swapout()
1650 ret = ttm_bo_wait(bo, false, false, false); in ttm_bo_swapout()
1655 if ((bo->mem.placement & swap_placement) != swap_placement) { in ttm_bo_swapout()
1658 evict_mem = bo->mem; in ttm_bo_swapout()
1663 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, in ttm_bo_swapout()
1669 ttm_bo_unmap_virtual(bo); in ttm_bo_swapout()
1676 if (bo->bdev->driver->swap_notify) in ttm_bo_swapout()
1677 bo->bdev->driver->swap_notify(bo); in ttm_bo_swapout()
1679 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); in ttm_bo_swapout()
1688 __ttm_bo_unreserve(bo); in ttm_bo_swapout()
1689 kref_put(&bo->list_kref, ttm_bo_release_list); in ttm_bo_swapout()
1706 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) in ttm_bo_wait_unreserved() argument
1717 ret = mutex_lock_interruptible(&bo->wu_mutex); in ttm_bo_wait_unreserved()
1720 if (!ww_mutex_is_locked(&bo->resv->lock)) in ttm_bo_wait_unreserved()
1722 ret = __ttm_bo_reserve(bo, true, false, false, NULL); in ttm_bo_wait_unreserved()
1725 __ttm_bo_unreserve(bo); in ttm_bo_wait_unreserved()
1728 mutex_unlock(&bo->wu_mutex); in ttm_bo_wait_unreserved()
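
ttm_bo_wait_unreserved() serves the mmap fault path, which must not sleep on the reservation ww_mutex while holding mmap_sem. A hedged sketch of the back-off/retry idiom, modeled on ttm_bo_vm_fault() in ttm_bo_vm.c of the same era and simplified (mydrv_fault is hypothetical):

    static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            struct ttm_buffer_object *bo = vma->vm_private_data;
            int ret;

            /* no_wait reserve: never block on the ww_mutex under mmap_sem */
            ret = ttm_bo_reserve(bo, true, true, false, NULL);
            if (unlikely(ret != 0)) {
                    if (ret == -EBUSY &&
                        (vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
                        !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                            ttm_bo_reference(bo); /* pin bo across the wait */
                            up_read(&vma->vm_mm->mmap_sem);
                            (void)ttm_bo_wait_unreserved(bo);
                            ttm_bo_unref(&bo);
                            return VM_FAULT_RETRY;
                    }
                    return VM_FAULT_NOPAGE;
            }
            /* ... insert PTEs for the faulting range ... */
            ttm_bo_unreserve(bo);
            return VM_FAULT_NOPAGE;
    }
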