Lines matching refs:bdev (references to the identifier bdev in the TTM buffer-object core, ttm_bo.c)
70 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) in ttm_mem_type_debug() argument
72 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; in ttm_mem_type_debug()
100 ttm_mem_type_debug(bo->bdev, mem_type); in ttm_bo_mem_space_debug()
140 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release_list() local
161 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); in ttm_bo_release_list()
166 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_to_lru() local
175 man = &bdev->man[bo->mem.mem_type]; in ttm_bo_add_to_lru()
236 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_add_ttm() local
244 if (bdev->need_dma32) in ttm_bo_add_ttm()
252 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm()
258 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, in ttm_bo_add_ttm()
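ttm_bo_add_ttm defers TTM backend creation to the driver's ttm_tt_create hook, adding a DMA32 page flag first when bdev->need_dma32 is set. A minimal sketch of such a hook, assuming the hook signature of this TTM generation; the mydrv_ name is hypothetical:

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
					  unsigned long size,
					  uint32_t page_flags,
					  struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(*tt), GFP_KERNEL);
	if (!tt)
		return NULL;

	/* ttm_tt_init() sets up the page array; the pages themselves
	 * are only allocated later, when the TT is populated. */
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}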
281 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_handle_move_mem() local
282 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); in ttm_bo_handle_move_mem()
283 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem); in ttm_bo_handle_move_mem()
284 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; in ttm_bo_handle_move_mem()
285 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type]; in ttm_bo_handle_move_mem()
320 if (bdev->driver->move_notify) in ttm_bo_handle_move_mem()
321 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
328 if (bdev->driver->move_notify) in ttm_bo_handle_move_mem()
329 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
334 else if (bdev->driver->move) in ttm_bo_handle_move_mem()
335 ret = bdev->driver->move(bo, evict, interruptible, in ttm_bo_handle_move_mem()
341 if (bdev->driver->move_notify) { in ttm_bo_handle_move_mem()
345 bdev->driver->move_notify(bo, mem); in ttm_bo_handle_move_mem()
355 if (bdev->driver->invalidate_caches) { in ttm_bo_handle_move_mem()
356 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); in ttm_bo_handle_move_mem()
365 bdev->man[bo->mem.mem_type].gpu_offset; in ttm_bo_handle_move_mem()
373 new_man = &bdev->man[bo->mem.mem_type]; in ttm_bo_handle_move_mem()
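In the move path above TTM prefers driver acceleration: bdev->driver->move is called when present, with move_notify informing the driver around the transition, and TTM otherwise falls back to its built-in copy helpers. A hedged sketch of a move hook that just delegates to the CPU-copy fallback (a real driver would submit a blit/DMA job here); mydrv_ is hypothetical and the signatures are assumed from this TTM generation:

#include <drm/ttm/ttm_bo_driver.h>

static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 bool interruptible, bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	/* Always correct, if slow: let TTM copy through the CPU. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}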
393 if (bo->bdev->driver->move_notify) in ttm_bo_cleanup_memtype_use()
394 bo->bdev->driver->move_notify(bo, NULL); in ttm_bo_cleanup_memtype_use()
428 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_cleanup_refs_or_queue() local
463 list_add_tail(&bo->ddestroy, &bdev->ddestroy); in ttm_bo_cleanup_refs_or_queue()
466 schedule_delayed_work(&bdev->wq, in ttm_bo_cleanup_refs_or_queue()
554 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) in ttm_bo_delayed_delete() argument
556 struct ttm_bo_global *glob = bdev->glob; in ttm_bo_delayed_delete()
561 if (list_empty(&bdev->ddestroy)) in ttm_bo_delayed_delete()
564 entry = list_first_entry(&bdev->ddestroy, in ttm_bo_delayed_delete()
571 if (entry->ddestroy.next != &bdev->ddestroy) { in ttm_bo_delayed_delete()
612 struct ttm_bo_device *bdev = in ttm_bo_delayed_workqueue() local
615 if (ttm_bo_delayed_delete(bdev, false)) { in ttm_bo_delayed_workqueue()
616 schedule_delayed_work(&bdev->wq, in ttm_bo_delayed_workqueue()
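ttm_bo_delayed_workqueue is a self-rearming work item: ttm_bo_delayed_delete(bdev, false) reports whether busy objects are still queued on bdev->ddestroy, and the handler re-schedules itself in that case. The pattern in isolation, as a sketch with hypothetical names:

#include <linux/workqueue.h>

struct reaper {
	struct delayed_work dwork;
	bool (*work_remains)(struct reaper *r);	/* hypothetical */
};

static void reaper_fn(struct work_struct *work)
{
	struct reaper *r = container_of(work, struct reaper, dwork.work);

	/* Mirrors ttm_bo_delayed_delete(bdev, false): reap what is
	 * idle now, re-arm if busy objects remain. */
	if (r->work_remains(r))
		schedule_delayed_work(&r->dwork,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

The delay expression matches the one TTM uses, guaranteeing at least one jiffy on low-HZ configurations.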
625 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_release() local
626 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; in ttm_bo_release()
628 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); in ttm_bo_release()
645 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev) in ttm_bo_lock_delayed_workqueue() argument
647 return cancel_delayed_work_sync(&bdev->wq); in ttm_bo_lock_delayed_workqueue()
651 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched) in ttm_bo_unlock_delayed_workqueue() argument
654 schedule_delayed_work(&bdev->wq, in ttm_bo_unlock_delayed_workqueue()
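ttm_bo_lock_delayed_workqueue cancels bdev->wq synchronously and returns whether it was pending; that value is handed back to ttm_bo_unlock_delayed_workqueue so the work is only re-queued if it had been queued before. The usual caller pattern, e.g. around a GPU reset or suspend (hardware step elided):

int resched;

resched = ttm_bo_lock_delayed_workqueue(bdev);

/* ... delayed-destroy work is quiesced while we touch the HW ... */

ttm_bo_unlock_delayed_workqueue(bdev, resched);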
662 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_evict() local
685 bdev->driver->evict_flags(bo, &placement); in ttm_bo_evict()
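When evicting, TTM asks the driver where the buffer may go through the evict_flags hook, which fills a ttm_placement that ttm_bo_evict then feeds to ttm_bo_mem_space. A minimal hook that routes everything to cached system memory, assuming the struct ttm_place based API implied by the place parameter of ttm_mem_evict_first below; the mydrv_ names are hypothetical:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static const struct ttm_place mydrv_sys_place = {
	.fpfn = 0,
	.lpfn = 0,	/* no range restriction */
	.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING,
};

static void mydrv_evict_flags(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement)
{
	placement->num_placement = 1;
	placement->placement = &mydrv_sys_place;
	placement->num_busy_placement = 1;
	placement->busy_placement = &mydrv_sys_place;
}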
710 static int ttm_mem_evict_first(struct ttm_bo_device *bdev, in ttm_mem_evict_first() argument
716 struct ttm_bo_global *glob = bdev->glob; in ttm_mem_evict_first()
717 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; in ttm_mem_evict_first()
771 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; in ttm_bo_mem_put()
789 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_force_space() local
790 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; in ttm_bo_mem_force_space()
799 ret = ttm_mem_evict_first(bdev, mem_type, place, in ttm_bo_mem_force_space()
868 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_mem_space() local
884 man = &bdev->man[mem_type]; in ttm_bo_mem_space()
927 man = &bdev->man[mem_type]; in ttm_bo_mem_space()
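ttm_bo_mem_space makes two passes: the placement->placement list is tried without disturbing anyone, then the placement->busy_placement list is retried via ttm_bo_mem_force_space, which calls ttm_mem_evict_first until space frees up. Callers encode a preference order and hand it to ttm_bo_validate; a sketch of a "VRAM, else GTT" placement under the same struct ttm_place assumption as above:

static const struct ttm_place mydrv_vram_gtt[] = {
	{ .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC },
	{ .flags = TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED },
};

static const struct ttm_placement mydrv_vram_gtt_placement = {
	.num_placement = 2,
	.placement = mydrv_vram_gtt,
	.num_busy_placement = 2,
	.busy_placement = mydrv_vram_gtt,
};

/* With the BO reserved, ttm_bo_validate(bo, &mydrv_vram_gtt_placement,
 * true, false) tries VRAM first and falls back to GTT, evicting
 * neighbours only on the second pass. */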
1079 int ttm_bo_init(struct ttm_bo_device *bdev, in ttm_bo_init() argument
1094 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; in ttm_bo_init()
1127 bo->bdev = bdev; in ttm_bo_init()
1128 bo->glob = bdev->glob; in ttm_bo_init()
1159 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, in ttm_bo_init()
1183 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev, in ttm_bo_acc_size() argument
1197 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, in ttm_bo_dma_acc_size() argument
1212 int ttm_bo_create(struct ttm_bo_device *bdev, in ttm_bo_create() argument
1229 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); in ttm_bo_create()
1230 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, in ttm_bo_create()
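ttm_bo_create is the convenience wrapper visible here: it computes the accounting size with ttm_bo_acc_size and hands off to ttm_bo_init, which charges mem_glob and registers the mmap offset via drm_vma_offset_add. Typical use, assuming the eight-argument signature of this generation and reusing the hypothetical placement from the previous sketch:

struct ttm_buffer_object *bo = NULL;
int ret;

ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel,
		    &mydrv_vram_gtt_placement,
		    0,		/* page_alignment: default */
		    false,	/* interruptible */
		    NULL,	/* persistent_swap_storage */
		    &bo);
if (ret)
	return ret;
/* bo comes back unreserved, holding one reference; release it
 * with ttm_bo_unref(&bo) when done. */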
1240 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, in ttm_bo_force_list_clean() argument
1243 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; in ttm_bo_force_list_clean()
1244 struct ttm_bo_global *glob = bdev->glob; in ttm_bo_force_list_clean()
1254 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false); in ttm_bo_force_list_clean()
1268 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) in ttm_bo_clean_mm() argument
1277 man = &bdev->man[mem_type]; in ttm_bo_clean_mm()
1290 ttm_bo_force_list_clean(bdev, mem_type, false); in ttm_bo_clean_mm()
1299 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) in ttm_bo_evict_mm() argument
1301 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; in ttm_bo_evict_mm()
1313 return ttm_bo_force_list_clean(bdev, mem_type, true); in ttm_bo_evict_mm()
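ttm_bo_evict_mm empties a memory type but keeps its manager alive (buffers are moved to their eviction placements), whereas ttm_bo_clean_mm also takes the manager down; drivers typically use the former at suspend and the latter at unload. Sketch:

/* Suspend: move everything out of VRAM so contents survive the
 * power cycle. */
ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
if (ret)
	pr_err("VRAM eviction failed (%d)\n", ret);

/* Unload: evict and disable the VRAM manager for good. */
ttm_bo_clean_mm(bdev, TTM_PL_VRAM);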
1317 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, in ttm_bo_init_mm() argument
1324 man = &bdev->man[type]; in ttm_bo_init_mm()
1331 ret = bdev->driver->init_mem_type(bdev, type, man); in ttm_bo_init_mm()
1334 man->bdev = bdev; in ttm_bo_init_mm()
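ttm_bo_init_mm delegates the manager's configuration to the driver's init_mem_type hook before linking it to the device. A hedged sketch of such a hook for a fixed VRAM aperture, modeled on how drivers of this era commonly fill the fields (mydrv_ hypothetical):

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static int mydrv_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			       struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;	/* range allocator */
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

The manager is then enabled with ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT).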
1414 int ttm_bo_device_release(struct ttm_bo_device *bdev) in ttm_bo_device_release() argument
1419 struct ttm_bo_global *glob = bdev->glob; in ttm_bo_device_release()
1422 man = &bdev->man[i]; in ttm_bo_device_release()
1425 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) { in ttm_bo_device_release()
1435 list_del(&bdev->device_list); in ttm_bo_device_release()
1438 cancel_delayed_work_sync(&bdev->wq); in ttm_bo_device_release()
1440 while (ttm_bo_delayed_delete(bdev, true)) in ttm_bo_device_release()
1444 if (list_empty(&bdev->ddestroy)) in ttm_bo_device_release()
1447 if (list_empty(&bdev->man[0].lru)) in ttm_bo_device_release()
1451 drm_vma_offset_manager_destroy(&bdev->vma_manager); in ttm_bo_device_release()
1457 int ttm_bo_device_init(struct ttm_bo_device *bdev, in ttm_bo_device_init() argument
1466 bdev->driver = driver; in ttm_bo_device_init()
1468 memset(bdev->man, 0, sizeof(bdev->man)); in ttm_bo_device_init()
1474 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0); in ttm_bo_device_init()
1478 drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset, in ttm_bo_device_init()
1480 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); in ttm_bo_device_init()
1481 INIT_LIST_HEAD(&bdev->ddestroy); in ttm_bo_device_init()
1482 bdev->dev_mapping = mapping; in ttm_bo_device_init()
1483 bdev->glob = glob; in ttm_bo_device_init()
1484 bdev->need_dma32 = need_dma32; in ttm_bo_device_init()
1485 bdev->val_seq = 0; in ttm_bo_device_init()
1487 list_add_tail(&bdev->device_list, &glob->device_list); in ttm_bo_device_init()
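Putting the initialization above together, a driver bring-up might look as follows, assuming the six-argument ttm_bo_device_init of this era; drm->bdev, glob, and MYDRV_FILE_PAGE_OFFSET are hypothetical driver-side names:

ret = ttm_bo_device_init(&drm->bdev, glob, &mydrv_bo_driver,
			 dev->anon_inode->i_mapping,
			 MYDRV_FILE_PAGE_OFFSET, need_dma32);
if (ret)
	return ret;

/* TTM_PL_SYSTEM was registered inside ttm_bo_device_init; other
 * memory types are added explicitly. */
ret = ttm_bo_init_mm(&drm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);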
1500 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) in ttm_mem_reg_is_pci() argument
1502 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; in ttm_mem_reg_is_pci()
1519 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_unmap_virtual_locked() local
1521 drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); in ttm_bo_unmap_virtual_locked()
1527 struct ttm_bo_device *bdev = bo->bdev; in ttm_bo_unmap_virtual() local
1528 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; in ttm_bo_unmap_virtual()
1680 if (bo->bdev->driver->swap_notify) in ttm_bo_swapout()
1681 bo->bdev->driver->swap_notify(bo); in ttm_bo_swapout()
1697 void ttm_bo_swapout_all(struct ttm_bo_device *bdev) in ttm_bo_swapout_all() argument
1699 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) in ttm_bo_swapout_all()
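ttm_bo_swapout_all simply drains the global shrink list, one buffer per ttm_bo_swapout call, until a call fails; the optional swap_notify hook seen above gives the driver a last look at each victim. A driver can use the same entry point directly, e.g. before hibernation:

/* Push every swappable buffer's backing pages out to shmem. */
ttm_bo_swapout_all(bdev);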