Searched refs:bo (Results 1 - 177 of 177) sorted by relevance

/linux-4.1.27/drivers/gpu/drm/qxl/
qxl_object.c
32 struct qxl_bo *bo; qxl_ttm_bo_destroy() local
35 bo = container_of(tbo, struct qxl_bo, tbo); qxl_ttm_bo_destroy()
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; qxl_ttm_bo_destroy()
38 qxl_surface_evict(qdev, bo, false); qxl_ttm_bo_destroy()
40 list_del_init(&bo->list); qxl_ttm_bo_destroy()
42 drm_gem_object_release(&bo->gem_base); qxl_ttm_bo_destroy()
43 kfree(bo); qxl_ttm_bo_destroy()
46 bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) qxl_ttm_bo_is_qxl_bo() argument
48 if (bo->destroy == &qxl_ttm_bo_destroy) qxl_ttm_bo_is_qxl_bo()
83 struct qxl_bo *bo; qxl_bo_create() local
92 bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL); qxl_bo_create()
93 if (bo == NULL) qxl_bo_create()
96 r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size); qxl_bo_create()
98 kfree(bo); qxl_bo_create()
101 bo->type = domain; qxl_bo_create()
102 bo->pin_count = pinned ? 1 : 0; qxl_bo_create()
103 bo->surface_id = 0; qxl_bo_create()
104 INIT_LIST_HEAD(&bo->list); qxl_bo_create()
107 bo->surf = *surf; qxl_bo_create()
109 qxl_ttm_placement_from_domain(bo, domain, pinned); qxl_bo_create()
111 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, qxl_bo_create()
112 &bo->placement, 0, !kernel, NULL, size, qxl_bo_create()
121 *bo_ptr = bo; qxl_bo_create()
125 int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) qxl_bo_kmap() argument
130 if (bo->kptr) { qxl_bo_kmap()
132 *ptr = bo->kptr; qxl_bo_kmap()
135 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); qxl_bo_kmap()
138 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); qxl_bo_kmap()
140 *ptr = bo->kptr; qxl_bo_kmap()
145 struct qxl_bo *bo, int page_offset) qxl_bo_kmap_atomic_page()
147 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; qxl_bo_kmap_atomic_page()
152 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) qxl_bo_kmap_atomic_page()
154 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) qxl_bo_kmap_atomic_page()
160 ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem); qxl_bo_kmap_atomic_page()
163 return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); qxl_bo_kmap_atomic_page()
165 if (bo->kptr) { qxl_bo_kmap_atomic_page()
166 rptr = bo->kptr + (page_offset * PAGE_SIZE); qxl_bo_kmap_atomic_page()
170 ret = qxl_bo_kmap(bo, &rptr); qxl_bo_kmap_atomic_page()
178 void qxl_bo_kunmap(struct qxl_bo *bo) qxl_bo_kunmap() argument
180 if (bo->kptr == NULL) qxl_bo_kunmap()
182 bo->kptr = NULL; qxl_bo_kunmap()
183 ttm_bo_kunmap(&bo->kmap); qxl_bo_kunmap()
187 struct qxl_bo *bo, void *pmap) qxl_bo_kunmap_atomic_page()
189 struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; qxl_bo_kunmap_atomic_page()
192 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) qxl_bo_kunmap_atomic_page()
194 else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) qxl_bo_kunmap_atomic_page()
202 ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); qxl_bo_kunmap_atomic_page()
206 qxl_bo_kunmap(bo); qxl_bo_kunmap_atomic_page()
209 void qxl_bo_unref(struct qxl_bo **bo) qxl_bo_unref() argument
213 if ((*bo) == NULL) qxl_bo_unref()
215 tbo = &((*bo)->tbo); qxl_bo_unref()
218 *bo = NULL; qxl_bo_unref()
221 struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) qxl_bo_ref() argument
223 ttm_bo_reference(&bo->tbo); qxl_bo_ref()
224 return bo; qxl_bo_ref()
227 int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) qxl_bo_pin() argument
229 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; qxl_bo_pin()
232 if (bo->pin_count) { qxl_bo_pin()
233 bo->pin_count++; qxl_bo_pin()
235 *gpu_addr = qxl_bo_gpu_offset(bo); qxl_bo_pin()
238 qxl_ttm_placement_from_domain(bo, domain, true); qxl_bo_pin()
239 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); qxl_bo_pin()
241 bo->pin_count = 1; qxl_bo_pin()
243 *gpu_addr = qxl_bo_gpu_offset(bo); qxl_bo_pin()
246 dev_err(qdev->dev, "%p pin failed\n", bo); qxl_bo_pin()
250 int qxl_bo_unpin(struct qxl_bo *bo) qxl_bo_unpin() argument
252 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; qxl_bo_unpin()
255 if (!bo->pin_count) { qxl_bo_unpin()
256 dev_warn(qdev->dev, "%p unpin not necessary\n", bo); qxl_bo_unpin()
259 bo->pin_count--; qxl_bo_unpin()
260 if (bo->pin_count) qxl_bo_unpin()
262 for (i = 0; i < bo->placement.num_placement; i++) qxl_bo_unpin()
263 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; qxl_bo_unpin()
264 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); qxl_bo_unpin()
266 dev_err(qdev->dev, "%p validate failed for unpin\n", bo); qxl_bo_unpin()
272 struct qxl_bo *bo, *n; qxl_bo_force_delete() local
277 list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { qxl_bo_force_delete()
280 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, qxl_bo_force_delete()
281 *((unsigned long *)&bo->gem_base.refcount)); qxl_bo_force_delete()
283 list_del_init(&bo->list); qxl_bo_force_delete()
285 /* this should unref the ttm bo */ qxl_bo_force_delete()
286 drm_gem_object_unreference(&bo->gem_base); qxl_bo_force_delete()
301 int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) qxl_bo_check_id() argument
304 if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) { qxl_bo_check_id()
306 ret = qxl_surface_id_alloc(qdev, bo); qxl_bo_check_id()
310 ret = qxl_hw_surface_alloc(qdev, bo, NULL); qxl_bo_check_id()
144 qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset) qxl_bo_kmap_atomic_page() argument
186 qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *pmap) qxl_bo_kunmap_atomic_page() argument
qxl_object.h
30 static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait) qxl_bo_reserve() argument
34 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); qxl_bo_reserve()
37 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; qxl_bo_reserve()
38 dev_err(qdev->dev, "%p reserve failed\n", bo); qxl_bo_reserve()
45 static inline void qxl_bo_unreserve(struct qxl_bo *bo) qxl_bo_unreserve() argument
47 ttm_bo_unreserve(&bo->tbo); qxl_bo_unreserve()
50 static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo) qxl_bo_gpu_offset() argument
52 return bo->tbo.offset; qxl_bo_gpu_offset()
55 static inline unsigned long qxl_bo_size(struct qxl_bo *bo) qxl_bo_size() argument
57 return bo->tbo.num_pages << PAGE_SHIFT; qxl_bo_size()
60 static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) qxl_bo_mmap_offset() argument
62 return drm_vma_node_offset_addr(&bo->tbo.vma_node); qxl_bo_mmap_offset()
65 static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, qxl_bo_wait() argument
70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); qxl_bo_wait()
73 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; qxl_bo_wait()
75 bo); qxl_bo_wait()
80 *mem_type = bo->tbo.mem.mem_type; qxl_bo_wait()
82 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); qxl_bo_wait()
83 ttm_bo_unreserve(&bo->tbo); qxl_bo_wait()
92 extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
93 extern void qxl_bo_kunmap(struct qxl_bo *bo);
94 void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
95 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
96 extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
97 extern void qxl_bo_unref(struct qxl_bo **bo);
98 extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
99 extern int qxl_bo_unpin(struct qxl_bo *bo);
101 extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
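Taken together, the qxl_object.h prototypes above follow TTM's usual reserve/pin/map discipline: reserve the object, pin it so it cannot be evicted, kmap it for CPU access, then undo each step in reverse. The fragment below is a minimal usage sketch, not code from the tree; it assumes a kernel build context with an already-created struct qxl_bo, and the wrapper name qxl_bo_write_magic() and the value written are invented for illustration. Only helpers whose prototypes appear above are called (pinning with a NULL gpu_addr mirrors the qxl_crtc_mode_set() hit below).

/* Sketch: CPU write to a qxl_bo using only the helpers declared above.
 * Hypothetical wrapper; error handling kept to the minimum. */
#include "qxl_drv.h"
#include "qxl_object.h"

static int qxl_bo_write_magic(struct qxl_bo *bo)
{
	void *ptr;
	int r;

	r = qxl_bo_reserve(bo, false);		/* take the reservation, may wait */
	if (r)
		return r;

	r = qxl_bo_pin(bo, bo->type, NULL);	/* keep it resident in its domain */
	if (r)
		goto out_unreserve;

	r = qxl_bo_kmap(bo, &ptr);		/* kernel mapping of the whole bo */
	if (r)
		goto out_unpin;

	*(u32 *)ptr = 0xdeadbeef;		/* arbitrary CPU-side write */
	qxl_bo_kunmap(bo);

out_unpin:
	qxl_bo_unpin(bo);
out_unreserve:
	qxl_bo_unreserve(bo);
	return r;
}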
qxl_release.c
36 /* put an alloc/dealloc surface cmd into one bo and round up to 128 */
166 struct qxl_bo *bo; qxl_release_free_list() local
170 bo = to_qxl_bo(entry->tv.bo); qxl_release_free_list()
171 qxl_bo_unref(&bo); qxl_release_free_list()
204 struct qxl_bo **bo) qxl_release_bo_alloc()
207 /* pin releases bo's they are too messy to evict */ qxl_release_bo_alloc()
210 bo); qxl_release_bo_alloc()
214 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) qxl_release_list_add() argument
219 if (entry->tv.bo == &bo->tbo) qxl_release_list_add()
227 qxl_bo_ref(bo); qxl_release_list_add()
228 entry->tv.bo = &bo->tbo; qxl_release_list_add()
234 static int qxl_release_validate_bo(struct qxl_bo *bo) qxl_release_validate_bo() argument
238 if (!bo->pin_count) { qxl_release_validate_bo()
239 qxl_ttm_placement_from_domain(bo, bo->type, false); qxl_release_validate_bo()
240 ret = ttm_bo_validate(&bo->tbo, &bo->placement, qxl_release_validate_bo()
246 ret = reservation_object_reserve_shared(bo->tbo.resv); qxl_release_validate_bo()
251 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); qxl_release_validate_bo()
273 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); qxl_release_reserve_list() local
275 ret = qxl_release_validate_bo(bo); qxl_release_reserve_list()
303 struct qxl_bo *bo; qxl_alloc_surface_release_reserved() local
310 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); qxl_alloc_surface_release_reserved()
314 qxl_release_list_add(*release, bo); qxl_alloc_surface_release_reserved()
320 qxl_bo_unref(&bo); qxl_alloc_surface_release_reserved()
332 struct qxl_bo *bo; qxl_alloc_release_reserved() local
370 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); qxl_alloc_release_reserved()
376 *rbo = bo; qxl_alloc_release_reserved()
380 qxl_release_list_add(*release, bo); qxl_alloc_release_reserved()
386 qxl_bo_unref(&bo); qxl_alloc_release_reserved()
412 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); qxl_release_map() local
414 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); qxl_release_map()
426 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); qxl_release_unmap() local
430 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); qxl_release_unmap()
435 struct ttm_buffer_object *bo; qxl_release_fence_buffer_objects() local
448 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; qxl_release_fence_buffer_objects()
449 bdev = bo->bdev; qxl_release_fence_buffer_objects()
461 glob = bo->glob; qxl_release_fence_buffer_objects()
466 bo = entry->bo; qxl_release_fence_buffer_objects()
467 qbo = to_qxl_bo(bo); qxl_release_fence_buffer_objects()
469 reservation_object_add_shared_fence(bo->resv, &release->base); qxl_release_fence_buffer_objects()
470 ttm_bo_add_to_lru(bo); qxl_release_fence_buffer_objects()
471 __ttm_bo_unreserve(bo); qxl_release_fence_buffer_objects()
203 qxl_release_bo_alloc(struct qxl_device *qdev, struct qxl_bo **bo) qxl_release_bo_alloc() argument
qxl_ttm.c
111 struct ttm_buffer_object *bo; qxl_ttm_fault() local
114 bo = (struct ttm_buffer_object *)vma->vm_private_data; qxl_ttm_fault()
115 if (bo == NULL) qxl_ttm_fault()
187 static void qxl_evict_flags(struct ttm_buffer_object *bo, qxl_evict_flags() argument
197 if (!qxl_ttm_bo_is_qxl_bo(bo)) { qxl_evict_flags()
204 qbo = container_of(bo, struct qxl_bo, tbo); qxl_evict_flags()
209 static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) qxl_verify_access() argument
211 struct qxl_bo *qbo = to_qxl_bo(bo); qxl_verify_access()
337 static void qxl_move_null(struct ttm_buffer_object *bo, qxl_move_null() argument
340 struct ttm_mem_reg *old_mem = &bo->mem; qxl_move_null()
347 static int qxl_bo_move(struct ttm_buffer_object *bo, qxl_bo_move() argument
352 struct ttm_mem_reg *old_mem = &bo->mem; qxl_bo_move()
353 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { qxl_bo_move()
354 qxl_move_null(bo, new_mem); qxl_bo_move()
357 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); qxl_bo_move()
360 static void qxl_bo_move_notify(struct ttm_buffer_object *bo, qxl_bo_move_notify() argument
366 if (!qxl_ttm_bo_is_qxl_bo(bo)) qxl_bo_move_notify()
368 qbo = container_of(bo, struct qxl_bo, tbo); qxl_bo_move_notify()
371 if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id) qxl_bo_move_notify()
qxl_debugfs.c
58 struct qxl_bo *bo; qxl_debugfs_buffers_info() local
60 list_for_each_entry(bo, &qdev->gem.objects, list) { qxl_debugfs_buffers_info()
65 fobj = rcu_dereference(bo->tbo.resv->fence); qxl_debugfs_buffers_info()
70 (unsigned long)bo->gem_base.size, qxl_debugfs_buffers_info()
71 bo->pin_count, rel); qxl_debugfs_buffers_info()
qxl_display.c
228 struct qxl_bo *bo = gem_to_qxl_bo(qfb_src->obj); qxl_crtc_page_flip() local
242 bo->is_primary = true; qxl_crtc_page_flip()
244 ret = qxl_bo_reserve(bo, false); qxl_crtc_page_flip()
248 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, qxl_crtc_page_flip()
260 qxl_bo_unreserve(bo); qxl_crtc_page_flip()
388 /* finish with the userspace bo */ qxl_crtc_cursor_set2()
604 struct qxl_bo *bo, *old_bo = NULL; qxl_crtc_mode_set() local
619 bo = gem_to_qxl_bo(qfb->obj); qxl_crtc_mode_set()
626 if (bo->is_primary == false) qxl_crtc_mode_set()
629 if (bo->surf.stride * bo->surf.height > qdev->vram_size) { qxl_crtc_mode_set()
634 ret = qxl_bo_reserve(bo, false); qxl_crtc_mode_set()
637 ret = qxl_bo_pin(bo, bo->type, NULL); qxl_crtc_mode_set()
639 qxl_bo_unreserve(bo); qxl_crtc_mode_set()
642 qxl_bo_unreserve(bo); qxl_crtc_mode_set()
647 bo->surf.width, bo->surf.height, qxl_crtc_mode_set()
648 bo->surf.stride, bo->surf.format); qxl_crtc_mode_set()
649 qxl_io_create_primary(qdev, 0, bo); qxl_crtc_mode_set()
650 bo->is_primary = true; qxl_crtc_mode_set()
653 if (bo->is_primary) { qxl_crtc_mode_set()
654 DRM_DEBUG_KMS("setting surface_id to 0 for primary surface %d on crtc %d\n", bo->surface_id, qcrtc->index); qxl_crtc_mode_set()
657 surf_id = bo->surface_id; qxl_crtc_mode_set()
660 if (old_bo && old_bo != bo) { qxl_crtc_mode_set()
692 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); qxl_crtc_disable() local
694 ret = qxl_bo_reserve(bo, false); qxl_crtc_disable()
695 qxl_bo_unpin(bo); qxl_crtc_disable()
696 qxl_bo_unreserve(bo); qxl_crtc_disable()
qxl_image.c
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); qxl_allocate_chunk()
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); qxl_image_alloc_objects()
78 qxl_bo_unref(&image->bo); qxl_image_alloc_objects()
91 qxl_bo_unref(&chunk->bo); qxl_image_free_objects()
95 qxl_bo_unref(&dimage->bo); qxl_image_free_objects()
122 chunk_bo = drv_chunk->bo; qxl_image_init_helper()
189 image_bo = dimage->bo; qxl_image_init_helper()
qxl_cmd.c
185 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); qxl_push_command_ring_release()
198 cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); qxl_push_cursor_ring_release()
261 struct qxl_bo *bo; qxl_alloc_bo_reserved() local
265 false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); qxl_alloc_bo_reserved()
270 ret = qxl_release_list_add(release, bo); qxl_alloc_bo_reserved()
274 *_bo = bo; qxl_alloc_bo_reserved()
277 qxl_bo_unref(&bo); qxl_alloc_bo_reserved()
380 unsigned offset, struct qxl_bo *bo) qxl_io_create_primary()
387 create->format = bo->surf.format; qxl_io_create_primary()
388 create->width = bo->surf.width; qxl_io_create_primary()
389 create->height = bo->surf.height; qxl_io_create_primary()
390 create->stride = bo->surf.stride; qxl_io_create_primary()
391 create->mem = qxl_bo_physical_address(qdev, bo, offset); qxl_io_create_primary()
394 bo->kptr); qxl_io_create_primary()
528 /* no need to add a release to the fence for this surface bo, qxl_hw_surface_alloc()
379 qxl_io_create_primary(struct qxl_device *qdev, unsigned offset, struct qxl_bo *bo) qxl_io_create_primary() argument
qxl_drv.h
204 struct qxl_bo *bo; member in struct:qxl_drm_chunk
208 struct qxl_bo *bo; member in struct:qxl_drm_image
371 qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, qxl_bo_physical_address() argument
374 int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot; qxl_bo_physical_address()
378 return slot->high_bits | (bo->tbo.offset + offset); qxl_bo_physical_address()
426 int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
463 struct qxl_bo *bo);
482 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
512 struct qxl_bo *bo,
573 int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
qxl_draw.c
221 ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); qxl_draw_opaque_fb()
225 qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); qxl_draw_opaque_fb()
243 qxl_bo_physical_address(qdev, dimage->bo, 0); qxl_draw_opaque_fb()
268 struct qxl_bo *bo, qxl_draw_dirty_fb()
341 ret = qxl_bo_kmap(bo, (void **)&surface_base); qxl_draw_dirty_fb()
348 qxl_bo_kunmap(bo); qxl_draw_dirty_fb()
374 drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); qxl_draw_dirty_fb()
266 qxl_draw_dirty_fb(struct qxl_device *qdev, struct qxl_framebuffer *qxl_fb, struct qxl_bo *bo, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips, int inc) qxl_draw_dirty_fb() argument
qxl_ioctl.c
31 * This is wasteful since bo's are page aligned.
80 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
238 /* reserve and validate the reloc dst bo */ qxl_process_single_command()
415 /* work out size allocate bo with handle */ qxl_alloc_surf_ioctl()
qxl_kms.c
183 DRM_ERROR("bo init failed %d\n", r); qxl_device_init()
/linux-4.1.27/drivers/gpu/drm/tegra/
gem.c
23 static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo) host1x_to_tegra_bo() argument
25 return container_of(bo, struct tegra_bo, base); host1x_to_tegra_bo()
28 static void tegra_bo_put(struct host1x_bo *bo) tegra_bo_put() argument
30 struct tegra_bo *obj = host1x_to_tegra_bo(bo); tegra_bo_put()
38 static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) tegra_bo_pin() argument
40 struct tegra_bo *obj = host1x_to_tegra_bo(bo); tegra_bo_pin()
45 static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) tegra_bo_unpin() argument
49 static void *tegra_bo_mmap(struct host1x_bo *bo) tegra_bo_mmap() argument
51 struct tegra_bo *obj = host1x_to_tegra_bo(bo); tegra_bo_mmap()
56 static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) tegra_bo_munmap() argument
60 static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page) tegra_bo_kmap() argument
62 struct tegra_bo *obj = host1x_to_tegra_bo(bo); tegra_bo_kmap()
67 static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page, tegra_bo_kunmap() argument
72 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo) tegra_bo_get() argument
74 struct tegra_bo *obj = host1x_to_tegra_bo(bo); tegra_bo_get()
81 return bo; tegra_bo_get()
95 static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) tegra_bo_iommu_map() argument
100 if (bo->mm) tegra_bo_iommu_map()
103 bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL); tegra_bo_iommu_map()
104 if (!bo->mm) tegra_bo_iommu_map()
107 err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size, tegra_bo_iommu_map()
115 bo->paddr = bo->mm->start; tegra_bo_iommu_map()
117 err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, tegra_bo_iommu_map()
118 bo->sgt->nents, prot); tegra_bo_iommu_map()
124 bo->size = err; tegra_bo_iommu_map()
129 drm_mm_remove_node(bo->mm); tegra_bo_iommu_map()
131 kfree(bo->mm); tegra_bo_iommu_map()
135 static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) tegra_bo_iommu_unmap() argument
137 if (!bo->mm) tegra_bo_iommu_unmap()
140 iommu_unmap(tegra->domain, bo->paddr, bo->size); tegra_bo_iommu_unmap()
141 drm_mm_remove_node(bo->mm); tegra_bo_iommu_unmap()
142 kfree(bo->mm); tegra_bo_iommu_unmap()
150 struct tegra_bo *bo; tegra_bo_alloc_object() local
153 bo = kzalloc(sizeof(*bo), GFP_KERNEL); tegra_bo_alloc_object()
154 if (!bo) tegra_bo_alloc_object()
157 host1x_bo_init(&bo->base, &tegra_bo_ops); tegra_bo_alloc_object()
160 err = drm_gem_object_init(drm, &bo->gem, size); tegra_bo_alloc_object()
164 err = drm_gem_create_mmap_offset(&bo->gem); tegra_bo_alloc_object()
168 return bo; tegra_bo_alloc_object()
171 drm_gem_object_release(&bo->gem); tegra_bo_alloc_object()
173 kfree(bo); tegra_bo_alloc_object()
177 static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) tegra_bo_free() argument
179 if (bo->pages) { tegra_bo_free()
180 drm_gem_put_pages(&bo->gem, bo->pages, true, true); tegra_bo_free()
181 sg_free_table(bo->sgt); tegra_bo_free()
182 kfree(bo->sgt); tegra_bo_free()
183 } else if (bo->vaddr) { tegra_bo_free()
184 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, tegra_bo_free()
185 bo->paddr); tegra_bo_free()
189 static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) tegra_bo_get_pages() argument
195 bo->pages = drm_gem_get_pages(&bo->gem); tegra_bo_get_pages()
196 if (IS_ERR(bo->pages)) tegra_bo_get_pages()
197 return PTR_ERR(bo->pages); tegra_bo_get_pages()
199 bo->num_pages = bo->gem.size >> PAGE_SHIFT; tegra_bo_get_pages()
201 sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); tegra_bo_get_pages()
220 bo->sgt = sgt; tegra_bo_get_pages()
229 drm_gem_put_pages(&bo->gem, bo->pages, false, false); tegra_bo_get_pages()
233 static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) tegra_bo_alloc() argument
239 err = tegra_bo_get_pages(drm, bo); tegra_bo_alloc()
243 err = tegra_bo_iommu_map(tegra, bo); tegra_bo_alloc()
245 tegra_bo_free(drm, bo); tegra_bo_alloc()
249 size_t size = bo->gem.size; tegra_bo_alloc()
251 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr, tegra_bo_alloc()
253 if (!bo->vaddr) { tegra_bo_alloc()
267 struct tegra_bo *bo; tegra_bo_create() local
270 bo = tegra_bo_alloc_object(drm, size); tegra_bo_create()
271 if (IS_ERR(bo)) tegra_bo_create()
272 return bo; tegra_bo_create()
274 err = tegra_bo_alloc(drm, bo); tegra_bo_create()
279 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; tegra_bo_create()
282 bo->flags |= TEGRA_BO_BOTTOM_UP; tegra_bo_create()
284 return bo; tegra_bo_create()
287 drm_gem_object_release(&bo->gem); tegra_bo_create()
288 kfree(bo); tegra_bo_create()
298 struct tegra_bo *bo; tegra_bo_create_with_handle() local
301 bo = tegra_bo_create(drm, size, flags); tegra_bo_create_with_handle()
302 if (IS_ERR(bo)) tegra_bo_create_with_handle()
303 return bo; tegra_bo_create_with_handle()
305 err = drm_gem_handle_create(file, &bo->gem, handle); tegra_bo_create_with_handle()
307 tegra_bo_free_object(&bo->gem); tegra_bo_create_with_handle()
311 drm_gem_object_unreference_unlocked(&bo->gem); tegra_bo_create_with_handle()
313 return bo; tegra_bo_create_with_handle()
321 struct tegra_bo *bo; tegra_bo_import() local
324 bo = tegra_bo_alloc_object(drm, buf->size); tegra_bo_import()
325 if (IS_ERR(bo)) tegra_bo_import()
326 return bo; tegra_bo_import()
336 bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE); tegra_bo_import()
337 if (!bo->sgt) { tegra_bo_import()
342 if (IS_ERR(bo->sgt)) { tegra_bo_import()
343 err = PTR_ERR(bo->sgt); tegra_bo_import()
348 err = tegra_bo_iommu_map(tegra, bo); tegra_bo_import()
352 if (bo->sgt->nents > 1) { tegra_bo_import()
357 bo->paddr = sg_dma_address(bo->sgt->sgl); tegra_bo_import()
360 bo->gem.import_attach = attach; tegra_bo_import()
362 return bo; tegra_bo_import()
365 if (!IS_ERR_OR_NULL(bo->sgt)) tegra_bo_import()
366 dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE); tegra_bo_import()
371 drm_gem_object_release(&bo->gem); tegra_bo_import()
372 kfree(bo); tegra_bo_import()
379 struct tegra_bo *bo = to_tegra_bo(gem); tegra_bo_free_object() local
382 tegra_bo_iommu_unmap(tegra, bo); tegra_bo_free_object()
385 dma_buf_unmap_attachment(gem->import_attach, bo->sgt, tegra_bo_free_object()
389 tegra_bo_free(gem->dev, bo); tegra_bo_free_object()
393 kfree(bo); tegra_bo_free_object()
401 struct tegra_bo *bo; tegra_bo_dumb_create() local
406 bo = tegra_bo_create_with_handle(file, drm, args->size, 0, tegra_bo_dumb_create()
408 if (IS_ERR(bo)) tegra_bo_dumb_create()
409 return PTR_ERR(bo); tegra_bo_dumb_create()
418 struct tegra_bo *bo; tegra_bo_dumb_map_offset() local
429 bo = to_tegra_bo(gem); tegra_bo_dumb_map_offset()
431 *offset = drm_vma_node_offset_addr(&bo->gem.vma_node); tegra_bo_dumb_map_offset()
443 struct tegra_bo *bo = to_tegra_bo(gem); tegra_bo_fault() local
448 if (!bo->pages) tegra_bo_fault()
452 page = bo->pages[offset]; tegra_bo_fault()
479 struct tegra_bo *bo; tegra_drm_mmap() local
487 bo = to_tegra_bo(gem); tegra_drm_mmap()
489 if (!bo->pages) { tegra_drm_mmap()
495 ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr, tegra_drm_mmap()
496 bo->paddr, gem->size); tegra_drm_mmap()
520 struct tegra_bo *bo = to_tegra_bo(gem); tegra_gem_prime_map_dma_buf() local
527 if (bo->pages) { tegra_gem_prime_map_dma_buf()
531 if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) tegra_gem_prime_map_dma_buf()
534 for_each_sg(sgt->sgl, sg, bo->num_pages, i) tegra_gem_prime_map_dma_buf()
535 sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); tegra_gem_prime_map_dma_buf()
543 sg_dma_address(sgt->sgl) = bo->paddr; tegra_gem_prime_map_dma_buf()
560 struct tegra_bo *bo = to_tegra_bo(gem); tegra_gem_prime_unmap_dma_buf() local
562 if (bo->pages) tegra_gem_prime_unmap_dma_buf()
604 struct tegra_bo *bo = to_tegra_bo(gem); tegra_gem_prime_vmap() local
606 return bo->vaddr; tegra_gem_prime_vmap()
643 struct tegra_bo *bo; tegra_gem_prime_import() local
654 bo = tegra_bo_import(drm, buf); tegra_gem_prime_import()
655 if (IS_ERR(bo)) tegra_gem_prime_import()
656 return ERR_CAST(bo); tegra_gem_prime_import()
658 return &bo->gem; tegra_gem_prime_import()
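All of the gem.c callbacks above lean on one layout fact: struct tegra_bo embeds a struct host1x_bo member named base (see host1x_bo_init(&bo->base, ...) at line 157), so host1x_to_tegra_bo() is just container_of(). The standalone sketch below illustrates that idiom as ordinary C; struct tegra_bo_demo, struct host1x_bo_demo, their fields and the simplified container_of() macro are all stand-ins, not the kernel definitions.

/* Standalone illustration of the container_of() idiom used above. */
#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): the kernel version adds type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct host1x_bo_demo { int refcount; };	/* stand-in for struct host1x_bo */

struct tegra_bo_demo {				/* stand-in for struct tegra_bo */
	unsigned long flags;
	struct host1x_bo_demo base;		/* embedded, as in the real driver */
};

static struct tegra_bo_demo *demo_host1x_to_tegra_bo(struct host1x_bo_demo *bo)
{
	/* Same shape as host1x_to_tegra_bo(): pointer arithmetic back to
	 * the start of the enclosing object. */
	return container_of(bo, struct tegra_bo_demo, base);
}

int main(void)
{
	struct tegra_bo_demo obj = { .flags = 0x1 };
	struct host1x_bo_demo *base = &obj.base;	/* what host1x code holds */

	printf("flags = 0x%lx\n", demo_host1x_to_tegra_bo(base)->flags);
	return 0;
}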
fb.c
66 struct tegra_bo *bo = fb->planes[i]; tegra_fb_destroy() local
68 if (bo) { tegra_fb_destroy()
69 if (bo->pages && bo->vaddr) tegra_fb_destroy()
70 vunmap(bo->vaddr); tegra_fb_destroy()
72 drm_gem_object_unreference_unlocked(&bo->gem); tegra_fb_destroy()
208 struct tegra_bo *bo; tegra_fbdev_probe() local
223 bo = tegra_bo_create(drm, size, 0); tegra_fbdev_probe()
224 if (IS_ERR(bo)) tegra_fbdev_probe()
225 return PTR_ERR(bo); tegra_fbdev_probe()
230 drm_gem_object_unreference_unlocked(&bo->gem); tegra_fbdev_probe()
234 fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1); tegra_fbdev_probe()
239 drm_gem_object_unreference_unlocked(&bo->gem); tegra_fbdev_probe()
263 if (bo->pages) { tegra_fbdev_probe()
264 bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, tegra_fbdev_probe()
266 if (!bo->vaddr) { tegra_fbdev_probe()
273 drm->mode_config.fb_base = (resource_size_t)bo->paddr; tegra_fbdev_probe()
274 info->screen_base = (void __iomem *)bo->vaddr + offset; tegra_fbdev_probe()
276 info->fix.smem_start = (unsigned long)(bo->paddr + offset); tegra_fbdev_probe()
drm.c
265 struct tegra_bo *bo; host1x_bo_lookup() local
275 bo = to_tegra_bo(gem); host1x_bo_lookup()
276 return &bo->base; host1x_bo_lookup()
307 dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf); host1x_reloc_copy_from_user()
308 if (!dest->cmdbuf.bo) host1x_reloc_copy_from_user()
311 dest->target.bo = host1x_bo_lookup(drm, file, target); host1x_reloc_copy_from_user()
312 if (!dest->target.bo) host1x_reloc_copy_from_user()
352 struct host1x_bo *bo; tegra_drm_submit() local
359 bo = host1x_bo_lookup(drm, file, cmdbuf.handle); tegra_drm_submit()
360 if (!bo) { tegra_drm_submit()
365 host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset); tegra_drm_submit()
442 struct tegra_bo *bo; tegra_gem_create() local
444 bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags, tegra_gem_create()
446 if (IS_ERR(bo)) tegra_gem_create()
447 return PTR_ERR(bo); tegra_gem_create()
457 struct tegra_bo *bo; tegra_gem_mmap() local
463 bo = to_tegra_bo(gem); tegra_gem_mmap()
465 args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node); tegra_gem_mmap()
636 struct tegra_bo *bo; tegra_gem_set_tiling() local
672 bo = to_tegra_bo(gem); tegra_gem_set_tiling()
674 bo->tiling.mode = mode; tegra_gem_set_tiling()
675 bo->tiling.value = value; tegra_gem_set_tiling()
687 struct tegra_bo *bo; tegra_gem_get_tiling() local
694 bo = to_tegra_bo(gem); tegra_gem_get_tiling()
696 switch (bo->tiling.mode) { tegra_gem_get_tiling()
709 args->value = bo->tiling.value; tegra_gem_get_tiling()
727 struct tegra_bo *bo; tegra_gem_set_flags() local
736 bo = to_tegra_bo(gem); tegra_gem_set_flags()
737 bo->flags = 0; tegra_gem_set_flags()
740 bo->flags |= TEGRA_BO_BOTTOM_UP; tegra_gem_set_flags()
752 struct tegra_bo *bo; tegra_gem_get_flags() local
758 bo = to_tegra_bo(gem); tegra_gem_get_flags()
761 if (bo->flags & TEGRA_BO_BOTTOM_UP) tegra_gem_get_flags()
dc.c
584 struct tegra_bo *bo = tegra_fb_get_plane(fb, i); tegra_plane_atomic_update() local
586 window.base[i] = bo->paddr + fb->offsets[i]; tegra_plane_atomic_update()
705 struct tegra_bo *bo = tegra_fb_get_plane(plane->state->fb, 0); tegra_cursor_atomic_update() local
737 value |= (bo->paddr >> 10) & 0x3fffff; tegra_cursor_atomic_update()
741 value = (bo->paddr >> 32) & 0x3; tegra_cursor_atomic_update()
951 struct tegra_bo *bo; tegra_dc_finish_page_flip() local
960 bo = tegra_fb_get_plane(crtc->primary->fb, 0); tegra_dc_finish_page_flip()
972 if (base == bo->paddr + crtc->primary->fb->offsets[0]) { tegra_dc_finish_page_flip()
/linux-4.1.27/drivers/gpu/drm/radeon/
radeon_object.c
43 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
50 static void radeon_update_memory_usage(struct radeon_bo *bo, radeon_update_memory_usage() argument
53 struct radeon_device *rdev = bo->rdev; radeon_update_memory_usage()
54 u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; radeon_update_memory_usage()
74 struct radeon_bo *bo; radeon_ttm_bo_destroy() local
76 bo = container_of(tbo, struct radeon_bo, tbo); radeon_ttm_bo_destroy()
78 radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); radeon_ttm_bo_destroy()
80 mutex_lock(&bo->rdev->gem.mutex); radeon_ttm_bo_destroy()
81 list_del_init(&bo->list); radeon_ttm_bo_destroy()
82 mutex_unlock(&bo->rdev->gem.mutex); radeon_ttm_bo_destroy()
83 radeon_bo_clear_surface_reg(bo); radeon_ttm_bo_destroy()
84 WARN_ON(!list_empty(&bo->va)); radeon_ttm_bo_destroy()
85 drm_gem_object_release(&bo->gem_base); radeon_ttm_bo_destroy()
86 kfree(bo); radeon_ttm_bo_destroy()
89 bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) radeon_ttm_bo_is_radeon_bo() argument
91 if (bo->destroy == &radeon_ttm_bo_destroy) radeon_ttm_bo_is_radeon_bo()
184 struct radeon_bo *bo; radeon_bo_create() local
204 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); radeon_bo_create()
205 if (bo == NULL) radeon_bo_create()
207 r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size); radeon_bo_create()
209 kfree(bo); radeon_bo_create()
212 bo->rdev = rdev; radeon_bo_create()
213 bo->surface_reg = -1; radeon_bo_create()
214 INIT_LIST_HEAD(&bo->list); radeon_bo_create()
215 INIT_LIST_HEAD(&bo->va); radeon_bo_create()
216 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | radeon_bo_create()
220 bo->flags = flags; radeon_bo_create()
223 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); radeon_bo_create()
229 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); radeon_bo_create()
241 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); radeon_bo_create()
247 bo->flags &= ~RADEON_GEM_GTT_WC; radeon_bo_create()
250 radeon_ttm_placement_from_domain(bo, domain); radeon_bo_create()
253 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, radeon_bo_create()
254 &bo->placement, page_align, !kernel, NULL, radeon_bo_create()
260 *bo_ptr = bo; radeon_bo_create()
262 trace_radeon_bo_create(bo); radeon_bo_create()
267 int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) radeon_bo_kmap() argument
272 if (bo->kptr) { radeon_bo_kmap()
274 *ptr = bo->kptr; radeon_bo_kmap()
278 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); radeon_bo_kmap()
282 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); radeon_bo_kmap()
284 *ptr = bo->kptr; radeon_bo_kmap()
286 radeon_bo_check_tiling(bo, 0, 0); radeon_bo_kmap()
290 void radeon_bo_kunmap(struct radeon_bo *bo) radeon_bo_kunmap() argument
292 if (bo->kptr == NULL) radeon_bo_kunmap()
294 bo->kptr = NULL; radeon_bo_kunmap()
295 radeon_bo_check_tiling(bo, 0, 0); radeon_bo_kunmap()
296 ttm_bo_kunmap(&bo->kmap); radeon_bo_kunmap()
299 struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo) radeon_bo_ref() argument
301 if (bo == NULL) radeon_bo_ref()
304 ttm_bo_reference(&bo->tbo); radeon_bo_ref()
305 return bo; radeon_bo_ref()
308 void radeon_bo_unref(struct radeon_bo **bo) radeon_bo_unref() argument
313 if ((*bo) == NULL) radeon_bo_unref()
315 rdev = (*bo)->rdev; radeon_bo_unref()
316 tbo = &((*bo)->tbo); radeon_bo_unref()
319 *bo = NULL; radeon_bo_unref()
322 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, radeon_bo_pin_restricted() argument
327 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) radeon_bo_pin_restricted()
330 if (bo->pin_count) { radeon_bo_pin_restricted()
331 bo->pin_count++; radeon_bo_pin_restricted()
333 *gpu_addr = radeon_bo_gpu_offset(bo); radeon_bo_pin_restricted()
339 domain_start = bo->rdev->mc.vram_start; radeon_bo_pin_restricted()
341 domain_start = bo->rdev->mc.gtt_start; radeon_bo_pin_restricted()
343 (radeon_bo_gpu_offset(bo) - domain_start)); radeon_bo_pin_restricted()
348 radeon_ttm_placement_from_domain(bo, domain); radeon_bo_pin_restricted()
349 for (i = 0; i < bo->placement.num_placement; i++) { radeon_bo_pin_restricted()
351 if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) && radeon_bo_pin_restricted()
352 !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) && radeon_bo_pin_restricted()
353 (!max_offset || max_offset > bo->rdev->mc.visible_vram_size)) radeon_bo_pin_restricted()
354 bo->placements[i].lpfn = radeon_bo_pin_restricted()
355 bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; radeon_bo_pin_restricted()
357 bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; radeon_bo_pin_restricted()
359 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; radeon_bo_pin_restricted()
362 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); radeon_bo_pin_restricted()
364 bo->pin_count = 1; radeon_bo_pin_restricted()
366 *gpu_addr = radeon_bo_gpu_offset(bo); radeon_bo_pin_restricted()
368 bo->rdev->vram_pin_size += radeon_bo_size(bo); radeon_bo_pin_restricted()
370 bo->rdev->gart_pin_size += radeon_bo_size(bo); radeon_bo_pin_restricted()
372 dev_err(bo->rdev->dev, "%p pin failed\n", bo); radeon_bo_pin_restricted()
377 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) radeon_bo_pin() argument
379 return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); radeon_bo_pin()
382 int radeon_bo_unpin(struct radeon_bo *bo) radeon_bo_unpin() argument
386 if (!bo->pin_count) { radeon_bo_unpin()
387 dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); radeon_bo_unpin()
390 bo->pin_count--; radeon_bo_unpin()
391 if (bo->pin_count) radeon_bo_unpin()
393 for (i = 0; i < bo->placement.num_placement; i++) { radeon_bo_unpin()
394 bo->placements[i].lpfn = 0; radeon_bo_unpin()
395 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; radeon_bo_unpin()
397 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); radeon_bo_unpin()
399 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) radeon_bo_unpin()
400 bo->rdev->vram_pin_size -= radeon_bo_size(bo); radeon_bo_unpin()
402 bo->rdev->gart_pin_size -= radeon_bo_size(bo); radeon_bo_unpin()
404 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); radeon_bo_unpin()
422 struct radeon_bo *bo, *n; radeon_bo_force_delete() local
428 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { radeon_bo_force_delete()
431 &bo->gem_base, bo, (unsigned long)bo->gem_base.size, radeon_bo_force_delete()
432 *((unsigned long *)&bo->gem_base.refcount)); radeon_bo_force_delete()
433 mutex_lock(&bo->rdev->gem.mutex); radeon_bo_force_delete()
434 list_del_init(&bo->list); radeon_bo_force_delete()
435 mutex_unlock(&bo->rdev->gem.mutex); radeon_bo_force_delete()
436 /* this should unref the ttm bo */ radeon_bo_force_delete()
437 drm_gem_object_unreference(&bo->gem_base); radeon_bo_force_delete()
532 struct radeon_bo *bo = lobj->robj; list_for_each_entry() local
533 if (!bo->pin_count) { list_for_each_entry()
537 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); list_for_each_entry()
543 * any size, because it doesn't take the current "bo" list_for_each_entry()
555 radeon_ttm_placement_from_domain(bo, domain); list_for_each_entry()
557 radeon_uvd_force_into_uvd_segment(bo, allowed); list_for_each_entry()
560 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); list_for_each_entry()
574 lobj->gpu_offset = radeon_bo_gpu_offset(bo); list_for_each_entry()
575 lobj->tiling_flags = bo->tiling_flags; list_for_each_entry()
586 int radeon_bo_get_surface_reg(struct radeon_bo *bo) radeon_bo_get_surface_reg() argument
588 struct radeon_device *rdev = bo->rdev; radeon_bo_get_surface_reg()
594 lockdep_assert_held(&bo->tbo.resv->lock.base); radeon_bo_get_surface_reg()
596 if (!bo->tiling_flags) radeon_bo_get_surface_reg()
599 if (bo->surface_reg >= 0) { radeon_bo_get_surface_reg()
600 reg = &rdev->surface_regs[bo->surface_reg]; radeon_bo_get_surface_reg()
601 i = bo->surface_reg; radeon_bo_get_surface_reg()
609 if (!reg->bo) radeon_bo_get_surface_reg()
612 old_object = reg->bo; radeon_bo_get_surface_reg()
623 old_object = reg->bo; radeon_bo_get_surface_reg()
631 bo->surface_reg = i; radeon_bo_get_surface_reg()
632 reg->bo = bo; radeon_bo_get_surface_reg()
635 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, radeon_bo_get_surface_reg()
636 bo->tbo.mem.start << PAGE_SHIFT, radeon_bo_get_surface_reg()
637 bo->tbo.num_pages << PAGE_SHIFT); radeon_bo_get_surface_reg()
641 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) radeon_bo_clear_surface_reg() argument
643 struct radeon_device *rdev = bo->rdev; radeon_bo_clear_surface_reg()
646 if (bo->surface_reg == -1) radeon_bo_clear_surface_reg()
649 reg = &rdev->surface_regs[bo->surface_reg]; radeon_bo_clear_surface_reg()
650 radeon_clear_surface_reg(rdev, bo->surface_reg); radeon_bo_clear_surface_reg()
652 reg->bo = NULL; radeon_bo_clear_surface_reg()
653 bo->surface_reg = -1; radeon_bo_clear_surface_reg()
656 int radeon_bo_set_tiling_flags(struct radeon_bo *bo, radeon_bo_set_tiling_flags() argument
659 struct radeon_device *rdev = bo->rdev; radeon_bo_set_tiling_flags()
707 r = radeon_bo_reserve(bo, false); radeon_bo_set_tiling_flags()
710 bo->tiling_flags = tiling_flags; radeon_bo_set_tiling_flags()
711 bo->pitch = pitch; radeon_bo_set_tiling_flags()
712 radeon_bo_unreserve(bo); radeon_bo_set_tiling_flags()
716 void radeon_bo_get_tiling_flags(struct radeon_bo *bo, radeon_bo_get_tiling_flags() argument
720 lockdep_assert_held(&bo->tbo.resv->lock.base); radeon_bo_get_tiling_flags()
723 *tiling_flags = bo->tiling_flags; radeon_bo_get_tiling_flags()
725 *pitch = bo->pitch; radeon_bo_get_tiling_flags()
728 int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, radeon_bo_check_tiling() argument
732 lockdep_assert_held(&bo->tbo.resv->lock.base); radeon_bo_check_tiling()
734 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) radeon_bo_check_tiling()
738 radeon_bo_clear_surface_reg(bo); radeon_bo_check_tiling()
742 if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { radeon_bo_check_tiling()
746 if (bo->surface_reg >= 0) radeon_bo_check_tiling()
747 radeon_bo_clear_surface_reg(bo); radeon_bo_check_tiling()
751 if ((bo->surface_reg >= 0) && !has_moved) radeon_bo_check_tiling()
754 return radeon_bo_get_surface_reg(bo); radeon_bo_check_tiling()
757 void radeon_bo_move_notify(struct ttm_buffer_object *bo, radeon_bo_move_notify() argument
762 if (!radeon_ttm_bo_is_radeon_bo(bo)) radeon_bo_move_notify()
765 rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_move_notify()
773 radeon_update_memory_usage(rbo, bo->mem.mem_type, -1); radeon_bo_move_notify()
777 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) radeon_bo_fault_reserve_notify() argument
784 if (!radeon_ttm_bo_is_radeon_bo(bo)) radeon_bo_fault_reserve_notify()
786 rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_fault_reserve_notify()
789 if (bo->mem.mem_type != TTM_PL_VRAM) radeon_bo_fault_reserve_notify()
792 size = bo->mem.num_pages << PAGE_SHIFT; radeon_bo_fault_reserve_notify()
793 offset = bo->mem.start << PAGE_SHIFT; radeon_bo_fault_reserve_notify()
806 r = ttm_bo_validate(bo, &rbo->placement, false, false); radeon_bo_fault_reserve_notify()
809 return ttm_bo_validate(bo, &rbo->placement, false, false); radeon_bo_fault_reserve_notify()
814 offset = bo->mem.start << PAGE_SHIFT; radeon_bo_fault_reserve_notify()
822 int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) radeon_bo_wait() argument
826 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL); radeon_bo_wait()
830 *mem_type = bo->tbo.mem.mem_type; radeon_bo_wait()
832 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); radeon_bo_wait()
833 ttm_bo_unreserve(&bo->tbo); radeon_bo_wait()
840 * @bo: buffer object in question
845 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, radeon_bo_fence() argument
848 struct reservation_object *resv = bo->tbo.resv; radeon_bo_fence()
radeon_prime.c
34 struct radeon_bo *bo = gem_to_radeon_bo(obj); radeon_gem_prime_get_sg_table() local
35 int npages = bo->tbo.num_pages; radeon_gem_prime_get_sg_table()
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); radeon_gem_prime_get_sg_table()
42 struct radeon_bo *bo = gem_to_radeon_bo(obj); radeon_gem_prime_vmap() local
45 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, radeon_gem_prime_vmap()
46 &bo->dma_buf_vmap); radeon_gem_prime_vmap()
50 return bo->dma_buf_vmap.virtual; radeon_gem_prime_vmap()
55 struct radeon_bo *bo = gem_to_radeon_bo(obj); radeon_gem_prime_vunmap() local
57 ttm_bo_kunmap(&bo->dma_buf_vmap); radeon_gem_prime_vunmap()
66 struct radeon_bo *bo; radeon_gem_prime_import_sg_table() local
71 RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo); radeon_gem_prime_import_sg_table()
77 list_add_tail(&bo->list, &rdev->gem.objects); radeon_gem_prime_import_sg_table()
80 return &bo->gem_base; radeon_gem_prime_import_sg_table()
85 struct radeon_bo *bo = gem_to_radeon_bo(obj); radeon_gem_prime_pin() local
88 ret = radeon_bo_reserve(bo, false); radeon_gem_prime_pin()
93 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); radeon_gem_prime_pin()
94 radeon_bo_unreserve(bo); radeon_gem_prime_pin()
100 struct radeon_bo *bo = gem_to_radeon_bo(obj); radeon_gem_prime_unpin() local
103 ret = radeon_bo_reserve(bo, false); radeon_gem_prime_unpin()
107 radeon_bo_unpin(bo); radeon_gem_prime_unpin()
108 radeon_bo_unreserve(bo); radeon_gem_prime_unpin()
114 struct radeon_bo *bo = gem_to_radeon_bo(obj); radeon_gem_prime_res_obj() local
116 return bo->tbo.resv; radeon_gem_prime_res_obj()
123 struct radeon_bo *bo = gem_to_radeon_bo(gobj); radeon_gem_prime_export() local
124 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) radeon_gem_prime_export()
radeon_object.h
56 * radeon_bo_reserve - reserve bo
57 * @bo: bo structure
64 static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr) radeon_bo_reserve() argument
68 r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL); radeon_bo_reserve()
71 dev_err(bo->rdev->dev, "%p reserve failed\n", bo); radeon_bo_reserve()
77 static inline void radeon_bo_unreserve(struct radeon_bo *bo) radeon_bo_unreserve() argument
79 ttm_bo_unreserve(&bo->tbo); radeon_bo_unreserve()
83 * radeon_bo_gpu_offset - return GPU offset of bo
84 * @bo: radeon object for which we query the offset
91 static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) radeon_bo_gpu_offset() argument
93 return bo->tbo.offset; radeon_bo_gpu_offset()
96 static inline unsigned long radeon_bo_size(struct radeon_bo *bo) radeon_bo_size() argument
98 return bo->tbo.num_pages << PAGE_SHIFT; radeon_bo_size()
101 static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) radeon_bo_ngpu_pages() argument
103 return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; radeon_bo_ngpu_pages()
106 static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo) radeon_bo_gpu_page_alignment() argument
108 return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; radeon_bo_gpu_page_alignment()
112 * radeon_bo_mmap_offset - return mmap offset of bo
113 * @bo: radeon object for which we query the offset
117 static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) radeon_bo_mmap_offset() argument
119 return drm_vma_node_offset_addr(&bo->tbo.vma_node); radeon_bo_mmap_offset()
122 extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
131 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
132 extern void radeon_bo_kunmap(struct radeon_bo *bo);
133 extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
134 extern void radeon_bo_unref(struct radeon_bo **bo);
135 extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
136 extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
138 extern int radeon_bo_unpin(struct radeon_bo *bo);
146 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
148 extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
150 extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
152 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
154 extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
155 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
156 extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
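The radeon helpers above mirror the qxl set: CPU access goes through reserve, pin and kmap, then the inverses, which is the same sequence the radeon_sa.c and radeon_kfd.c hits further down perform. A minimal sketch of that sequence follows; it assumes an already-created struct radeon_bo in a kernel build context, the demo_* wrapper names are invented, and only helpers declared above plus the RADEON_GEM_DOMAIN_GTT flag (which also appears in the hits) are used.

/* Sketch only: reserve -> pin -> kmap and its inverse, using the
 * radeon_object.h helpers listed above. Hypothetical wrappers. */
#include "radeon.h"
#include "radeon_object.h"

static int radeon_demo_map_gtt_bo(struct radeon_bo *bo, u64 *gpu_addr,
				  void **cpu_ptr)
{
	int r;

	r = radeon_bo_reserve(bo, false);	/* false = interruptible reserve */
	if (r)
		return r;

	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
	if (r)
		goto out_unreserve;

	r = radeon_bo_kmap(bo, cpu_ptr);	/* CPU mapping, valid while pinned */
	if (r)
		radeon_bo_unpin(bo);

out_unreserve:
	radeon_bo_unreserve(bo);
	return r;
}

static void radeon_demo_unmap_gtt_bo(struct radeon_bo *bo)
{
	if (radeon_bo_reserve(bo, true))	/* true = uninterruptible */
		return;
	radeon_bo_kunmap(bo);
	radeon_bo_unpin(bo);
	radeon_bo_unreserve(bo);
}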
radeon_mn.c
73 struct radeon_bo *bo, *next_bo; radeon_mn_destroy() local
82 list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) { radeon_mn_destroy()
83 bo->mn = NULL; radeon_mn_destroy()
84 list_del_init(&bo->mn_list); radeon_mn_destroy()
137 struct radeon_bo *bo; radeon_mn_invalidate_range_start() local
143 list_for_each_entry(bo, &node->bos, mn_list) { radeon_mn_invalidate_range_start()
145 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) radeon_mn_invalidate_range_start()
148 r = radeon_bo_reserve(bo, true); radeon_mn_invalidate_range_start()
150 DRM_ERROR("(%ld) failed to reserve user bo\n", r); radeon_mn_invalidate_range_start()
154 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, radeon_mn_invalidate_range_start()
157 DRM_ERROR("(%ld) failed to wait for user bo\n", r); radeon_mn_invalidate_range_start()
159 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); radeon_mn_invalidate_range_start()
160 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); radeon_mn_invalidate_range_start()
162 DRM_ERROR("(%ld) failed to validate user bo\n", r); radeon_mn_invalidate_range_start()
164 radeon_bo_unreserve(bo); radeon_mn_invalidate_range_start()
231 * @bo: radeon buffer object
237 int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) radeon_mn_register() argument
239 unsigned long end = addr + radeon_bo_size(bo) - 1; radeon_mn_register()
240 struct radeon_device *rdev = bo->rdev; radeon_mn_register()
271 bo->mn = rmn; radeon_mn_register()
277 list_add(&bo->mn_list, &node->bos); radeon_mn_register()
289 * @bo: radeon buffer object
293 void radeon_mn_unregister(struct radeon_bo *bo) radeon_mn_unregister() argument
295 struct radeon_device *rdev = bo->rdev; radeon_mn_unregister()
300 rmn = bo->mn; radeon_mn_unregister()
308 head = bo->mn_list.next; radeon_mn_unregister()
310 bo->mn = NULL; radeon_mn_unregister()
311 list_del(&bo->mn_list); radeon_mn_unregister()
radeon_sa.c
32 * We store the last allocated bo in "hole", we always try to allocate
33 * after the last allocated bo. Principle is that in a linear GPU ring
34 * progression was is after last is the oldest bo we allocated and thus
37 * If it's not the case we skip over the bo after last to the closest
38 * done bo if such one exist. If none exist and we are not asked to
57 sa_manager->bo = NULL; radeon_sa_bo_manager_init()
68 domain, flags, NULL, NULL, &sa_manager->bo); radeon_sa_bo_manager_init()
70 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); radeon_sa_bo_manager_init()
92 radeon_bo_unref(&sa_manager->bo); radeon_sa_bo_manager_fini()
101 if (sa_manager->bo == NULL) { radeon_sa_bo_manager_start()
102 dev_err(rdev->dev, "no bo for sa manager\n"); radeon_sa_bo_manager_start()
107 r = radeon_bo_reserve(sa_manager->bo, false); radeon_sa_bo_manager_start()
109 dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r); radeon_sa_bo_manager_start()
112 r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr); radeon_sa_bo_manager_start()
114 radeon_bo_unreserve(sa_manager->bo); radeon_sa_bo_manager_start()
115 dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r); radeon_sa_bo_manager_start()
118 r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); radeon_sa_bo_manager_start()
119 radeon_bo_unreserve(sa_manager->bo); radeon_sa_bo_manager_start()
128 if (sa_manager->bo == NULL) { radeon_sa_bo_manager_suspend()
129 dev_err(rdev->dev, "no bo for sa manager\n"); radeon_sa_bo_manager_suspend()
133 r = radeon_bo_reserve(sa_manager->bo, false); radeon_sa_bo_manager_suspend()
135 radeon_bo_kunmap(sa_manager->bo); radeon_sa_bo_manager_suspend()
136 radeon_bo_unpin(sa_manager->bo); radeon_sa_bo_manager_suspend()
137 radeon_bo_unreserve(sa_manager->bo); radeon_sa_bo_manager_suspend()
294 /* this sa bo is the closest one */ radeon_sa_bo_next_hole()
radeon_vm.c
144 list[0].tv.bo = &vm->page_directory->tbo; radeon_vm_get_bos()
150 if (!vm->page_tables[i].bo) radeon_vm_get_bos()
153 list[idx].robj = vm->page_tables[i].bo; radeon_vm_get_bos()
156 list[idx].tv.bo = &list[idx].robj->tbo; radeon_vm_get_bos()
282 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
285 * @bo: requested buffer object
287 * Find @bo inside the requested vm (cayman+).
294 struct radeon_bo *bo) radeon_vm_bo_find()
298 list_for_each_entry(bo_va, &bo->va, bo_list) { radeon_vm_bo_find()
307 * radeon_vm_bo_add - add a bo to a specific vm
311 * @bo: radeon buffer object
313 * Add @bo into the requested vm (cayman+).
314 * Add @bo to the list of bos associated with the vm
321 struct radeon_bo *bo) radeon_vm_bo_add()
330 bo_va->bo = bo; radeon_vm_bo_add()
340 list_add_tail(&bo_va->bo_list, &bo->va); radeon_vm_bo_add()
386 * @bo: bo to clear
389 struct radeon_bo *bo) radeon_vm_clear_bo()
396 r = radeon_bo_reserve(bo, false); radeon_vm_clear_bo()
400 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); radeon_vm_clear_bo()
404 addr = radeon_bo_gpu_offset(bo); radeon_vm_clear_bo()
405 entries = radeon_bo_size(bo) / 8; radeon_vm_clear_bo()
422 radeon_bo_fence(bo, ib.fence, false); radeon_vm_clear_bo()
428 radeon_bo_unreserve(bo); radeon_vm_clear_bo()
451 uint64_t size = radeon_bo_size(bo_va->bo); radeon_vm_bo_set_addr()
486 /* bo and tmp overlap, invalid offset */ radeon_vm_bo_set_addr()
487 dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " radeon_vm_bo_set_addr()
488 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, radeon_vm_bo_set_addr()
489 soffset, tmp->bo, tmp->it.start, tmp->it.last); radeon_vm_bo_set_addr()
510 tmp->bo = radeon_bo_ref(bo_va->bo); radeon_vm_bo_set_addr()
540 radeon_bo_unreserve(bo_va->bo); radeon_vm_bo_set_addr()
546 if (vm->page_tables[pt_idx].bo) radeon_vm_bo_set_addr()
567 if (vm->page_tables[pt_idx].bo) { radeon_vm_bo_set_addr()
576 vm->page_tables[pt_idx].bo = pt; radeon_vm_bo_set_addr()
583 radeon_bo_unreserve(bo_va->bo); radeon_vm_bo_set_addr()
670 struct radeon_bo *bo = vm->page_tables[pt_idx].bo; radeon_vm_update_page_directory() local
673 if (bo == NULL) radeon_vm_update_page_directory()
676 pt = radeon_bo_gpu_offset(bo); radeon_vm_update_page_directory()
829 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; radeon_vm_update_ptes()
897 radeon_bo_fence(vm->page_tables[i].bo, fence, true); radeon_vm_fence_pts()
901 * radeon_vm_bo_update - map a bo into the vm page table
905 * @bo: radeon buffer object
908 * Fill in the page table entries for @bo (cayman+).
925 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", radeon_vm_bo_update()
926 bo_va->bo, vm); radeon_vm_bo_update()
937 if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) radeon_vm_bo_update()
947 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC))) radeon_vm_bo_update()
1057 radeon_bo_unref(&bo_va->bo); radeon_vm_clear_freed()
1105 * radeon_vm_bo_rmv - remove a bo to a specific vm
1110 * Remove @bo_va->bo from the requested vm (cayman+).
1128 bo_va->bo = radeon_bo_ref(bo_va->bo); radeon_vm_bo_rmv()
1140 * radeon_vm_bo_invalidate - mark the bo as invalid
1144 * @bo: radeon buffer object
1146 * Mark @bo as invalid (cayman+).
1149 struct radeon_bo *bo) radeon_vm_bo_invalidate()
1153 list_for_each_entry(bo_va, &bo->va, bo_list) { radeon_vm_bo_invalidate()
1224 * Unbind the VM and remove all bos from the vm bo list
1232 dev_err(rdev->dev, "still active bo inside vm\n"); radeon_vm_fini()
1236 r = radeon_bo_reserve(bo_va->bo, false); radeon_vm_fini()
1239 radeon_bo_unreserve(bo_va->bo); radeon_vm_fini()
1245 radeon_bo_unref(&bo_va->bo); radeon_vm_fini()
1251 radeon_bo_unref(&vm->page_tables[i].bo); radeon_vm_fini()
293 radeon_vm_bo_find(struct radeon_vm *vm, struct radeon_bo *bo) radeon_vm_bo_find() argument
319 radeon_vm_bo_add(struct radeon_device *rdev, struct radeon_vm *vm, struct radeon_bo *bo) radeon_vm_bo_add() argument
388 radeon_vm_clear_bo(struct radeon_device *rdev, struct radeon_bo *bo) radeon_vm_clear_bo() argument
1148 radeon_vm_bo_invalidate(struct radeon_device *rdev, struct radeon_bo *bo) radeon_vm_bo_invalidate() argument
radeon_trace.h
15 TP_PROTO(struct radeon_bo *bo),
16 TP_ARGS(bo),
18 __field(struct radeon_bo *, bo)
23 __entry->bo = bo;
24 __entry->pages = bo->tbo.num_pages;
26 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
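The radeon_trace.h fragments above (lines 15-26) all belong to one tracepoint definition whose call site, trace_radeon_bo_create(), shows up in the radeon_object.c hits at line 262. Below is a reconstruction sketch of how those fragments fit together; the TP_STRUCT__entry/TP_fast_assign wrappers and the u32 type of the pages field are filled in from the standard TRACE_EVENT layout rather than taken from the hits, and the usual TRACE_SYSTEM/include boilerplate around the file is omitted.

/* Reconstruction sketch only: wrapper macros and the u32 "pages" field
 * type are assumptions; the proto, assignment and printk lines are the
 * fragments listed above. */
TRACE_EVENT(radeon_bo_create,
	    TP_PROTO(struct radeon_bo *bo),
	    TP_ARGS(bo),

	    TP_STRUCT__entry(
			     __field(struct radeon_bo *, bo)
			     __field(u32, pages)
			     ),

	    TP_fast_assign(
			   __entry->bo = bo;
			   __entry->pages = bo->tbo.num_pages;
			   ),

	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);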
radeon_ttm.c
178 static void radeon_evict_flags(struct ttm_buffer_object *bo, radeon_evict_flags() argument
189 if (!radeon_ttm_bo_is_radeon_bo(bo)) { radeon_evict_flags()
196 rbo = container_of(bo, struct radeon_bo, tbo); radeon_evict_flags()
197 switch (bo->mem.mem_type) { radeon_evict_flags()
202 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { radeon_evict_flags()
234 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) radeon_verify_access() argument
236 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); radeon_verify_access()
238 if (radeon_ttm_tt_has_userptr(bo->ttm)) radeon_verify_access()
243 static void radeon_move_null(struct ttm_buffer_object *bo, radeon_move_null() argument
246 struct ttm_mem_reg *old_mem = &bo->mem; radeon_move_null()
253 static int radeon_move_blit(struct ttm_buffer_object *bo, radeon_move_blit() argument
264 rdev = radeon_get_rdev(bo->bdev); radeon_move_blit()
299 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); radeon_move_blit()
303 r = ttm_bo_move_accel_cleanup(bo, &fence->base, radeon_move_blit()
309 static int radeon_move_vram_ram(struct ttm_buffer_object *bo, radeon_move_vram_ram() argument
315 struct ttm_mem_reg *old_mem = &bo->mem; radeon_move_vram_ram()
321 rdev = radeon_get_rdev(bo->bdev); radeon_move_vram_ram()
331 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, radeon_move_vram_ram()
337 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); radeon_move_vram_ram()
342 r = ttm_tt_bind(bo->ttm, &tmp_mem); radeon_move_vram_ram()
346 r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); radeon_move_vram_ram()
350 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); radeon_move_vram_ram()
352 ttm_bo_mem_put(bo, &tmp_mem); radeon_move_vram_ram()
356 static int radeon_move_ram_vram(struct ttm_buffer_object *bo, radeon_move_ram_vram() argument
362 struct ttm_mem_reg *old_mem = &bo->mem; radeon_move_ram_vram()
368 rdev = radeon_get_rdev(bo->bdev); radeon_move_ram_vram()
378 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, radeon_move_ram_vram()
383 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); radeon_move_ram_vram()
387 r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); radeon_move_ram_vram()
392 ttm_bo_mem_put(bo, &tmp_mem); radeon_move_ram_vram()
396 static int radeon_bo_move(struct ttm_buffer_object *bo, radeon_bo_move() argument
402 struct ttm_mem_reg *old_mem = &bo->mem; radeon_bo_move()
405 rdev = radeon_get_rdev(bo->bdev); radeon_bo_move()
406 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { radeon_bo_move()
407 radeon_move_null(bo, new_mem); radeon_bo_move()
415 radeon_move_null(bo, new_mem); radeon_bo_move()
426 r = radeon_move_vram_ram(bo, evict, interruptible, radeon_bo_move()
430 r = radeon_move_ram_vram(bo, evict, interruptible, radeon_bo_move()
433 r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); radeon_bo_move()
438 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); radeon_bo_move()
445 atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); radeon_bo_move()
971 struct ttm_buffer_object *bo; radeon_ttm_fault() local
975 bo = (struct ttm_buffer_object *)vma->vm_private_data; radeon_ttm_fault()
976 if (bo == NULL) { radeon_ttm_fault()
979 rdev = radeon_get_rdev(bo->bdev); radeon_ttm_fault()
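The radeon_ttm.c hits trace the usual TTM move dispatch: a "null" move when nothing is bound yet, a GPU blit when a copy ring is usable, VRAM<->RAM bounces through a GTT staging buffer, and a CPU memcpy as the last resort. A condensed sketch of that dispatch follows; it is not the verbatim function body, and the blit/bounce branches are collapsed into a comment.

static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                           bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                /* Nothing is bound yet: just adopt the new placement. */
                radeon_move_null(bo, new_mem);
                return 0;
        }

        /* The real function prefers radeon_move_blit() (or the
         * radeon_move_vram_ram()/radeon_move_ram_vram() bounce helpers)
         * and only then falls back to a CPU copy. */
        return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}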
H A Dradeon_kfd.c38 struct radeon_bo *bo; member in struct:kgd_mem
215 RADEON_GEM_GTT_WC, NULL, NULL, &(*mem)->bo); alloc_gtt_mem()
223 r = radeon_bo_reserve((*mem)->bo, true); alloc_gtt_mem()
225 dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r); alloc_gtt_mem()
229 r = radeon_bo_pin((*mem)->bo, RADEON_GEM_DOMAIN_GTT, alloc_gtt_mem()
232 dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r); alloc_gtt_mem()
237 r = radeon_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr); alloc_gtt_mem()
240 "(%d) failed to map bo to kernel for amdkfd\n", r); alloc_gtt_mem()
245 radeon_bo_unreserve((*mem)->bo); alloc_gtt_mem()
250 radeon_bo_unpin((*mem)->bo); alloc_gtt_mem()
252 radeon_bo_unreserve((*mem)->bo); alloc_gtt_mem()
254 radeon_bo_unref(&(*mem)->bo); alloc_gtt_mem()
265 radeon_bo_reserve(mem->bo, true); free_gtt_mem()
266 radeon_bo_kunmap(mem->bo); free_gtt_mem()
267 radeon_bo_unpin(mem->bo); free_gtt_mem()
268 radeon_bo_unreserve(mem->bo); free_gtt_mem()
269 radeon_bo_unref(&(mem->bo)); free_gtt_mem()
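alloc_gtt_mem()/free_gtt_mem() above follow the standard bo lifecycle: create, reserve, pin into GTT, kmap, unreserve, and the reverse on teardown. A minimal sketch of the middle of that sequence, with error unwinding mirroring the listed calls; the gpu_addr and cpu_ptr members of struct kgd_mem are assumed from context.

static int example_pin_and_map(struct kgd_mem *mem)
{
        int r;

        r = radeon_bo_reserve(mem->bo, true);
        if (r)
                return r;

        r = radeon_bo_pin(mem->bo, RADEON_GEM_DOMAIN_GTT, &mem->gpu_addr);
        if (r)
                goto out_unreserve;

        r = radeon_bo_kmap(mem->bo, &mem->cpu_ptr);
        if (r)
                goto out_unpin;

        radeon_bo_unreserve(mem->bo);
        return 0;

out_unpin:
        radeon_bo_unpin(mem->bo);
out_unreserve:
        radeon_bo_unreserve(mem->bo);
        return r;
}

Note that the reservation is dropped once the buffer is pinned and mapped: the pin keeps the pages resident, so the kernel mapping stays valid without holding the reservation.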
H A Dradeon_gem.c59 /* Maximum bo size is the unpinned gtt size since we use the gtt to radeon_gem_object_create()
188 dev_err(rdev->dev, "leaking bo va because " radeon_gem_object_close()
189 "we fail to reserve bo (%d)\n", r); radeon_gem_object_close()
287 struct radeon_bo *bo; radeon_gem_userptr_ioctl() local
322 bo = gem_to_radeon_bo(gobj); radeon_gem_userptr_ioctl()
323 r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); radeon_gem_userptr_ioctl()
328 r = radeon_mn_register(bo, args->addr); radeon_gem_userptr_ioctl()
335 r = radeon_bo_reserve(bo, true); radeon_gem_userptr_ioctl()
341 radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); radeon_gem_userptr_ioctl()
342 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); radeon_gem_userptr_ioctl()
343 radeon_bo_unreserve(bo); radeon_gem_userptr_ioctl()
546 tv.bo = &bo_va->bo->tbo; radeon_gem_va_update_vm()
559 domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type); radeon_gem_va_update_vm()
572 r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem); radeon_gem_va_update_vm()
789 seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n", radeon_debugfs_gem_info()
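For completeness, the reserve/validate/unreserve step of the userptr path above, reassembled from the radeon_gem_userptr_ioctl() fragments into one contiguous sketch; the surrounding flag checks and object cleanup are omitted.

static int example_bind_userptr(struct radeon_bo *bo)
{
        int r;

        r = radeon_bo_reserve(bo, true);
        if (r)
                return r;       /* the real code unwinds the GEM object here */

        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        radeon_bo_unreserve(bo);
        return r;
}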
H A Dradeon_cs.c167 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; radeon_cs_parser_relocs()
423 struct radeon_bo *bo = parser->relocs[i].robj; radeon_cs_parser_fini() local
424 if (bo == NULL) radeon_cs_parser_fini()
427 drm_gem_object_unreference_unlocked(&bo->gem_base); radeon_cs_parser_fini()
499 &rdev->ring_tmp_bo.bo->tbo.mem); radeon_bo_vm_update_pte()
504 struct radeon_bo *bo; radeon_bo_vm_update_pte() local
506 bo = p->relocs[i].robj; radeon_bo_vm_update_pte()
507 bo_va = radeon_vm_bo_find(vm, bo); radeon_bo_vm_update_pte()
509 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); radeon_bo_vm_update_pte()
513 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); radeon_bo_vm_update_pte()
819 * Check if next packet is relocation packet3, do bo validation and compute
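radeon_bo_vm_update_pte() above walks every relocated bo, looks up its mapping in the per-process VM and pushes the bo's current placement into the page tables. A condensed sketch of that loop; the reloc count field name is assumed and error handling beyond the lookup is trimmed.

static int example_update_ptes(struct radeon_device *rdev,
                               struct radeon_cs_parser *p,
                               struct radeon_vm *vm)
{
        int i, r;

        for (i = 0; i < p->nrelocs; i++) {      /* count field name assumed */
                struct radeon_bo *bo = p->relocs[i].robj;
                struct radeon_bo_va *bo_va = radeon_vm_bo_find(vm, bo);

                if (bo_va == NULL) {
                        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                        return -EINVAL;
                }

                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }
        return 0;
}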
H A Dradeon_uvd.c147 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r); radeon_uvd_init()
154 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r); radeon_uvd_init()
163 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r); radeon_uvd_init()
421 static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, radeon_uvd_cs_msg() argument
436 f = reservation_object_get_excl(bo->tbo.resv); radeon_uvd_cs_msg()
445 r = radeon_bo_kmap(bo, &ptr); radeon_uvd_cs_msg()
467 radeon_bo_kunmap(bo); radeon_uvd_cs_msg()
493 radeon_bo_kunmap(bo); radeon_uvd_cs_msg()
515 radeon_bo_kunmap(bo); radeon_uvd_cs_msg()
731 /* we use the last page of the vcpu bo for the UVD message */ radeon_uvd_get_create_msg()
767 /* we use the last page of the vcpu bo for the UVD message */ radeon_uvd_get_destroy_msg()
H A Devergreen_cs.c435 dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_cb()
442 /* old ddx are broken they allocate bo with w*h*bpp but evergreen_cs_track_validate_cb()
475 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " evergreen_cs_track_validate_cb()
476 "offset %d, max layer %d, bo size %ld, slice %d)\n", evergreen_cs_track_validate_cb()
610 dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_stencil()
616 dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, " evergreen_cs_track_validate_stencil()
617 "offset %ld, max layer %d, bo size %ld)\n", evergreen_cs_track_validate_stencil()
629 dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_stencil()
635 dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, " evergreen_cs_track_validate_stencil()
636 "offset %ld, max layer %d, bo size %ld)\n", evergreen_cs_track_validate_stencil()
708 dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_depth()
714 dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, " evergreen_cs_track_validate_depth()
715 "offset %ld, max layer %d, bo size %ld)\n", evergreen_cs_track_validate_depth()
724 dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_depth()
730 dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, " evergreen_cs_track_validate_depth()
731 "offset %ld, max layer %d, bo size %ld)\n", evergreen_cs_track_validate_depth()
834 dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_texture()
839 dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n", evergreen_cs_track_validate_texture()
849 dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, " evergreen_cs_track_validate_texture()
850 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n", evergreen_cs_track_validate_texture()
909 dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, " evergreen_cs_track_validate_texture()
911 "bo size %ld) level0 (%d %d %d)\n", evergreen_cs_track_validate_texture()
949 DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", evergreen_cs_track_check()
2433 dev_warn(p->dev, "vbo resource seems too big for the bo\n"); evergreen_packet3_check()
2514 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", evergreen_packet3_check()
2533 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", evergreen_packet3_check()
2562 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", evergreen_packet3_check()
2587 DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", evergreen_packet3_check()
2611 DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", evergreen_packet3_check()
H A Dr600_cs.c723 DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", r600_cs_track_check()
1463 * @texture: texture's bo structure
1464 * @mipmap: mipmap's bo structure
1467 * the texture and mipmap bo object are big enough to cover this resource.
1593 /* using get ib will give us the offset into the texture bo */ r600_check_texture_resource()
1595 dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", r600_check_texture_resource()
1602 /* using get ib will give us the offset into the mipmap bo */ r600_check_texture_resource()
1604 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", r600_check_texture_resource()
2004 dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", r600_packet3_check()
2103 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n"); r600_packet3_check()
2109 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", r600_packet3_check()
2115 DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n", r600_packet3_check()
2148 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", r600_packet3_check()
2167 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", r600_packet3_check()
2196 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", r600_packet3_check()
2221 DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", r600_packet3_check()
2245 DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", r600_packet3_check()
2434 * Return the next reloc, do bo validation and compute
H A Drv770_dma.c61 DRM_ERROR("radeon: moving bo (%d).\n", r); rv770_copy_dma()
H A Dradeon.h436 struct radeon_bo *bo; member in struct:radeon_surface_reg
466 /* bo virtual address in a specific vm */
468 /* protected by bo being reserved */
481 struct radeon_bo *bo; member in struct:radeon_bo_va
499 /* list of all virtual address to which this bo
542 struct radeon_bo *bo; member in struct:radeon_sa_manager
918 struct radeon_bo *bo; member in struct:radeon_vm_pt
1789 int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
1790 void radeon_mn_unregister(struct radeon_bo *bo);
1792 static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) radeon_mn_register() argument
1796 static inline void radeon_mn_unregister(struct radeon_bo *bo) {} radeon_mn_unregister() argument
1914 /* copy functions for bo handling */
1928 /* method used for bo copy */
1934 /* ring used for bo copies */
2986 extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
3029 struct radeon_bo *bo);
3031 struct radeon_bo *bo);
3034 struct radeon_bo *bo);
H A Devergreen_dma.c127 DRM_ERROR("radeon: moving bo (%d).\n", r); evergreen_copy_dma()
H A Dradeon_benchmark.c81 DRM_INFO("radeon: %s %u bo moves of %u kB from" radeon_benchmark_log_results()
H A Dradeon_vce.c132 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r); radeon_vce_init()
139 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); radeon_vce_init()
148 dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r); radeon_vce_init()
217 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r); radeon_vce_resume()
H A Dradeon_kms.c648 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); radeon_driver_open_kms()
658 rdev->ring_tmp_bo.bo); radeon_driver_open_kms()
697 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); radeon_driver_postclose_kms()
701 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); radeon_driver_postclose_kms()
H A Dsi_dma.c250 DRM_ERROR("radeon: moving bo (%d).\n", r); si_copy_dma()
H A Devergreen.c4153 dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r); sumo_rlc_fini()
4165 dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r); sumo_rlc_fini()
4177 dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r); sumo_rlc_fini()
4212 dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r); sumo_rlc_init()
4226 dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r); sumo_rlc_init()
4233 dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r); sumo_rlc_init()
4291 dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r); sumo_rlc_init()
4305 dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r); sumo_rlc_init()
4312 dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r); sumo_rlc_init()
4368 dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r); sumo_rlc_init()
4376 dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r); sumo_rlc_init()
4384 dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r); sumo_rlc_init()
4390 dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r); sumo_rlc_init()
H A Dradeon_ib.c75 * space and soffset is the offset inside the pool bo radeon_ib_get()
H A Dradeon_device.c219 if (rdev->surface_regs[i].bo) radeon_surface_init()
220 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo); radeon_surface_init()
471 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); radeon_wb_init()
483 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); radeon_wb_init()
490 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); radeon_wb_init()
H A Dr200.c101 DRM_ERROR("radeon: moving bo (%d).\n", r); r200_copy_dma()
H A Dr600_dma.c458 DRM_ERROR("radeon: moving bo (%d).\n", r); r600_copy_dma()
H A Dradeon_pm.c146 struct radeon_bo *bo, *n; radeon_unmap_vram_bos() local
151 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { radeon_unmap_vram_bos()
152 if (bo->tbo.mem.mem_type == TTM_PL_VRAM) radeon_unmap_vram_bos()
153 ttm_bo_unmap_virtual(&bo->tbo); radeon_unmap_vram_bos()
H A Dcik.c4061 DRM_ERROR("radeon: moving bo (%d).\n", r); cik_copy_cpdma()
4745 dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r); cik_cp_compute_fini()
4763 dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r); cik_mec_fini()
4796 dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r); cik_mec_init()
4809 dev_warn(rdev->dev, "(%d) pin HDP EOP bo failed\n", r); cik_mec_init()
4815 dev_warn(rdev->dev, "(%d) map HDP EOP bo failed\n", r); cik_mec_init()
4962 dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r); cik_cp_compute_resume()
4975 dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r); cik_cp_compute_resume()
4981 dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r); cik_cp_compute_resume()
H A Dcik_sdma.c598 DRM_ERROR("radeon: moving bo (%d).\n", r); cik_copy_dma()
H A Dr100.c909 DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); r100_copy_blit()
H A Dr600.c2933 DRM_ERROR("radeon: moving bo (%d).\n", r); r600_copy_cpdma()
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_bo.c85 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, ttm_bo_mem_space_debug() argument
91 bo, bo->mem.num_pages, bo->mem.size >> 10, ttm_bo_mem_space_debug()
92 bo->mem.size >> 20); ttm_bo_mem_space_debug()
100 ttm_mem_type_debug(bo->bdev, mem_type); ttm_bo_mem_space_debug()
138 struct ttm_buffer_object *bo = ttm_bo_release_list() local
140 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_release_list()
141 size_t acc_size = bo->acc_size; ttm_bo_release_list()
143 BUG_ON(atomic_read(&bo->list_kref.refcount)); ttm_bo_release_list()
144 BUG_ON(atomic_read(&bo->kref.refcount)); ttm_bo_release_list()
145 BUG_ON(atomic_read(&bo->cpu_writers)); ttm_bo_release_list()
146 BUG_ON(bo->mem.mm_node != NULL); ttm_bo_release_list()
147 BUG_ON(!list_empty(&bo->lru)); ttm_bo_release_list()
148 BUG_ON(!list_empty(&bo->ddestroy)); ttm_bo_release_list()
150 if (bo->ttm) ttm_bo_release_list()
151 ttm_tt_destroy(bo->ttm); ttm_bo_release_list()
152 atomic_dec(&bo->glob->bo_count); ttm_bo_release_list()
153 if (bo->resv == &bo->ttm_resv) ttm_bo_release_list()
154 reservation_object_fini(&bo->ttm_resv); ttm_bo_release_list()
155 mutex_destroy(&bo->wu_mutex); ttm_bo_release_list()
156 if (bo->destroy) ttm_bo_release_list()
157 bo->destroy(bo); ttm_bo_release_list()
159 kfree(bo); ttm_bo_release_list()
164 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) ttm_bo_add_to_lru() argument
166 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_add_to_lru()
169 lockdep_assert_held(&bo->resv->lock.base); ttm_bo_add_to_lru()
171 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { ttm_bo_add_to_lru()
173 BUG_ON(!list_empty(&bo->lru)); ttm_bo_add_to_lru()
175 man = &bdev->man[bo->mem.mem_type]; ttm_bo_add_to_lru()
176 list_add_tail(&bo->lru, &man->lru); ttm_bo_add_to_lru()
177 kref_get(&bo->list_kref); ttm_bo_add_to_lru()
179 if (bo->ttm != NULL) { ttm_bo_add_to_lru()
180 list_add_tail(&bo->swap, &bo->glob->swap_lru); ttm_bo_add_to_lru()
181 kref_get(&bo->list_kref); ttm_bo_add_to_lru()
187 int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) ttm_bo_del_from_lru() argument
191 if (!list_empty(&bo->swap)) { ttm_bo_del_from_lru()
192 list_del_init(&bo->swap); ttm_bo_del_from_lru()
195 if (!list_empty(&bo->lru)) { ttm_bo_del_from_lru()
196 list_del_init(&bo->lru); ttm_bo_del_from_lru()
213 void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, ttm_bo_list_ref_sub() argument
216 kref_sub(&bo->list_kref, count, ttm_bo_list_ref_sub()
220 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) ttm_bo_del_sub_from_lru() argument
224 spin_lock(&bo->glob->lru_lock); ttm_bo_del_sub_from_lru()
225 put_count = ttm_bo_del_from_lru(bo); ttm_bo_del_sub_from_lru()
226 spin_unlock(&bo->glob->lru_lock); ttm_bo_del_sub_from_lru()
227 ttm_bo_list_ref_sub(bo, put_count, true); ttm_bo_del_sub_from_lru()
232 * Call bo->mutex locked.
234 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) ttm_bo_add_ttm() argument
236 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_add_ttm()
237 struct ttm_bo_global *glob = bo->glob; ttm_bo_add_ttm()
241 TTM_ASSERT_LOCKED(&bo->mutex); ttm_bo_add_ttm()
242 bo->ttm = NULL; ttm_bo_add_ttm()
247 switch (bo->type) { ttm_bo_add_ttm()
252 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, ttm_bo_add_ttm()
254 if (unlikely(bo->ttm == NULL)) ttm_bo_add_ttm()
258 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, ttm_bo_add_ttm()
261 if (unlikely(bo->ttm == NULL)) { ttm_bo_add_ttm()
265 bo->ttm->sg = bo->sg; ttm_bo_add_ttm()
276 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ttm_bo_handle_move_mem() argument
281 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_handle_move_mem()
282 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); ttm_bo_handle_move_mem()
284 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type]; ttm_bo_handle_move_mem()
289 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { ttm_bo_handle_move_mem()
293 ttm_bo_unmap_virtual_locked(bo); ttm_bo_handle_move_mem()
302 if (bo->ttm == NULL) { ttm_bo_handle_move_mem()
304 ret = ttm_bo_add_ttm(bo, zero); ttm_bo_handle_move_mem()
309 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); ttm_bo_handle_move_mem()
314 ret = ttm_tt_bind(bo->ttm, mem); ttm_bo_handle_move_mem()
319 if (bo->mem.mem_type == TTM_PL_SYSTEM) { ttm_bo_handle_move_mem()
321 bdev->driver->move_notify(bo, mem); ttm_bo_handle_move_mem()
322 bo->mem = *mem; ttm_bo_handle_move_mem()
329 bdev->driver->move_notify(bo, mem); ttm_bo_handle_move_mem()
333 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); ttm_bo_handle_move_mem()
335 ret = bdev->driver->move(bo, evict, interruptible, ttm_bo_handle_move_mem()
338 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); ttm_bo_handle_move_mem()
343 *mem = bo->mem; ttm_bo_handle_move_mem()
344 bo->mem = tmp_mem; ttm_bo_handle_move_mem()
345 bdev->driver->move_notify(bo, mem); ttm_bo_handle_move_mem()
346 bo->mem = *mem; ttm_bo_handle_move_mem()
354 if (bo->evicted) { ttm_bo_handle_move_mem()
356 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); ttm_bo_handle_move_mem()
360 bo->evicted = false; ttm_bo_handle_move_mem()
363 if (bo->mem.mm_node) { ttm_bo_handle_move_mem()
364 bo->offset = (bo->mem.start << PAGE_SHIFT) + ttm_bo_handle_move_mem()
365 bdev->man[bo->mem.mem_type].gpu_offset; ttm_bo_handle_move_mem()
366 bo->cur_placement = bo->mem.placement; ttm_bo_handle_move_mem()
368 bo->offset = 0; ttm_bo_handle_move_mem()
373 new_man = &bdev->man[bo->mem.mem_type]; ttm_bo_handle_move_mem()
374 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { ttm_bo_handle_move_mem()
375 ttm_tt_unbind(bo->ttm); ttm_bo_handle_move_mem()
376 ttm_tt_destroy(bo->ttm); ttm_bo_handle_move_mem()
377 bo->ttm = NULL; ttm_bo_handle_move_mem()
384 * Call bo::reserved.
388 * Will release the bo::reserved lock.
391 static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_bo_cleanup_memtype_use() argument
393 if (bo->bdev->driver->move_notify) ttm_bo_cleanup_memtype_use()
394 bo->bdev->driver->move_notify(bo, NULL); ttm_bo_cleanup_memtype_use()
396 if (bo->ttm) { ttm_bo_cleanup_memtype_use()
397 ttm_tt_unbind(bo->ttm); ttm_bo_cleanup_memtype_use()
398 ttm_tt_destroy(bo->ttm); ttm_bo_cleanup_memtype_use()
399 bo->ttm = NULL; ttm_bo_cleanup_memtype_use()
401 ttm_bo_mem_put(bo, &bo->mem); ttm_bo_cleanup_memtype_use()
403 ww_mutex_unlock (&bo->resv->lock); ttm_bo_cleanup_memtype_use()
406 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) ttm_bo_flush_all_fences() argument
412 fobj = reservation_object_get_list(bo->resv); ttm_bo_flush_all_fences()
413 fence = reservation_object_get_excl(bo->resv); ttm_bo_flush_all_fences()
419 reservation_object_held(bo->resv)); ttm_bo_flush_all_fences()
426 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) ttm_bo_cleanup_refs_or_queue() argument
428 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_cleanup_refs_or_queue()
429 struct ttm_bo_global *glob = bo->glob; ttm_bo_cleanup_refs_or_queue()
434 ret = __ttm_bo_reserve(bo, false, true, false, NULL); ttm_bo_cleanup_refs_or_queue()
437 if (!ttm_bo_wait(bo, false, false, true)) { ttm_bo_cleanup_refs_or_queue()
438 put_count = ttm_bo_del_from_lru(bo); ttm_bo_cleanup_refs_or_queue()
441 ttm_bo_cleanup_memtype_use(bo); ttm_bo_cleanup_refs_or_queue()
443 ttm_bo_list_ref_sub(bo, put_count, true); ttm_bo_cleanup_refs_or_queue()
447 ttm_bo_flush_all_fences(bo); ttm_bo_cleanup_refs_or_queue()
454 if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { ttm_bo_cleanup_refs_or_queue()
455 bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; ttm_bo_cleanup_refs_or_queue()
456 ttm_bo_add_to_lru(bo); ttm_bo_cleanup_refs_or_queue()
459 __ttm_bo_unreserve(bo); ttm_bo_cleanup_refs_or_queue()
462 kref_get(&bo->list_kref); ttm_bo_cleanup_refs_or_queue()
463 list_add_tail(&bo->ddestroy, &bdev->ddestroy); ttm_bo_cleanup_refs_or_queue()
472 * If bo idle, remove from delayed- and lru lists, and unref.
482 static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo, ttm_bo_cleanup_refs_and_unlock() argument
486 struct ttm_bo_global *glob = bo->glob; ttm_bo_cleanup_refs_and_unlock()
490 ret = ttm_bo_wait(bo, false, false, true); ttm_bo_cleanup_refs_and_unlock()
494 ww_mutex_unlock(&bo->resv->lock); ttm_bo_cleanup_refs_and_unlock()
497 lret = reservation_object_wait_timeout_rcu(bo->resv, ttm_bo_cleanup_refs_and_unlock()
508 ret = __ttm_bo_reserve(bo, false, true, false, NULL); ttm_bo_cleanup_refs_and_unlock()
527 ret = ttm_bo_wait(bo, false, false, true); ttm_bo_cleanup_refs_and_unlock()
531 if (ret || unlikely(list_empty(&bo->ddestroy))) { ttm_bo_cleanup_refs_and_unlock()
532 __ttm_bo_unreserve(bo); ttm_bo_cleanup_refs_and_unlock()
537 put_count = ttm_bo_del_from_lru(bo); ttm_bo_cleanup_refs_and_unlock()
538 list_del_init(&bo->ddestroy); ttm_bo_cleanup_refs_and_unlock()
542 ttm_bo_cleanup_memtype_use(bo); ttm_bo_cleanup_refs_and_unlock()
544 ttm_bo_list_ref_sub(bo, put_count, true); ttm_bo_cleanup_refs_and_unlock()
623 struct ttm_buffer_object *bo = ttm_bo_release() local
625 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_release()
626 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; ttm_bo_release()
628 drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); ttm_bo_release()
630 ttm_mem_io_free_vm(bo); ttm_bo_release()
632 ttm_bo_cleanup_refs_or_queue(bo); ttm_bo_release()
633 kref_put(&bo->list_kref, ttm_bo_release_list); ttm_bo_release()
638 struct ttm_buffer_object *bo = *p_bo; ttm_bo_unref() local
641 kref_put(&bo->kref, ttm_bo_release); ttm_bo_unref()
659 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ttm_bo_evict() argument
662 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_evict()
667 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); ttm_bo_evict()
676 lockdep_assert_held(&bo->resv->lock.base); ttm_bo_evict()
678 evict_mem = bo->mem; ttm_bo_evict()
685 bdev->driver->evict_flags(bo, &placement); ttm_bo_evict()
686 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, ttm_bo_evict()
691 bo); ttm_bo_evict()
692 ttm_bo_mem_space_debug(bo, &placement); ttm_bo_evict()
697 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, ttm_bo_evict()
702 ttm_bo_mem_put(bo, &evict_mem); ttm_bo_evict()
705 bo->evicted = true; ttm_bo_evict()
718 struct ttm_buffer_object *bo; ttm_mem_evict_first() local
722 list_for_each_entry(bo, &man->lru, lru) { ttm_mem_evict_first()
723 ret = __ttm_bo_reserve(bo, false, true, false, NULL); ttm_mem_evict_first()
729 if (place->fpfn >= (bo->mem.start + bo->mem.size) || ttm_mem_evict_first()
730 (place->lpfn && place->lpfn <= bo->mem.start)) { ttm_mem_evict_first()
731 __ttm_bo_unreserve(bo); ttm_mem_evict_first()
746 kref_get(&bo->list_kref); ttm_mem_evict_first()
748 if (!list_empty(&bo->ddestroy)) { ttm_mem_evict_first()
749 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible, ttm_mem_evict_first()
751 kref_put(&bo->list_kref, ttm_bo_release_list); ttm_mem_evict_first()
755 put_count = ttm_bo_del_from_lru(bo); ttm_mem_evict_first()
760 ttm_bo_list_ref_sub(bo, put_count, true); ttm_mem_evict_first()
762 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); ttm_mem_evict_first()
763 ttm_bo_unreserve(bo); ttm_mem_evict_first()
765 kref_put(&bo->list_kref, ttm_bo_release_list); ttm_mem_evict_first()
769 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) ttm_bo_mem_put() argument
771 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; ttm_bo_mem_put()
782 static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, ttm_bo_mem_force_space() argument
789 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_mem_force_space()
794 ret = (*man->func->get_node)(man, bo, place, mem); ttm_bo_mem_force_space()
862 int ttm_bo_mem_space(struct ttm_buffer_object *bo, ttm_bo_mem_space() argument
868 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_mem_space()
892 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, ttm_bo_mem_space()
906 ret = (*man->func->get_node)(man, bo, place, mem); ttm_bo_mem_space()
935 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, ttm_bo_mem_space()
951 ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, ttm_bo_mem_space()
965 static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, ttm_bo_move_buffer() argument
973 lockdep_assert_held(&bo->resv->lock.base); ttm_bo_move_buffer()
980 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); ttm_bo_move_buffer()
983 mem.num_pages = bo->num_pages; ttm_bo_move_buffer()
985 mem.page_alignment = bo->mem.page_alignment; ttm_bo_move_buffer()
991 ret = ttm_bo_mem_space(bo, placement, &mem, ttm_bo_move_buffer()
995 ret = ttm_bo_handle_move_mem(bo, &mem, false, ttm_bo_move_buffer()
999 ttm_bo_mem_put(bo, &mem); ttm_bo_move_buffer()
1038 int ttm_bo_validate(struct ttm_buffer_object *bo, ttm_bo_validate() argument
1046 lockdep_assert_held(&bo->resv->lock.base); ttm_bo_validate()
1050 if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) { ttm_bo_validate()
1051 ret = ttm_bo_move_buffer(bo, placement, interruptible, ttm_bo_validate()
1060 ttm_flag_masked(&bo->mem.placement, new_flags, ttm_bo_validate()
1066 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ttm_bo_validate()
1067 ret = ttm_bo_add_ttm(bo, true); ttm_bo_validate()
1076 struct ttm_buffer_object *bo, ttm_bo_init()
1097 (*destroy)(bo); ttm_bo_init()
1099 kfree(bo); ttm_bo_init()
1107 (*destroy)(bo); ttm_bo_init()
1109 kfree(bo); ttm_bo_init()
1113 bo->destroy = destroy; ttm_bo_init()
1115 kref_init(&bo->kref); ttm_bo_init()
1116 kref_init(&bo->list_kref); ttm_bo_init()
1117 atomic_set(&bo->cpu_writers, 0); ttm_bo_init()
1118 INIT_LIST_HEAD(&bo->lru); ttm_bo_init()
1119 INIT_LIST_HEAD(&bo->ddestroy); ttm_bo_init()
1120 INIT_LIST_HEAD(&bo->swap); ttm_bo_init()
1121 INIT_LIST_HEAD(&bo->io_reserve_lru); ttm_bo_init()
1122 mutex_init(&bo->wu_mutex); ttm_bo_init()
1123 bo->bdev = bdev; ttm_bo_init()
1124 bo->glob = bdev->glob; ttm_bo_init()
1125 bo->type = type; ttm_bo_init()
1126 bo->num_pages = num_pages; ttm_bo_init()
1127 bo->mem.size = num_pages << PAGE_SHIFT; ttm_bo_init()
1128 bo->mem.mem_type = TTM_PL_SYSTEM; ttm_bo_init()
1129 bo->mem.num_pages = bo->num_pages; ttm_bo_init()
1130 bo->mem.mm_node = NULL; ttm_bo_init()
1131 bo->mem.page_alignment = page_alignment; ttm_bo_init()
1132 bo->mem.bus.io_reserved_vm = false; ttm_bo_init()
1133 bo->mem.bus.io_reserved_count = 0; ttm_bo_init()
1134 bo->priv_flags = 0; ttm_bo_init()
1135 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); ttm_bo_init()
1136 bo->persistent_swap_storage = persistent_swap_storage; ttm_bo_init()
1137 bo->acc_size = acc_size; ttm_bo_init()
1138 bo->sg = sg; ttm_bo_init()
1140 bo->resv = resv; ttm_bo_init()
1141 lockdep_assert_held(&bo->resv->lock.base); ttm_bo_init()
1143 bo->resv = &bo->ttm_resv; ttm_bo_init()
1144 reservation_object_init(&bo->ttm_resv); ttm_bo_init()
1146 atomic_inc(&bo->glob->bo_count); ttm_bo_init()
1147 drm_vma_node_reset(&bo->vma_node); ttm_bo_init()
1153 if (bo->type == ttm_bo_type_device || ttm_bo_init()
1154 bo->type == ttm_bo_type_sg) ttm_bo_init()
1155 ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, ttm_bo_init()
1156 bo->mem.num_pages); ttm_bo_init()
1162 locked = ww_mutex_trylock(&bo->resv->lock); ttm_bo_init()
1167 ret = ttm_bo_validate(bo, placement, interruptible, false); ttm_bo_init()
1170 ttm_bo_unreserve(bo); ttm_bo_init()
1173 ttm_bo_unref(&bo); ttm_bo_init()
1217 struct ttm_buffer_object *bo; ttm_bo_create() local
1221 bo = kzalloc(sizeof(*bo), GFP_KERNEL); ttm_bo_create()
1222 if (unlikely(bo == NULL)) ttm_bo_create()
1226 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, ttm_bo_create()
1230 *p_bo = bo; ttm_bo_create()
1513 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) ttm_bo_unmap_virtual_locked() argument
1515 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_unmap_virtual_locked()
1517 drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping); ttm_bo_unmap_virtual_locked()
1518 ttm_mem_io_free_vm(bo); ttm_bo_unmap_virtual_locked()
1521 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) ttm_bo_unmap_virtual() argument
1523 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_unmap_virtual()
1524 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; ttm_bo_unmap_virtual()
1527 ttm_bo_unmap_virtual_locked(bo); ttm_bo_unmap_virtual()
1534 int ttm_bo_wait(struct ttm_buffer_object *bo, ttm_bo_wait() argument
1543 resv = bo->resv; ttm_bo_wait()
1577 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); ttm_bo_wait()
1582 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) ttm_bo_synccpu_write_grab() argument
1590 ret = ttm_bo_reserve(bo, true, no_wait, false, NULL); ttm_bo_synccpu_write_grab()
1593 ret = ttm_bo_wait(bo, false, true, no_wait); ttm_bo_synccpu_write_grab()
1595 atomic_inc(&bo->cpu_writers); ttm_bo_synccpu_write_grab()
1596 ttm_bo_unreserve(bo); ttm_bo_synccpu_write_grab()
1601 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) ttm_bo_synccpu_write_release() argument
1603 atomic_dec(&bo->cpu_writers); ttm_bo_synccpu_write_release()
1616 struct ttm_buffer_object *bo; ttm_bo_swapout() local
1622 list_for_each_entry(bo, &glob->swap_lru, swap) { ttm_bo_swapout()
1623 ret = __ttm_bo_reserve(bo, false, true, false, NULL); ttm_bo_swapout()
1633 kref_get(&bo->list_kref); ttm_bo_swapout()
1635 if (!list_empty(&bo->ddestroy)) { ttm_bo_swapout()
1636 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false); ttm_bo_swapout()
1637 kref_put(&bo->list_kref, ttm_bo_release_list); ttm_bo_swapout()
1641 put_count = ttm_bo_del_from_lru(bo); ttm_bo_swapout()
1644 ttm_bo_list_ref_sub(bo, put_count, true); ttm_bo_swapout()
1650 ret = ttm_bo_wait(bo, false, false, false); ttm_bo_swapout()
1655 if ((bo->mem.placement & swap_placement) != swap_placement) { ttm_bo_swapout()
1658 evict_mem = bo->mem; ttm_bo_swapout()
1663 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, ttm_bo_swapout()
1669 ttm_bo_unmap_virtual(bo); ttm_bo_swapout()
1676 if (bo->bdev->driver->swap_notify) ttm_bo_swapout()
1677 bo->bdev->driver->swap_notify(bo); ttm_bo_swapout()
1679 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); ttm_bo_swapout()
1688 __ttm_bo_unreserve(bo); ttm_bo_swapout()
1689 kref_put(&bo->list_kref, ttm_bo_release_list); ttm_bo_swapout()
1704 * @bo: Pointer to buffer
1706 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo) ttm_bo_wait_unreserved() argument
1712 * Use the bo::wu_mutex to avoid triggering livelocks due to ttm_bo_wait_unreserved()
1714 * bo::wu_mutex can go away if we change locking order to ttm_bo_wait_unreserved()
1715 * mmap_sem -> bo::reserve. ttm_bo_wait_unreserved()
1717 ret = mutex_lock_interruptible(&bo->wu_mutex); ttm_bo_wait_unreserved()
1720 if (!ww_mutex_is_locked(&bo->resv->lock)) ttm_bo_wait_unreserved()
1722 ret = __ttm_bo_reserve(bo, true, false, false, NULL); ttm_bo_wait_unreserved()
1725 __ttm_bo_unreserve(bo); ttm_bo_wait_unreserved()
1728 mutex_unlock(&bo->wu_mutex); ttm_bo_wait_unreserved()
1075 ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, unsigned long size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, bool interruptible, struct file *persistent_swap_storage, size_t acc_size, struct sg_table *sg, struct reservation_object *resv, void (*destroy) (struct ttm_buffer_object *)) ttm_bo_init() argument
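The full ttm_bo_init() prototype closes the ttm_bo.c hits. A minimal driver-side call, roughly as the ast/mgag200/bochs drivers further down make it, might look like the sketch below; struct my_bo, its placement member and my_bo_destroy() are illustrative names, only the ttm_bo_init() signature comes from the listing.

static int example_bo_create(struct ttm_bo_device *bdev, struct my_bo *my_bo,
                             unsigned long size, size_t acc_size)
{
        /* On failure ttm_bo_init() destroys the object itself (note the
         * (*destroy)() and ttm_bo_unref() hits above), so the caller does
         * no extra cleanup. */
        return ttm_bo_init(bdev, &my_bo->tbo, size, ttm_bo_type_device,
                           &my_bo->placement, 0 /* page_alignment */,
                           false /* interruptible */,
                           NULL /* persistent_swap_storage */,
                           acc_size,
                           NULL /* sg */, NULL /* resv */,
                           my_bo_destroy);
}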
H A Dttm_execbuf_util.c39 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry_continue_reverse() local
41 __ttm_bo_unreserve(bo); list_for_each_entry_continue_reverse()
50 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry() local
51 unsigned put_count = ttm_bo_del_from_lru(bo); list_for_each_entry()
53 ttm_bo_list_ref_sub(bo, put_count, true); list_for_each_entry()
67 glob = entry->bo->glob; ttm_eu_backoff_reservation()
71 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry() local
73 ttm_bo_add_to_lru(bo); list_for_each_entry()
74 __ttm_bo_unreserve(bo); list_for_each_entry()
107 glob = entry->bo->glob; ttm_eu_reserve_buffers()
113 struct ttm_buffer_object *bo = entry->bo; list_for_each_entry() local
115 ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true, list_for_each_entry()
117 if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { list_for_each_entry()
118 __ttm_bo_unreserve(bo); list_for_each_entry()
134 ret = reservation_object_reserve_shared(bo->resv); list_for_each_entry()
146 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, list_for_each_entry()
149 ww_mutex_lock_slow(&bo->resv->lock, ticket); list_for_each_entry()
154 ret = reservation_object_reserve_shared(bo->resv); list_for_each_entry()
186 struct ttm_buffer_object *bo; ttm_eu_fence_buffer_objects() local
194 bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; ttm_eu_fence_buffer_objects()
195 bdev = bo->bdev; ttm_eu_fence_buffer_objects()
197 glob = bo->glob; ttm_eu_fence_buffer_objects()
202 bo = entry->bo; list_for_each_entry()
204 reservation_object_add_shared_fence(bo->resv, fence); list_for_each_entry()
206 reservation_object_add_excl_fence(bo->resv, fence); list_for_each_entry()
207 ttm_bo_add_to_lru(bo); list_for_each_entry()
208 __ttm_bo_unreserve(bo); list_for_each_entry()
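ttm_execbuf_util.c provides the multi-bo reservation helpers used by command submission: reserve every buffer on a validation list under one ww_acquire ticket, then either attach the submission fence to all of them or back the reservations off on error. A minimal usage sketch; the fence comes from the driver's submission, list filling is elided, and the helper signatures are as I understand them for this kernel version.

struct ww_acquire_ctx ticket;
LIST_HEAD(list);        /* of struct ttm_validate_buffer, with .bo filled in,
                         * e.g. tv.bo = &robj->tbo as in the radeon_cs.c hit */
struct fence *fence;    /* produced by the submission below */
int ret;

ret = ttm_eu_reserve_buffers(&ticket, &list, true /* intr */, NULL /* dups */);
if (ret)
        return ret;

/* ... validate placements, build and submit the command stream ... */

ttm_eu_fence_buffer_objects(&ticket, &list, fence);     /* success path */
/* ttm_eu_backoff_reservation(&ticket, &list);             error path   */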
H A Dttm_bo_vm.c44 static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, ttm_bo_vm_fault_idle() argument
50 if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))) ttm_bo_vm_fault_idle()
56 ret = ttm_bo_wait(bo, false, false, true); ttm_bo_vm_fault_idle()
70 (void) ttm_bo_wait(bo, false, true, false); ttm_bo_vm_fault_idle()
77 ret = ttm_bo_wait(bo, false, true, false); ttm_bo_vm_fault_idle()
88 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) ttm_bo_vm_fault() local
90 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_vm_fault()
101 &bdev->man[bo->mem.mem_type]; ttm_bo_vm_fault()
110 ret = ttm_bo_reserve(bo, true, true, false, NULL); ttm_bo_vm_fault()
118 (void) ttm_bo_wait_unreserved(bo); ttm_bo_vm_fault()
126 * mmap_sem -> bo::reserve, we'd use a blocking reserve here ttm_bo_vm_fault()
136 if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { ttm_bo_vm_fault()
142 ret = bdev->driver->fault_reserve_notify(bo); ttm_bo_vm_fault()
160 ret = ttm_bo_vm_fault_idle(bo, vma, vmf); ttm_bo_vm_fault()
171 ret = ttm_mem_io_reserve_vm(bo); ttm_bo_vm_fault()
178 vma->vm_pgoff - drm_vma_node_start(&bo->vma_node); ttm_bo_vm_fault()
180 drm_vma_node_start(&bo->vma_node); ttm_bo_vm_fault()
182 if (unlikely(page_offset >= bo->num_pages)) { ttm_bo_vm_fault()
195 if (bo->mem.bus.is_iomem) { ttm_bo_vm_fault()
196 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, ttm_bo_vm_fault()
199 ttm = bo->ttm; ttm_bo_vm_fault()
200 cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, ttm_bo_vm_fault()
215 if (bo->mem.bus.is_iomem) ttm_bo_vm_fault()
216 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset; ttm_bo_vm_fault()
226 page->index = drm_vma_node_start(&bo->vma_node) + ttm_bo_vm_fault()
256 ttm_bo_unreserve(bo); ttm_bo_vm_fault()
262 struct ttm_buffer_object *bo = ttm_bo_vm_open() local
265 WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping); ttm_bo_vm_open()
267 (void)ttm_bo_reference(bo); ttm_bo_vm_open()
272 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; ttm_bo_vm_close() local
274 ttm_bo_unref(&bo); ttm_bo_vm_close()
289 struct ttm_buffer_object *bo = NULL; ttm_bo_vm_lookup() local
295 bo = container_of(node, struct ttm_buffer_object, vma_node); ttm_bo_vm_lookup()
296 if (!kref_get_unless_zero(&bo->kref)) ttm_bo_vm_lookup()
297 bo = NULL; ttm_bo_vm_lookup()
302 if (!bo) ttm_bo_vm_lookup()
305 return bo; ttm_bo_vm_lookup()
312 struct ttm_buffer_object *bo; ttm_bo_mmap() local
315 bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma)); ttm_bo_mmap()
316 if (unlikely(!bo)) ttm_bo_mmap()
319 driver = bo->bdev->driver; ttm_bo_mmap()
324 ret = driver->verify_access(bo, filp); ttm_bo_mmap()
331 * Note: We're transferring the bo reference to ttm_bo_mmap()
335 vma->vm_private_data = bo; ttm_bo_mmap()
348 ttm_bo_unref(&bo); ttm_bo_mmap()
353 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) ttm_fbdev_mmap() argument
359 vma->vm_private_data = ttm_bo_reference(bo); ttm_fbdev_mmap()
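The fault/open/close handlers above sit behind ttm_bo_mmap(), which looks the bo up by mmap offset, runs the driver's verify_access() hook and installs the VM ops. A driver's file_operations .mmap typically just forwards to it, roughly as in this sketch; struct my_device and its ttm.bdev member are illustrative.

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct my_device *mdev = file_priv->minor->dev->dev_private;

        return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
}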
H A Dttm_bo_util.c42 void ttm_bo_free_old_node(struct ttm_buffer_object *bo) ttm_bo_free_old_node() argument
44 ttm_bo_mem_put(bo, &bo->mem); ttm_bo_free_old_node()
47 int ttm_bo_move_ttm(struct ttm_buffer_object *bo, ttm_bo_move_ttm() argument
51 struct ttm_tt *ttm = bo->ttm; ttm_bo_move_ttm()
52 struct ttm_mem_reg *old_mem = &bo->mem; ttm_bo_move_ttm()
57 ttm_bo_free_old_node(bo); ttm_bo_move_ttm()
104 struct ttm_buffer_object *bo; ttm_mem_io_evict() local
109 bo = list_first_entry(&man->io_reserve_lru, ttm_mem_io_evict()
112 list_del_init(&bo->io_reserve_lru); ttm_mem_io_evict()
113 ttm_bo_unmap_virtual_locked(bo); ttm_mem_io_evict()
160 int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) ttm_mem_io_reserve_vm() argument
162 struct ttm_mem_reg *mem = &bo->mem; ttm_mem_io_reserve_vm()
167 &bo->bdev->man[mem->mem_type]; ttm_mem_io_reserve_vm()
169 ret = ttm_mem_io_reserve(bo->bdev, mem); ttm_mem_io_reserve_vm()
174 list_add_tail(&bo->io_reserve_lru, ttm_mem_io_reserve_vm()
180 void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) ttm_mem_io_free_vm() argument
182 struct ttm_mem_reg *mem = &bo->mem; ttm_mem_io_free_vm()
186 list_del_init(&bo->io_reserve_lru); ttm_mem_io_free_vm()
187 ttm_mem_io_free(bo->bdev, mem); ttm_mem_io_free_vm()
323 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, ttm_bo_move_memcpy() argument
327 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_move_memcpy()
329 struct ttm_tt *ttm = bo->ttm; ttm_bo_move_memcpy()
330 struct ttm_mem_reg *old_mem = &bo->mem; ttm_bo_move_memcpy()
407 bo->ttm = NULL; ttm_bo_move_memcpy()
419 ttm_bo_mem_put(bo, &old_copy); ttm_bo_move_memcpy()
424 static void ttm_transfered_destroy(struct ttm_buffer_object *bo) ttm_transfered_destroy() argument
426 kfree(bo); ttm_transfered_destroy()
432 * @bo: A pointer to a struct ttm_buffer_object.
434 * holding the data of @bo with the old placement.
444 static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ttm_buffer_object_transfer() argument
454 *fbo = *bo; ttm_buffer_object_transfer()
506 static int ttm_bo_ioremap(struct ttm_buffer_object *bo, ttm_bo_ioremap() argument
511 struct ttm_mem_reg *mem = &bo->mem; ttm_bo_ioremap()
513 if (bo->mem.bus.addr) { ttm_bo_ioremap()
515 map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); ttm_bo_ioremap()
519 map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, ttm_bo_ioremap()
522 map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, ttm_bo_ioremap()
528 static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, ttm_bo_kmap_ttm() argument
533 struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot; ttm_bo_kmap_ttm()
534 struct ttm_tt *ttm = bo->ttm; ttm_bo_kmap_ttm()
548 * page protection is consistent with the bo. ttm_bo_kmap_ttm()
567 int ttm_bo_kmap(struct ttm_buffer_object *bo, ttm_bo_kmap() argument
572 &bo->bdev->man[bo->mem.mem_type]; ttm_bo_kmap()
576 BUG_ON(!list_empty(&bo->swap)); ttm_bo_kmap()
578 map->bo = bo; ttm_bo_kmap()
579 if (num_pages > bo->num_pages) ttm_bo_kmap()
581 if (start_page > bo->num_pages) ttm_bo_kmap()
588 ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); ttm_bo_kmap()
592 if (!bo->mem.bus.is_iomem) { ttm_bo_kmap()
593 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); ttm_bo_kmap()
597 return ttm_bo_ioremap(bo, offset, size, map); ttm_bo_kmap()
604 struct ttm_buffer_object *bo = map->bo; ttm_bo_kunmap() local
606 &bo->bdev->man[bo->mem.mem_type]; ttm_bo_kunmap()
626 ttm_mem_io_free(map->bo->bdev, &map->bo->mem); ttm_bo_kunmap()
633 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, ttm_bo_move_accel_cleanup() argument
639 struct ttm_bo_device *bdev = bo->bdev; ttm_bo_move_accel_cleanup()
641 struct ttm_mem_reg *old_mem = &bo->mem; ttm_bo_move_accel_cleanup()
645 reservation_object_add_excl_fence(bo->resv, fence); ttm_bo_move_accel_cleanup()
647 ret = ttm_bo_wait(bo, false, false, false); ttm_bo_move_accel_cleanup()
652 (bo->ttm != NULL)) { ttm_bo_move_accel_cleanup()
653 ttm_tt_unbind(bo->ttm); ttm_bo_move_accel_cleanup()
654 ttm_tt_destroy(bo->ttm); ttm_bo_move_accel_cleanup()
655 bo->ttm = NULL; ttm_bo_move_accel_cleanup()
657 ttm_bo_free_old_node(bo); ttm_bo_move_accel_cleanup()
667 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); ttm_bo_move_accel_cleanup()
669 ret = ttm_buffer_object_transfer(bo, &ghost_obj); ttm_bo_move_accel_cleanup()
678 * bo to be unbound and destroyed. ttm_bo_move_accel_cleanup()
684 bo->ttm = NULL; ttm_bo_move_accel_cleanup()
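ttm_bo_kmap()/ttm_bo_kunmap() above give CPU access to a bo whether it currently lives in iomem or in system pages. A minimal access sketch; the caller is assumed to already hold a reservation on the bo, as the drivers in this listing do before kmapping.

static int example_cpu_access(struct ttm_buffer_object *bo)
{
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        void *ptr;
        int ret;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
        if (ret)
                return ret;

        ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
        /* Read or write through ptr; use memcpy_toio()/ioread32() style
         * accessors when is_iomem is true, as the ast/mgag200 hits below do. */

        ttm_bo_kunmap(&map);
        return 0;
}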
H A Dttm_bo_manager.c51 struct ttm_buffer_object *bo, ttm_bo_man_get_node()
50 ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) ttm_bo_man_get_node() argument
/linux-4.1.27/drivers/gpu/drm/ast/
H A Dast_ttm.c97 struct ast_bo *bo; ast_bo_ttm_destroy() local
99 bo = container_of(tbo, struct ast_bo, bo); ast_bo_ttm_destroy()
101 drm_gem_object_release(&bo->gem); ast_bo_ttm_destroy()
102 kfree(bo); ast_bo_ttm_destroy()
105 static bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo) ast_ttm_bo_is_ast_bo() argument
107 if (bo->destroy == &ast_bo_ttm_destroy) ast_ttm_bo_is_ast_bo()
138 ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) ast_bo_evict_flags() argument
140 struct ast_bo *astbo = ast_bo(bo); ast_bo_evict_flags()
142 if (!ast_ttm_bo_is_ast_bo(bo)) ast_bo_evict_flags()
149 static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) ast_bo_verify_access() argument
151 struct ast_bo *astbo = ast_bo(bo); ast_bo_verify_access()
189 static int ast_bo_move(struct ttm_buffer_object *bo, ast_bo_move() argument
195 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); ast_bo_move()
267 DRM_ERROR("Error initialising bo driver; %d\n", ret); ast_mm_init()
293 void ast_ttm_placement(struct ast_bo *bo, int domain) ast_ttm_placement() argument
298 bo->placement.placement = bo->placements; ast_ttm_placement()
299 bo->placement.busy_placement = bo->placements; ast_ttm_placement()
301 bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; ast_ttm_placement()
303 bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; ast_ttm_placement()
305 bo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM; ast_ttm_placement()
306 bo->placement.num_placement = c; ast_ttm_placement()
307 bo->placement.num_busy_placement = c; ast_ttm_placement()
309 bo->placements[i].fpfn = 0; ast_ttm_placement()
310 bo->placements[i].lpfn = 0; ast_ttm_placement()
332 astbo->bo.bdev = &ast->ttm.bdev; ast_bo_create()
339 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, ast_bo_create()
350 static inline u64 ast_bo_gpu_offset(struct ast_bo *bo) ast_bo_gpu_offset() argument
352 return bo->bo.offset; ast_bo_gpu_offset()
355 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr) ast_bo_pin() argument
359 if (bo->pin_count) { ast_bo_pin()
360 bo->pin_count++; ast_bo_pin()
362 *gpu_addr = ast_bo_gpu_offset(bo); ast_bo_pin()
365 ast_ttm_placement(bo, pl_flag); ast_bo_pin()
366 for (i = 0; i < bo->placement.num_placement; i++) ast_bo_pin()
367 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ast_bo_pin()
368 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); ast_bo_pin()
372 bo->pin_count = 1; ast_bo_pin()
374 *gpu_addr = ast_bo_gpu_offset(bo); ast_bo_pin()
378 int ast_bo_unpin(struct ast_bo *bo) ast_bo_unpin() argument
381 if (!bo->pin_count) { ast_bo_unpin()
382 DRM_ERROR("unpin bad %p\n", bo); ast_bo_unpin()
385 bo->pin_count--; ast_bo_unpin()
386 if (bo->pin_count) ast_bo_unpin()
389 for (i = 0; i < bo->placement.num_placement ; i++) ast_bo_unpin()
390 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ast_bo_unpin()
391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); ast_bo_unpin()
398 int ast_bo_push_sysram(struct ast_bo *bo) ast_bo_push_sysram() argument
401 if (!bo->pin_count) { ast_bo_push_sysram()
402 DRM_ERROR("unpin bad %p\n", bo); ast_bo_push_sysram()
405 bo->pin_count--; ast_bo_push_sysram()
406 if (bo->pin_count) ast_bo_push_sysram()
409 if (bo->kmap.virtual) ast_bo_push_sysram()
410 ttm_bo_kunmap(&bo->kmap); ast_bo_push_sysram()
412 ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); ast_bo_push_sysram()
413 for (i = 0; i < bo->placement.num_placement ; i++) ast_bo_push_sysram()
414 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; ast_bo_push_sysram()
416 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); ast_bo_push_sysram()
H A Dast_drv.h320 struct ttm_buffer_object bo; member in struct:ast_bo
330 ast_bo(struct ttm_buffer_object *bo) ast_bo() argument
332 return container_of(bo, struct ast_bo, bo); ast_bo()
363 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr);
364 int ast_bo_unpin(struct ast_bo *bo);
366 static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait) ast_bo_reserve() argument
370 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); ast_bo_reserve()
373 DRM_ERROR("reserve failed %p\n", bo); ast_bo_reserve()
379 static inline void ast_bo_unreserve(struct ast_bo *bo) ast_bo_unreserve() argument
381 ttm_bo_unreserve(&bo->bo); ast_bo_unreserve()
384 void ast_ttm_placement(struct ast_bo *bo, int domain);
385 int ast_bo_push_sysram(struct ast_bo *bo);
H A Dast_fb.c51 struct ast_bo *bo; ast_dirty_update() local
61 bo = gem_to_ast_bo(obj); ast_dirty_update()
69 ret = ast_bo_reserve(bo, true); ast_dirty_update()
103 if (!bo->kmap.virtual) { ast_dirty_update()
104 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); ast_dirty_update()
107 ast_bo_unreserve(bo); ast_dirty_update()
115 memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp); ast_dirty_update()
119 ttm_bo_kunmap(&bo->kmap); ast_dirty_update()
121 ast_bo_unreserve(bo); ast_dirty_update()
199 struct ast_bo *bo = NULL; astfb_create() local
214 bo = gem_to_ast_bo(gobj); astfb_create()
H A Dast_mode.c515 struct ast_bo *bo; ast_crtc_do_set_base() local
523 bo = gem_to_ast_bo(obj); ast_crtc_do_set_base()
524 ret = ast_bo_reserve(bo, false); ast_crtc_do_set_base()
527 ast_bo_push_sysram(bo); ast_crtc_do_set_base()
528 ast_bo_unreserve(bo); ast_crtc_do_set_base()
533 bo = gem_to_ast_bo(obj); ast_crtc_do_set_base()
535 ret = ast_bo_reserve(bo, false); ast_crtc_do_set_base()
539 ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); ast_crtc_do_set_base()
541 ast_bo_unreserve(bo); ast_crtc_do_set_base()
547 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); ast_crtc_do_set_base()
551 ast_bo_unreserve(bo); ast_crtc_do_set_base()
912 struct ast_bo *bo; ast_cursor_init() local
920 bo = gem_to_ast_bo(obj); ast_cursor_init()
921 ret = ast_bo_reserve(bo, false); ast_cursor_init()
925 ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); ast_cursor_init()
926 ast_bo_unreserve(bo); ast_cursor_init()
931 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &ast->cache_kmap); ast_cursor_init()
1143 struct ast_bo *bo; ast_cursor_set() local
1163 bo = gem_to_ast_bo(obj); ast_cursor_set()
1165 ret = ast_bo_reserve(bo, false); ast_cursor_set()
1169 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map); ast_cursor_set()
1175 DRM_ERROR("src cursor bo should be in main memory\n"); ast_cursor_set()
1177 DRM_ERROR("dst bo should be in VRAM\n"); ast_cursor_set()
1186 ast_bo_unreserve(bo); ast_cursor_set()
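The ast_mode.c hits show the scanout setup sequence built from the helpers above: reserve the bo, pin it into VRAM to get a stable GPU address, optionally kmap it for the fbcon path, then unreserve. Condensed into one sketch with error handling trimmed.

static int example_pin_scanout(struct ast_bo *bo, u64 *gpu_addr)
{
        int ret;

        ret = ast_bo_reserve(bo, false);
        if (ret)
                return ret;

        ret = ast_bo_pin(bo, TTM_PL_FLAG_VRAM, gpu_addr);
        ast_bo_unreserve(bo);
        return ret;
}

Pinning works by OR-ing TTM_PL_FLAG_NO_EVICT into every placement and revalidating, as the ast_bo_pin() hits show, so the buffer stays at the returned offset until ast_bo_unpin() or ast_bo_push_sysram() clears the flag.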
H A Dast_main.c543 static void ast_bo_unref(struct ast_bo **bo) ast_bo_unref() argument
547 if ((*bo) == NULL) ast_bo_unref()
550 tbo = &((*bo)->bo); ast_bo_unref()
552 *bo = NULL; ast_bo_unref()
563 static inline u64 ast_bo_mmap_offset(struct ast_bo *bo) ast_bo_mmap_offset() argument
565 return drm_vma_node_offset_addr(&bo->bo.vma_node); ast_bo_mmap_offset()
575 struct ast_bo *bo; ast_dumb_mmap_offset() local
584 bo = gem_to_ast_bo(obj); ast_dumb_mmap_offset()
585 *offset = ast_bo_mmap_offset(bo); ast_dumb_mmap_offset()
/linux-4.1.27/drivers/gpu/drm/mgag200/
H A Dmgag200_ttm.c97 struct mgag200_bo *bo; mgag200_bo_ttm_destroy() local
99 bo = container_of(tbo, struct mgag200_bo, bo); mgag200_bo_ttm_destroy()
101 drm_gem_object_release(&bo->gem); mgag200_bo_ttm_destroy()
102 kfree(bo); mgag200_bo_ttm_destroy()
105 static bool mgag200_ttm_bo_is_mgag200_bo(struct ttm_buffer_object *bo) mgag200_ttm_bo_is_mgag200_bo() argument
107 if (bo->destroy == &mgag200_bo_ttm_destroy) mgag200_ttm_bo_is_mgag200_bo()
138 mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) mgag200_bo_evict_flags() argument
140 struct mgag200_bo *mgabo = mgag200_bo(bo); mgag200_bo_evict_flags()
142 if (!mgag200_ttm_bo_is_mgag200_bo(bo)) mgag200_bo_evict_flags()
149 static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) mgag200_bo_verify_access() argument
151 struct mgag200_bo *mgabo = mgag200_bo(bo); mgag200_bo_verify_access()
189 static int mgag200_bo_move(struct ttm_buffer_object *bo, mgag200_bo_move() argument
195 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); mgag200_bo_move()
267 DRM_ERROR("Error initialising bo driver; %d\n", ret); mgag200_mm_init()
293 void mgag200_ttm_placement(struct mgag200_bo *bo, int domain) mgag200_ttm_placement() argument
298 bo->placement.placement = bo->placements; mgag200_ttm_placement()
299 bo->placement.busy_placement = bo->placements; mgag200_ttm_placement()
301 bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; mgag200_ttm_placement()
303 bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; mgag200_ttm_placement()
305 bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; mgag200_ttm_placement()
306 bo->placement.num_placement = c; mgag200_ttm_placement()
307 bo->placement.num_busy_placement = c; mgag200_ttm_placement()
309 bo->placements[i].fpfn = 0; mgag200_ttm_placement()
310 bo->placements[i].lpfn = 0; mgag200_ttm_placement()
332 mgabo->bo.bdev = &mdev->ttm.bdev; mgag200_bo_create()
339 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, mgag200_bo_create()
350 static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo) mgag200_bo_gpu_offset() argument
352 return bo->bo.offset; mgag200_bo_gpu_offset()
355 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr) mgag200_bo_pin() argument
359 if (bo->pin_count) { mgag200_bo_pin()
360 bo->pin_count++; mgag200_bo_pin()
362 *gpu_addr = mgag200_bo_gpu_offset(bo); mgag200_bo_pin()
366 mgag200_ttm_placement(bo, pl_flag); mgag200_bo_pin()
367 for (i = 0; i < bo->placement.num_placement; i++) mgag200_bo_pin()
368 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; mgag200_bo_pin()
369 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); mgag200_bo_pin()
373 bo->pin_count = 1; mgag200_bo_pin()
375 *gpu_addr = mgag200_bo_gpu_offset(bo); mgag200_bo_pin()
379 int mgag200_bo_unpin(struct mgag200_bo *bo) mgag200_bo_unpin() argument
382 if (!bo->pin_count) { mgag200_bo_unpin()
383 DRM_ERROR("unpin bad %p\n", bo); mgag200_bo_unpin()
386 bo->pin_count--; mgag200_bo_unpin()
387 if (bo->pin_count) mgag200_bo_unpin()
390 for (i = 0; i < bo->placement.num_placement ; i++) mgag200_bo_unpin()
391 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; mgag200_bo_unpin()
392 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); mgag200_bo_unpin()
399 int mgag200_bo_push_sysram(struct mgag200_bo *bo) mgag200_bo_push_sysram() argument
402 if (!bo->pin_count) { mgag200_bo_push_sysram()
403 DRM_ERROR("unpin bad %p\n", bo); mgag200_bo_push_sysram()
406 bo->pin_count--; mgag200_bo_push_sysram()
407 if (bo->pin_count) mgag200_bo_push_sysram()
410 if (bo->kmap.virtual) mgag200_bo_push_sysram()
411 ttm_bo_kunmap(&bo->kmap); mgag200_bo_push_sysram()
413 mgag200_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); mgag200_bo_push_sysram()
414 for (i = 0; i < bo->placement.num_placement ; i++) mgag200_bo_push_sysram()
415 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; mgag200_bo_push_sysram()
417 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); mgag200_bo_push_sysram()
H A Dmgag200_cursor.c44 struct mgag200_bo *bo = NULL; mga_crtc_cursor_set() local
119 bo = gem_to_mga_bo(obj); mga_crtc_cursor_set()
120 ret = mgag200_bo_reserve(bo, true); mga_crtc_cursor_set()
122 dev_err(&dev->pdev->dev, "failed to reserve user bo\n"); mga_crtc_cursor_set()
125 if (!bo->kmap.virtual) { mga_crtc_cursor_set()
126 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); mga_crtc_cursor_set()
136 this_colour = ioread32(bo->kmap.virtual + i); mga_crtc_cursor_set()
189 ret = ttm_bo_kmap(&pixels_prev->bo, 0, mga_crtc_cursor_set()
190 pixels_prev->bo.num_pages, mga_crtc_cursor_set()
202 this_colour = ioread32(bo->kmap.virtual + 4*(col + 64*row)); mga_crtc_cursor_set()
248 ttm_bo_kunmap(&bo->kmap); mga_crtc_cursor_set()
250 mgag200_bo_unreserve(bo); mga_crtc_cursor_set()
H A Dmgag200_drv.h223 struct ttm_buffer_object bo; member in struct:mgag200_bo
233 mgag200_bo(struct ttm_buffer_object *bo) mgag200_bo() argument
235 return container_of(bo, struct mgag200_bo, bo); mgag200_bo()
277 void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
279 static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait) mgag200_bo_reserve() argument
283 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); mgag200_bo_reserve()
286 DRM_ERROR("reserve failed %p\n", bo); mgag200_bo_reserve()
292 static inline void mgag200_bo_unreserve(struct mgag200_bo *bo) mgag200_bo_unreserve() argument
294 ttm_bo_unreserve(&bo->bo); mgag200_bo_unreserve()
302 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr);
303 int mgag200_bo_unpin(struct mgag200_bo *bo);
304 int mgag200_bo_push_sysram(struct mgag200_bo *bo);
H A Dmgag200_fb.c27 struct mgag200_bo *bo; mga_dirty_update() local
37 bo = gem_to_mga_bo(obj); mga_dirty_update()
45 ret = mgag200_bo_reserve(bo, true); mga_dirty_update()
79 if (!bo->kmap.virtual) { mga_dirty_update()
80 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); mga_dirty_update()
83 mgag200_bo_unreserve(bo); mga_dirty_update()
91 memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp); mga_dirty_update()
95 ttm_bo_kunmap(&bo->kmap); mga_dirty_update()
97 mgag200_bo_unreserve(bo); mga_dirty_update()
170 struct mgag200_bo *bo; mgag200fb_create() local
188 bo = gem_to_mga_bo(gobj); mgag200fb_create()
H A Dmgag200_main.c316 static void mgag200_bo_unref(struct mgag200_bo **bo) mgag200_bo_unref() argument
320 if ((*bo) == NULL) mgag200_bo_unref()
323 tbo = &((*bo)->bo); mgag200_bo_unref()
325 *bo = NULL; mgag200_bo_unref()
336 static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo) mgag200_bo_mmap_offset() argument
338 return drm_vma_node_offset_addr(&bo->bo.vma_node); mgag200_bo_mmap_offset()
349 struct mgag200_bo *bo; mgag200_dumb_mmap_offset() local
358 bo = gem_to_mga_bo(obj); mgag200_dumb_mmap_offset()
359 *offset = mgag200_bo_mmap_offset(bo); mgag200_dumb_mmap_offset()
H A Dmgag200_mode.c730 struct mgag200_bo *bo; mga_crtc_do_set_base() local
738 bo = gem_to_mga_bo(obj); mga_crtc_do_set_base()
739 ret = mgag200_bo_reserve(bo, false); mga_crtc_do_set_base()
742 mgag200_bo_push_sysram(bo); mga_crtc_do_set_base()
743 mgag200_bo_unreserve(bo); mga_crtc_do_set_base()
748 bo = gem_to_mga_bo(obj); mga_crtc_do_set_base()
750 ret = mgag200_bo_reserve(bo, false); mga_crtc_do_set_base()
754 ret = mgag200_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); mga_crtc_do_set_base()
756 mgag200_bo_unreserve(bo); mga_crtc_do_set_base()
762 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); mga_crtc_do_set_base()
767 mgag200_bo_unreserve(bo); mga_crtc_do_set_base()
1284 struct mgag200_bo *bo = gem_to_mga_bo(obj); mga_crtc_disable() local
1285 ret = mgag200_bo_reserve(bo, false); mga_crtc_disable()
1288 mgag200_bo_push_sysram(bo); mga_crtc_disable()
1289 mgag200_bo_unreserve(bo); mga_crtc_disable()
/linux-4.1.27/drivers/gpu/drm/bochs/
H A Dbochs_mm.c10 static void bochs_ttm_placement(struct bochs_bo *bo, int domain);
76 struct bochs_bo *bo; bochs_bo_ttm_destroy() local
78 bo = container_of(tbo, struct bochs_bo, bo); bochs_bo_ttm_destroy()
79 drm_gem_object_release(&bo->gem); bochs_bo_ttm_destroy()
80 kfree(bo); bochs_bo_ttm_destroy()
83 static bool bochs_ttm_bo_is_bochs_bo(struct ttm_buffer_object *bo) bochs_ttm_bo_is_bochs_bo() argument
85 if (bo->destroy == &bochs_bo_ttm_destroy) bochs_ttm_bo_is_bochs_bo()
115 bochs_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) bochs_bo_evict_flags() argument
117 struct bochs_bo *bochsbo = bochs_bo(bo); bochs_bo_evict_flags()
119 if (!bochs_ttm_bo_is_bochs_bo(bo)) bochs_bo_evict_flags()
126 static int bochs_bo_verify_access(struct ttm_buffer_object *bo, bochs_bo_verify_access() argument
129 struct bochs_bo *bochsbo = bochs_bo(bo); bochs_bo_verify_access()
168 static int bochs_bo_move(struct ttm_buffer_object *bo, bochs_bo_move() argument
173 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); bochs_bo_move()
233 DRM_ERROR("Error initialising bo driver; %d\n", ret); bochs_mm_init()
258 static void bochs_ttm_placement(struct bochs_bo *bo, int domain) bochs_ttm_placement() argument
262 bo->placement.placement = bo->placements; bochs_ttm_placement()
263 bo->placement.busy_placement = bo->placements; bochs_ttm_placement()
265 bo->placements[c++].flags = TTM_PL_FLAG_WC bochs_ttm_placement()
270 bo->placements[c++].flags = TTM_PL_MASK_CACHING bochs_ttm_placement()
274 bo->placements[c++].flags = TTM_PL_MASK_CACHING bochs_ttm_placement()
278 bo->placements[i].fpfn = 0; bochs_ttm_placement()
279 bo->placements[i].lpfn = 0; bochs_ttm_placement()
281 bo->placement.num_placement = c; bochs_ttm_placement()
282 bo->placement.num_busy_placement = c; bochs_ttm_placement()
285 static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo) bochs_bo_gpu_offset() argument
287 return bo->bo.offset; bochs_bo_gpu_offset()
290 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr) bochs_bo_pin() argument
294 if (bo->pin_count) { bochs_bo_pin()
295 bo->pin_count++; bochs_bo_pin()
297 *gpu_addr = bochs_bo_gpu_offset(bo); bochs_bo_pin()
301 bochs_ttm_placement(bo, pl_flag); bochs_bo_pin()
302 for (i = 0; i < bo->placement.num_placement; i++) bochs_bo_pin()
303 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; bochs_bo_pin()
304 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); bochs_bo_pin()
308 bo->pin_count = 1; bochs_bo_pin()
310 *gpu_addr = bochs_bo_gpu_offset(bo); bochs_bo_pin()
314 int bochs_bo_unpin(struct bochs_bo *bo) bochs_bo_unpin() argument
318 if (!bo->pin_count) { bochs_bo_unpin()
319 DRM_ERROR("unpin bad %p\n", bo); bochs_bo_unpin()
322 bo->pin_count--; bochs_bo_unpin()
324 if (bo->pin_count) bochs_bo_unpin()
327 for (i = 0; i < bo->placement.num_placement; i++) bochs_bo_unpin()
328 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; bochs_bo_unpin()
329 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); bochs_bo_unpin()
369 bochsbo->bo.bdev = &bochs->ttm.bdev; bochs_bo_create()
370 bochsbo->bo.bdev->dev_mapping = dev->anon_inode->i_mapping; bochs_bo_create()
377 ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size, bochs_bo_create()
434 static void bochs_bo_unref(struct bochs_bo **bo) bochs_bo_unref() argument
438 if ((*bo) == NULL) bochs_bo_unref()
441 tbo = &((*bo)->bo); bochs_bo_unref()
443 *bo = NULL; bochs_bo_unref()
458 struct bochs_bo *bo; bochs_dumb_mmap_offset() local
467 bo = gem_to_bochs_bo(obj); bochs_dumb_mmap_offset()
468 *offset = bochs_bo_mmap_offset(bo); bochs_dumb_mmap_offset()
H A Dbochs_fbdev.c18 struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj); bochsfb_mmap() local
20 return ttm_fbdev_mmap(vma, &bo->bo); bochsfb_mmap()
65 struct bochs_bo *bo = NULL; bochsfb_create() local
78 /* alloc, pin & map bo */ bochsfb_create()
85 bo = gem_to_bochs_bo(gobj); bochsfb_create()
87 ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); bochsfb_create()
91 ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); bochsfb_create()
94 ttm_bo_unreserve(&bo->bo); bochsfb_create()
98 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, bochsfb_create()
99 &bo->kmap); bochsfb_create()
102 ttm_bo_unreserve(&bo->bo); bochsfb_create()
106 ttm_bo_unreserve(&bo->bo); bochsfb_create()
135 info->screen_base = bo->kmap.virtual; bochsfb_create()
138 drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node); bochsfb_create()
H A Dbochs.h100 struct ttm_buffer_object bo; member in struct:bochs_bo
108 static inline struct bochs_bo *bochs_bo(struct ttm_buffer_object *bo) bochs_bo() argument
110 return container_of(bo, struct bochs_bo, bo); bochs_bo()
120 static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo) bochs_bo_mmap_offset() argument
122 return drm_vma_node_offset_addr(&bo->bo.vma_node); bochs_bo_mmap_offset()
154 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr);
155 int bochs_bo_unpin(struct bochs_bo *bo);
H A Dbochs_kms.c46 struct bochs_bo *bo; bochs_crtc_mode_set_base() local
52 bo = gem_to_bochs_bo(bochs_fb->obj); bochs_crtc_mode_set_base()
53 ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); bochs_crtc_mode_set_base()
55 DRM_ERROR("failed to reserve old_fb bo\n"); bochs_crtc_mode_set_base()
57 bochs_bo_unpin(bo); bochs_crtc_mode_set_base()
58 ttm_bo_unreserve(&bo->bo); bochs_crtc_mode_set_base()
66 bo = gem_to_bochs_bo(bochs_fb->obj); bochs_crtc_mode_set_base()
67 ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL); bochs_crtc_mode_set_base()
71 ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); bochs_crtc_mode_set_base()
73 ttm_bo_unreserve(&bo->bo); bochs_crtc_mode_set_base()
77 ttm_bo_unreserve(&bo->bo); bochs_crtc_mode_set_base()
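
    bochs_crtc_mode_set_base() above unpins the previous scanout buffer before reserving and pinning the new one. A condensed sketch of that swap, using only the bochs helpers shown in these results; the function name and the framebuffer plumbing around it are illustrative:

    static int bochs_swap_scanout(struct bochs_bo *old_bo, struct bochs_bo *new_bo,
                                  u64 *gpu_addr)
    {
            int ret;

            /* Release the pin on the outgoing framebuffer, if any. */
            if (old_bo) {
                    ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
                    if (ret == 0) {
                            bochs_bo_unpin(old_bo);
                            ttm_bo_unreserve(&old_bo->bo);
                    }
            }

            /* Pin the incoming framebuffer into VRAM and report its offset. */
            ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
            if (ret)
                    return ret;

            ret = bochs_bo_pin(new_bo, TTM_PL_FLAG_VRAM, gpu_addr);
            ttm_bo_unreserve(&new_bo->bo);
            return ret;
    }
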
/linux-4.1.27/drivers/gpu/drm/cirrus/
H A Dcirrus_ttm.c97 struct cirrus_bo *bo; cirrus_bo_ttm_destroy() local
99 bo = container_of(tbo, struct cirrus_bo, bo); cirrus_bo_ttm_destroy()
101 drm_gem_object_release(&bo->gem); cirrus_bo_ttm_destroy()
102 kfree(bo); cirrus_bo_ttm_destroy()
105 static bool cirrus_ttm_bo_is_cirrus_bo(struct ttm_buffer_object *bo) cirrus_ttm_bo_is_cirrus_bo() argument
107 if (bo->destroy == &cirrus_bo_ttm_destroy) cirrus_ttm_bo_is_cirrus_bo()
138 cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) cirrus_bo_evict_flags() argument
140 struct cirrus_bo *cirrusbo = cirrus_bo(bo); cirrus_bo_evict_flags()
142 if (!cirrus_ttm_bo_is_cirrus_bo(bo)) cirrus_bo_evict_flags()
149 static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) cirrus_bo_verify_access() argument
151 struct cirrus_bo *cirrusbo = cirrus_bo(bo); cirrus_bo_verify_access()
189 static int cirrus_bo_move(struct ttm_buffer_object *bo, cirrus_bo_move() argument
195 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); cirrus_bo_move()
267 DRM_ERROR("Error initialising bo driver; %d\n", ret); cirrus_mm_init()
298 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) cirrus_ttm_placement() argument
302 bo->placement.placement = bo->placements; cirrus_ttm_placement()
303 bo->placement.busy_placement = bo->placements; cirrus_ttm_placement()
305 bo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; cirrus_ttm_placement()
307 bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; cirrus_ttm_placement()
309 bo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; cirrus_ttm_placement()
310 bo->placement.num_placement = c; cirrus_ttm_placement()
311 bo->placement.num_busy_placement = c; cirrus_ttm_placement()
313 bo->placements[i].fpfn = 0; cirrus_ttm_placement()
314 bo->placements[i].lpfn = 0; cirrus_ttm_placement()
336 cirrusbo->bo.bdev = &cirrus->ttm.bdev; cirrus_bo_create()
343 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, cirrus_bo_create()
354 static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo) cirrus_bo_gpu_offset() argument
356 return bo->bo.offset; cirrus_bo_gpu_offset()
359 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr) cirrus_bo_pin() argument
363 if (bo->pin_count) { cirrus_bo_pin()
364 bo->pin_count++; cirrus_bo_pin()
366 *gpu_addr = cirrus_bo_gpu_offset(bo); cirrus_bo_pin()
369 cirrus_ttm_placement(bo, pl_flag); cirrus_bo_pin()
370 for (i = 0; i < bo->placement.num_placement; i++) cirrus_bo_pin()
371 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; cirrus_bo_pin()
372 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); cirrus_bo_pin()
376 bo->pin_count = 1; cirrus_bo_pin()
378 *gpu_addr = cirrus_bo_gpu_offset(bo); cirrus_bo_pin()
382 int cirrus_bo_push_sysram(struct cirrus_bo *bo) cirrus_bo_push_sysram() argument
385 if (!bo->pin_count) { cirrus_bo_push_sysram()
386 DRM_ERROR("unpin bad %p\n", bo); cirrus_bo_push_sysram()
389 bo->pin_count--; cirrus_bo_push_sysram()
390 if (bo->pin_count) cirrus_bo_push_sysram()
393 if (bo->kmap.virtual) cirrus_bo_push_sysram()
394 ttm_bo_kunmap(&bo->kmap); cirrus_bo_push_sysram()
396 cirrus_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); cirrus_bo_push_sysram()
397 for (i = 0; i < bo->placement.num_placement ; i++) cirrus_bo_push_sysram()
398 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; cirrus_bo_push_sysram()
400 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); cirrus_bo_push_sysram()
H A Dcirrus_fbdev.c25 struct cirrus_bo *bo; cirrus_dirty_update() local
35 bo = gem_to_cirrus_bo(obj); cirrus_dirty_update()
43 ret = cirrus_bo_reserve(bo, true); cirrus_dirty_update()
76 if (!bo->kmap.virtual) { cirrus_dirty_update()
77 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); cirrus_dirty_update()
80 cirrus_bo_unreserve(bo); cirrus_dirty_update()
88 memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp); cirrus_dirty_update()
92 ttm_bo_kunmap(&bo->kmap); cirrus_dirty_update()
94 cirrus_bo_unreserve(bo); cirrus_dirty_update()
176 struct cirrus_bo *bo = NULL; cirrusfb_create() local
192 bo = gem_to_cirrus_bo(gobj); cirrusfb_create()
H A Dcirrus_drv.h164 struct ttm_buffer_object bo; member in struct:cirrus_bo
174 cirrus_bo(struct ttm_buffer_object *bo) cirrus_bo() argument
176 return container_of(bo, struct cirrus_bo, bo); cirrus_bo()
240 void cirrus_ttm_placement(struct cirrus_bo *bo, int domain);
245 static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) cirrus_bo_reserve() argument
249 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL); cirrus_bo_reserve()
252 DRM_ERROR("reserve failed %p\n", bo); cirrus_bo_reserve()
258 static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) cirrus_bo_unreserve() argument
260 ttm_bo_unreserve(&bo->bo); cirrus_bo_unreserve()
263 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
264 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
H A Dcirrus_main.c264 static void cirrus_bo_unref(struct cirrus_bo **bo) cirrus_bo_unref() argument
268 if ((*bo) == NULL) cirrus_bo_unref()
271 tbo = &((*bo)->bo); cirrus_bo_unref()
273 *bo = NULL; cirrus_bo_unref()
284 static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo) cirrus_bo_mmap_offset() argument
286 return drm_vma_node_offset_addr(&bo->bo.vma_node); cirrus_bo_mmap_offset()
297 struct cirrus_bo *bo; cirrus_dumb_mmap_offset() local
306 bo = gem_to_cirrus_bo(obj); cirrus_dumb_mmap_offset()
307 *offset = cirrus_bo_mmap_offset(bo); cirrus_dumb_mmap_offset()
H A Dcirrus_mode.c137 struct cirrus_bo *bo; cirrus_crtc_do_set_base() local
145 bo = gem_to_cirrus_bo(obj); cirrus_crtc_do_set_base()
146 ret = cirrus_bo_reserve(bo, false); cirrus_crtc_do_set_base()
149 cirrus_bo_push_sysram(bo); cirrus_crtc_do_set_base()
150 cirrus_bo_unreserve(bo); cirrus_crtc_do_set_base()
155 bo = gem_to_cirrus_bo(obj); cirrus_crtc_do_set_base()
157 ret = cirrus_bo_reserve(bo, false); cirrus_crtc_do_set_base()
161 ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); cirrus_crtc_do_set_base()
163 cirrus_bo_unreserve(bo); cirrus_crtc_do_set_base()
169 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); cirrus_crtc_do_set_base()
173 cirrus_bo_unreserve(bo); cirrus_crtc_do_set_base()
/linux-4.1.27/include/linux/
H A Dhost1x.h61 struct host1x_bo *(*get)(struct host1x_bo *bo);
62 void (*put)(struct host1x_bo *bo);
63 dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
64 void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
65 void *(*mmap)(struct host1x_bo *bo);
66 void (*munmap)(struct host1x_bo *bo, void *addr);
67 void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
68 void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
75 static inline void host1x_bo_init(struct host1x_bo *bo, host1x_bo_init() argument
78 bo->ops = ops; host1x_bo_init()
81 static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo) host1x_bo_get() argument
83 return bo->ops->get(bo); host1x_bo_get()
86 static inline void host1x_bo_put(struct host1x_bo *bo) host1x_bo_put() argument
88 bo->ops->put(bo); host1x_bo_put()
91 static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo, host1x_bo_pin() argument
94 return bo->ops->pin(bo, sgt); host1x_bo_pin()
97 static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) host1x_bo_unpin() argument
99 bo->ops->unpin(bo, sgt); host1x_bo_unpin()
102 static inline void *host1x_bo_mmap(struct host1x_bo *bo) host1x_bo_mmap() argument
104 return bo->ops->mmap(bo); host1x_bo_mmap()
107 static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr) host1x_bo_munmap() argument
109 bo->ops->munmap(bo, addr); host1x_bo_munmap()
112 static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum) host1x_bo_kmap() argument
114 return bo->ops->kmap(bo, pagenum); host1x_bo_kmap()
117 static inline void host1x_bo_kunmap(struct host1x_bo *bo, host1x_bo_kunmap() argument
120 bo->ops->kunmap(bo, pagenum, addr); host1x_bo_kunmap()
169 struct host1x_bo *bo; member in struct:host1x_reloc::__anon11719
173 struct host1x_bo *bo; member in struct:host1x_reloc::__anon11720
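
    host1x.h describes buffer objects purely through the host1x_bo_ops vtable plus the inline wrappers above: a backend embeds struct host1x_bo in its own buffer type and installs its ops with host1x_bo_init(). A hypothetical skeleton of such a backend; all my_* names are illustrative, and a real implementation must also supply the pin/unpin and mapping callbacks, since the wrappers call them unconditionally:

    #include <linux/host1x.h>

    struct my_bo {
            struct host1x_bo base;
            /* driver-private storage: pages, sg_table, vaddr, kref, ... */
    };

    static struct host1x_bo *my_bo_get(struct host1x_bo *bo)
    {
            /* take a reference on the containing object, then hand bo back */
            return bo;
    }

    static void my_bo_put(struct host1x_bo *bo)
    {
            /* drop the reference taken in my_bo_get() */
    }

    static const struct host1x_bo_ops my_bo_ops = {
            .get = my_bo_get,
            .put = my_bo_put,
            /* .pin, .unpin, .mmap, .munmap, .kmap, .kunmap omitted in this sketch */
    };

    static void my_bo_setup(struct my_bo *obj)
    {
            host1x_bo_init(&obj->base, &my_bo_ops);
    }
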
H A Dreservation.h13 * Based on bo.c which bears the following copyright notice,
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnv50_fence.c40 struct ttm_mem_reg *mem = &priv->bo->bo.mem; nv50_fence_context_new()
65 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); nv50_fence_context_new() local
66 u32 start = bo->bo.mem.start * PAGE_SIZE; nv50_fence_context_new()
67 u32 limit = start + bo->bo.mem.size - 1; nv50_fence_context_new()
103 0, 0x0000, NULL, NULL, &priv->bo); nv50_fence_create()
105 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); nv50_fence_create()
107 ret = nouveau_bo_map(priv->bo); nv50_fence_create()
109 nouveau_bo_unpin(priv->bo); nv50_fence_create()
112 nouveau_bo_ref(NULL, &priv->bo); nv50_fence_create()
120 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); nv50_fence_create()
H A Dnv10_fence.h15 struct nouveau_bo *bo; member in struct:nv10_fence_priv
H A Dnouveau_gem.c39 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_gem_object_del()
40 struct ttm_buffer_object *bo = &nvbo->bo; nouveau_gem_object_del() local
49 drm_prime_gem_destroy(gem, nvbo->bo.sg); nouveau_gem_object_del()
55 ttm_bo_unref(&bo); nouveau_gem_object_del()
66 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_gem_object_open()
74 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); nouveau_gem_object_open()
101 ttm_bo_unreserve(&nvbo->bo); nouveau_gem_object_open()
117 const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM; nouveau_gem_object_unmap()
118 struct reservation_object *resv = nvbo->bo.resv; nouveau_gem_object_unmap()
127 ttm_bo_wait(&nvbo->bo, true, false, false); nouveau_gem_object_unmap()
132 fence = reservation_object_get_excl(nvbo->bo.resv); nouveau_gem_object_unmap()
149 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_gem_object_close()
157 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); nouveau_gem_object_close()
172 ttm_bo_unreserve(&nvbo->bo); nouveau_gem_object_close()
212 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); nouveau_gem_new()
218 nvbo->bo.persistent_swap_storage = nvbo->gem.filp; nouveau_gem_new()
232 else if (nvbo->bo.mem.mem_type == TTM_PL_TT) nouveau_gem_info()
236 rep->offset = nvbo->bo.offset; nouveau_gem_info()
245 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; nouveau_gem_info()
246 rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); nouveau_gem_info()
291 struct ttm_buffer_object *bo = &nvbo->bo; nouveau_gem_set_domain() local
306 bo->mem.mem_type == TTM_PL_VRAM) nouveau_gem_set_domain()
310 bo->mem.mem_type == TTM_PL_TT) nouveau_gem_set_domain()
350 ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket); validate_fini_no_ticket()
410 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket); validate_init()
417 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, validate_init()
499 if (nvbo->bo.offset == b->presumed.offset && list_for_each_entry()
500 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && list_for_each_entry()
502 (nvbo->bo.mem.mem_type == TTM_PL_TT && list_for_each_entry()
506 if (nvbo->bo.mem.mem_type == TTM_PL_TT) list_for_each_entry()
510 b->presumed.offset = nvbo->bo.offset; list_for_each_entry()
548 NV_PRINTK(error, cli, "validating bo list\n"); nouveau_gem_pushbuf_validate()
590 struct drm_nouveau_gem_pushbuf_bo *bo) nouveau_gem_pushbuf_reloc_apply()
607 NV_PRINTK(error, cli, "reloc bo index invalid\n"); nouveau_gem_pushbuf_reloc_apply()
612 b = &bo[r->bo_index]; nouveau_gem_pushbuf_reloc_apply()
617 NV_PRINTK(error, cli, "reloc container bo index invalid\n"); nouveau_gem_pushbuf_reloc_apply()
621 nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; nouveau_gem_pushbuf_reloc_apply()
624 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { nouveau_gem_pushbuf_reloc_apply()
625 NV_PRINTK(error, cli, "reloc outside of bo\n"); nouveau_gem_pushbuf_reloc_apply()
631 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, nouveau_gem_pushbuf_reloc_apply()
655 ret = ttm_bo_wait(&nvbo->bo, true, false, false); nouveau_gem_pushbuf_reloc_apply()
678 struct drm_nouveau_gem_pushbuf_bo *bo; nouveau_gem_ioctl_pushbuf() local
709 NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n", nouveau_gem_ioctl_pushbuf()
724 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); nouveau_gem_ioctl_pushbuf()
725 if (IS_ERR(bo)) { nouveau_gem_ioctl_pushbuf()
727 return nouveau_abi16_put(abi16, PTR_ERR(bo)); nouveau_gem_ioctl_pushbuf()
740 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, nouveau_gem_ioctl_pushbuf()
750 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); nouveau_gem_ioctl_pushbuf()
766 bo[push[i].bo_index].user_priv; nouveau_gem_ioctl_pushbuf()
781 bo[push[i].bo_index].user_priv; nouveau_gem_ioctl_pushbuf()
783 OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2); nouveau_gem_ioctl_pushbuf()
795 bo[push[i].bo_index].user_priv; nouveau_gem_ioctl_pushbuf()
802 ret = ttm_bo_kmap(&nvbo->bo, 0, nouveau_gem_ioctl_pushbuf()
803 nvbo->bo.mem. nouveau_gem_ioctl_pushbuf()
818 (nvbo->bo.offset + push[i].offset)); nouveau_gem_ioctl_pushbuf()
833 validate_fini(&op, fence, bo); nouveau_gem_ioctl_pushbuf()
837 u_free(bo); nouveau_gem_ioctl_pushbuf()
874 ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY; nouveau_gem_ioctl_cpu_prep()
878 lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ); nouveau_gem_ioctl_cpu_prep()
588 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, struct drm_nouveau_gem_pushbuf *req, struct drm_nouveau_gem_pushbuf_bo *bo) nouveau_gem_pushbuf_reloc_apply() argument
H A Dnouveau_bo.h11 struct ttm_buffer_object bo; member in struct:nouveau_bo
45 nouveau_bo(struct ttm_buffer_object *bo) nouveau_bo() argument
47 return container_of(bo, struct nouveau_bo, bo); nouveau_bo()
59 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL; nouveau_bo_ref()
61 struct ttm_buffer_object *bo = &prev->bo; nouveau_bo_ref() local
63 ttm_bo_unref(&bo); nouveau_bo_ref()
H A Dnouveau_bo.c134 nouveau_bo_del_ttm(struct ttm_buffer_object *bo) nouveau_bo_del_ttm() argument
136 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_del_ttm()
138 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_bo_del_ttm()
141 DRM_ERROR("bo %p still attached to GEM object\n", bo); nouveau_bo_del_ttm()
151 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_fixup_align()
215 nvbo->bo.bdev = &drm->ttm.bdev; nouveau_bo_new()
227 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; nouveau_bo_new()
233 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, nouveau_bo_new()
262 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); set_placement_range()
268 nvbo->bo.mem.num_pages < vram_pages / 4) { set_placement_range()
315 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_pin()
316 struct ttm_buffer_object *bo = &nvbo->bo; nouveau_bo_pin() local
320 ret = ttm_bo_reserve(bo, false, false, false, NULL); nouveau_bo_pin()
327 if (bo->mem.mem_type == TTM_PL_VRAM) { nouveau_bo_pin()
328 struct nvkm_mem *mem = bo->mem.mm_node; nouveau_bo_pin()
338 if (!(memtype & (1 << bo->mem.mem_type)) || evict) { nouveau_bo_pin()
339 NV_ERROR(drm, "bo %p pinned elsewhere: " nouveau_bo_pin()
340 "0x%08x vs 0x%08x\n", bo, nouveau_bo_pin()
341 1 << bo->mem.mem_type, memtype); nouveau_bo_pin()
368 switch (bo->mem.mem_type) { nouveau_bo_pin()
370 drm->gem.vram_available -= bo->mem.size; nouveau_bo_pin()
373 drm->gem.gart_available -= bo->mem.size; nouveau_bo_pin()
382 ttm_bo_unreserve(bo); nouveau_bo_pin()
389 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_unpin()
390 struct ttm_buffer_object *bo = &nvbo->bo; nouveau_bo_unpin() local
393 ret = ttm_bo_reserve(bo, false, false, false, NULL); nouveau_bo_unpin()
402 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); nouveau_bo_unpin()
406 switch (bo->mem.mem_type) { nouveau_bo_unpin()
408 drm->gem.vram_available += bo->mem.size; nouveau_bo_unpin()
411 drm->gem.gart_available += bo->mem.size; nouveau_bo_unpin()
419 ttm_bo_unreserve(bo); nouveau_bo_unpin()
428 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL); nouveau_bo_map()
437 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, nouveau_bo_map()
440 ttm_bo_unreserve(&nvbo->bo); nouveau_bo_map()
461 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_sync_for_device()
463 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; nouveau_bo_sync_for_device()
481 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); nouveau_bo_sync_for_cpu()
483 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; nouveau_bo_sync_for_cpu()
504 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, nouveau_bo_validate()
527 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm; _nouveau_bo_mem_index()
664 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) nouveau_bo_evict_flags() argument
666 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_bo_evict_flags()
668 switch (bo->mem.mem_type) { nouveau_bo_evict_flags()
695 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nve0_bo_move_copy() argument
727 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nvc0_bo_move_copy() argument
765 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nvc0_bo_move_m2mf() argument
804 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nva3_bo_move_copy() argument
842 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nv98_bo_move_exec() argument
860 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nv84_bo_move_exec() argument
894 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nv50_bo_move_m2mf() argument
981 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, nouveau_bo_mem_ctxdma() argument
990 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, nv04_bo_move_m2mf() argument
1003 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); nv04_bo_move_m2mf()
1004 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); nv04_bo_move_m2mf()
1036 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, nouveau_bo_move_prep() argument
1039 struct nvkm_mem *old_node = bo->mem.mm_node; nouveau_bo_move_prep()
1062 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, nouveau_bo_move_m2mf() argument
1065 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_move_m2mf()
1076 ret = nouveau_bo_move_prep(drm, bo, new_mem); nouveau_bo_move_m2mf()
1082 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); nouveau_bo_move_m2mf()
1084 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); nouveau_bo_move_m2mf()
1088 ret = ttm_bo_move_accel_cleanup(bo, nouveau_bo_move_m2mf()
1162 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, nouveau_bo_move_flipd() argument
1179 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); nouveau_bo_move_flipd()
1183 ret = ttm_tt_bind(bo->ttm, &tmp_mem); nouveau_bo_move_flipd()
1187 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); nouveau_bo_move_flipd()
1191 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); nouveau_bo_move_flipd()
1193 ttm_bo_mem_put(bo, &tmp_mem); nouveau_bo_move_flipd()
1198 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, nouveau_bo_move_flips() argument
1215 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); nouveau_bo_move_flips()
1219 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); nouveau_bo_move_flips()
1223 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); nouveau_bo_move_flips()
1228 ttm_bo_mem_put(bo, &tmp_mem); nouveau_bo_move_flips()
1233 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) nouveau_bo_move_ntfy() argument
1235 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_bo_move_ntfy()
1239 if (bo->destroy != nouveau_bo_del_ttm) nouveau_bo_move_ntfy()
1254 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, nouveau_bo_vm_bind() argument
1257 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_vm_bind()
1259 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_bo_vm_bind()
1276 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, nouveau_bo_vm_cleanup() argument
1280 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_vm_cleanup()
1282 struct fence *fence = reservation_object_get_excl(bo->resv); nouveau_bo_vm_cleanup()
1289 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, nouveau_bo_move() argument
1292 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_bo_move()
1293 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_bo_move()
1294 struct ttm_mem_reg *old_mem = &bo->mem; nouveau_bo_move()
1302 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); nouveau_bo_move()
1307 /* Fake bo copy. */ nouveau_bo_move()
1308 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { nouveau_bo_move()
1309 BUG_ON(bo->mem.mm_node != NULL); nouveau_bo_move()
1310 bo->mem = *new_mem; nouveau_bo_move()
1318 ret = nouveau_bo_move_flipd(bo, evict, intr, nouveau_bo_move()
1321 ret = nouveau_bo_move_flips(bo, evict, intr, nouveau_bo_move()
1324 ret = nouveau_bo_move_m2mf(bo, evict, intr, nouveau_bo_move()
1331 ret = ttm_bo_wait(bo, true, intr, no_wait_gpu); nouveau_bo_move()
1333 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); nouveau_bo_move()
1338 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); nouveau_bo_move()
1340 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); nouveau_bo_move()
1347 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) nouveau_bo_verify_access() argument
1349 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_bo_verify_access()
1420 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) nouveau_ttm_fault_reserve_notify() argument
1422 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_ttm_fault_reserve_notify()
1423 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_ttm_fault_reserve_notify()
1428 /* as long as the bo isn't in vram, and isn't tiled, we've got nouveau_ttm_fault_reserve_notify()
1431 if (bo->mem.mem_type != TTM_PL_VRAM) { nouveau_ttm_fault_reserve_notify()
1436 if (bo->mem.mem_type == TTM_PL_SYSTEM) { nouveau_ttm_fault_reserve_notify()
1446 /* make sure bo is in mappable vram */ nouveau_ttm_fault_reserve_notify()
1448 bo->mem.start + bo->mem.num_pages < mappable) nouveau_ttm_fault_reserve_notify()
1595 struct reservation_object *resv = nvbo->bo.resv; nouveau_bo_fence()
1634 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; nouveau_bo_vma_add()
1642 if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && nouveau_bo_vma_add()
1643 (nvbo->bo.mem.mem_type == TTM_PL_VRAM || nouveau_bo_vma_add()
1645 nvkm_vm_map(vma, nvbo->bo.mem.mm_node); nouveau_bo_vma_add()
1656 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) nouveau_bo_vma_del()
H A Dnv84_fence.c107 return nouveau_bo_rd32(priv->bo, chan->chid * 16/4); nv84_fence_read()
119 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); nv84_fence_context_del() local
120 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]); nv84_fence_context_del()
123 nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); nv84_fence_context_del()
124 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); nv84_fence_context_del()
125 nouveau_bo_vma_del(priv->bo, &fctx->vma); nv84_fence_context_del()
151 ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); nv84_fence_context_new()
159 struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i); nv84_fence_context_new() local
160 ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]); nv84_fence_context_new()
177 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4); nv84_fence_suspend()
191 nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]); nv84_fence_resume()
205 nouveau_bo_unmap(priv->bo); nv84_fence_destroy()
206 if (priv->bo) nv84_fence_destroy()
207 nouveau_bo_unpin(priv->bo); nv84_fence_destroy()
208 nouveau_bo_ref(NULL, &priv->bo); nv84_fence_destroy()
243 0, NULL, NULL, &priv->bo); nv84_fence_create()
245 ret = nouveau_bo_pin(priv->bo, domain, false); nv84_fence_create()
247 ret = nouveau_bo_map(priv->bo); nv84_fence_create()
249 nouveau_bo_unpin(priv->bo); nv84_fence_create()
252 nouveau_bo_ref(NULL, &priv->bo); nv84_fence_create()
H A Dnv17_fence.c78 struct ttm_mem_reg *mem = &priv->bo->bo.mem; nv17_fence_context_new()
110 nouveau_bo_wr32(priv->bo, 0, priv->sequence); nv17_fence_resume()
132 0, 0x0000, NULL, NULL, &priv->bo); nv17_fence_create()
134 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false); nv17_fence_create()
136 ret = nouveau_bo_map(priv->bo); nv17_fence_create()
138 nouveau_bo_unpin(priv->bo); nv17_fence_create()
141 nouveau_bo_ref(NULL, &priv->bo); nv17_fence_create()
149 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); nv17_fence_create()
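
    nv50_fence_create(), nv84_fence_create() and nv17_fence_create() above share the same tail: pin the freshly created fence bo into VRAM, map it, and unwind in reverse order if either step fails. A condensed sketch of that shared sequence; the helper name is illustrative, and the real code open-codes it per chipset:

    static int fence_bo_pin_and_map(struct nouveau_bo *bo)
    {
            int ret;

            ret = nouveau_bo_pin(bo, TTM_PL_FLAG_VRAM, false);
            if (ret)
                    return ret;

            ret = nouveau_bo_map(bo);
            if (ret) {
                    nouveau_bo_unpin(bo);
                    return ret;
            }

            /* clear the first sequence slot, as the *_fence_create() paths do */
            nouveau_bo_wr32(bo, 0x000, 0x00000000);
            return 0;
    }
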
H A Dnouveau_prime.c34 int npages = nvbo->bo.num_pages; nouveau_gem_prime_get_sg_table()
36 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); nouveau_gem_prime_get_sg_table()
44 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, nouveau_gem_prime_vmap()
81 ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size); nouveau_gem_prime_import_sg_table()
114 return nvbo->bo.resv; nouveau_gem_prime_res_obj()
H A Dnv10_fence.c89 nouveau_bo_unmap(priv->bo); nv10_fence_destroy()
90 if (priv->bo) nv10_fence_destroy()
91 nouveau_bo_unpin(priv->bo); nv10_fence_destroy()
92 nouveau_bo_ref(NULL, &priv->bo); nv10_fence_destroy()
H A Dnouveau_display.c643 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset); nouveau_display_resume()
727 ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL); nouveau_crtc_page_flip()
734 ttm_bo_unreserve(&new_bo->bo); nouveau_crtc_page_flip()
739 ttm_bo_unreserve(&new_bo->bo); nouveau_crtc_page_flip()
741 ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL); nouveau_crtc_page_flip()
750 new_bo->bo.offset }; nouveau_crtc_page_flip()
791 ttm_bo_unreserve(&old_bo->bo); nouveau_crtc_page_flip()
799 ttm_bo_unreserve(&old_bo->bo); nouveau_crtc_page_flip()
871 struct nouveau_bo *bo; nouveau_display_dumb_create() local
885 ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo); nouveau_display_dumb_create()
889 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); nouveau_display_dumb_create()
890 drm_gem_object_unreference_unlocked(&bo->gem); nouveau_display_dumb_create()
903 struct nouveau_bo *bo = nouveau_gem_object(gem); nouveau_display_dumb_map_offset() local
904 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); nouveau_display_dumb_map_offset()
H A Dnouveau_ttm.c74 struct ttm_buffer_object *bo, nouveau_vram_manager_new()
80 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_vram_manager_new()
163 struct ttm_buffer_object *bo, nouveau_gart_manager_new()
167 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); nouveau_gart_manager_new()
168 struct nouveau_bo *nvbo = nouveau_bo(bo); nouveau_gart_manager_new()
243 struct ttm_buffer_object *bo, nv04_gart_manager_new()
388 NV_ERROR(drm, "error initialising bo driver, %d\n", ret); nouveau_ttm_init()
73 nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) nouveau_vram_manager_new() argument
162 nouveau_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) nouveau_gart_manager_new() argument
242 nv04_gart_manager_new(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) nv04_gart_manager_new() argument
H A Dnouveau_fence.h99 struct nouveau_bo *bo; member in struct:nv84_fence_priv
H A Dnouveau_abi16.c294 if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) nouveau_abi16_ioctl_channel_alloc()
467 args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset; nouveau_abi16_ioctl_notifierobj_alloc()
468 args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset; nouveau_abi16_ioctl_notifierobj_alloc()
473 args.ctxdma.start += chan->ntfy->bo.offset; nouveau_abi16_ioctl_notifierobj_alloc()
474 args.ctxdma.limit += chan->ntfy->bo.offset; nouveau_abi16_ioctl_notifierobj_alloc()
H A Dnouveau_fbcon.c426 info->fix.smem_start = nvbo->bo.mem.bus.base + nouveau_fbcon_create()
427 nvbo->bo.mem.bus.offset; nouveau_fbcon_create()
445 NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", nouveau_fbcon_create()
447 nvbo->bo.offset, nvbo); nouveau_fbcon_create()
H A Dnouveau_dma.c82 nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, nv50_dma_push() argument
91 vma = nouveau_bo_vma_find(bo, cli->vm); nv50_dma_push()
H A Dnv50_display.c51 /* offsets in shared sync bo of various structures */
649 evo_data(push, nv_fb->nvbo->bo.offset >> 8); nv50_display_flip_next()
656 evo_data(push, nv_fb->nvbo->bo.offset >> 8); nv50_display_flip_next()
888 evo_data(push, nvfb->nvbo->bo.offset >> 8); nv50_crtc_set_image()
901 evo_data(push, nvfb->nvbo->bo.offset >> 8); nv50_crtc_set_image()
931 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); nv50_crtc_cursor_show()
936 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); nv50_crtc_cursor_show()
942 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8); nv50_crtc_cursor_show()
1055 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); nv50_crtc_commit()
1062 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); nv50_crtc_commit()
1070 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8); nv50_crtc_commit()
1460 ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset, nv50_crtc_create()
1473 ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset, nv50_crtc_create()
2505 ret = nv50_core_create(disp->disp, disp->sync->bo.offset, nv50_display_create()
H A Dnouveau_drm.h30 * - allow concurrent access to bo's mapped read/write.
H A Dnouveau_chan.c126 chan->push.vma.offset = chan->push.buffer->bo.offset; nouveau_channel_prep()
141 if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) { nouveau_channel_prep()
H A Dnouveau_fence.c395 struct reservation_object *resv = nvbo->bo.resv; nouveau_fence_sync()
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_ringbuffer.c35 ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); msm_ringbuffer_new()
36 if (IS_ERR(ring->bo)) { msm_ringbuffer_new()
37 ret = PTR_ERR(ring->bo); msm_ringbuffer_new()
38 ring->bo = NULL; msm_ringbuffer_new()
42 ring->start = msm_gem_vaddr_locked(ring->bo); msm_ringbuffer_new()
58 if (ring->bo) msm_ringbuffer_destroy()
59 drm_gem_object_unreference_unlocked(ring->bo); msm_ringbuffer_destroy()
H A Dmsm_fbdev.c37 struct drm_gem_object *bo; member in struct:msm_fbdev
64 struct drm_gem_object *drm_obj = fbdev->bo; msm_fbdev_mmap()
109 /* allocate backing bo */ msm_fbdev_create()
113 fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | msm_fbdev_create()
116 if (IS_ERR(fbdev->bo)) { msm_fbdev_create()
117 ret = PTR_ERR(fbdev->bo); msm_fbdev_create()
118 fbdev->bo = NULL; msm_fbdev_create()
123 fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo); msm_fbdev_create()
127 * to unref the bo: msm_fbdev_create()
129 drm_gem_object_unreference(fbdev->bo); msm_fbdev_create()
141 ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr); msm_fbdev_create()
177 fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); msm_fbdev_create()
178 fbi->screen_size = fbdev->bo->size; msm_fbdev_create()
180 fbi->fix.smem_len = fbdev->bo->size; msm_fbdev_create()
H A Dmsm_gem.h37 * block on submit if a bo is already on other ring
46 * the duration of the ioctl, so one bo can never be on multiple
60 /* normally (resv == &_resv) except for imported bo's */
91 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
H A Dmsm_fb.c51 struct drm_gem_object *bo = msm_fb->planes[i]; msm_framebuffer_destroy() local
52 if (bo) msm_framebuffer_destroy()
53 drm_gem_object_unreference_unlocked(bo); msm_framebuffer_destroy()
90 /* prepare/pin all the fb's bo's for scanout. Note that it is not valid
H A Dmsm_ringbuffer.h26 struct drm_gem_object *bo; member in struct:msm_ringbuffer
H A Dmsm_gem_submit.c47 /* initially, until copy_from_user() and bo lookup succeeds: */ submit_create()
138 /* This is where we make sure all the bo's are reserved and pin'd: */ submit_validate_objects()
164 /* if locking succeeded, pin bo: */ submit_validate_objects()
H A Dmsm_gpu.c454 /* call from irq handler to schedule work to retire bo's */ msm_gpu_retire()
462 /* add bo's to gpu's ring, and kick gpu: */ msm_gpu_submit()
496 /* ring takes a reference to the bo and iova: */ msm_gpu_submit()
645 msm_gem_put_iova(gpu->rb->bo, gpu->id); msm_gpu_cleanup()
H A Dmsm_rd.c304 * all the bo's associated with the submit. Handy to see vtx msm_rd_dump_submit()
305 * buffers, etc. For now just the cmdstream bo's is enough. msm_rd_dump_submit()
H A Dmsm_atomic.c74 * bo's.. commit_init()
H A Dmsm_gem.c325 * bo is deleted: msm_gem_get_iova()
411 /* setup callback for when bo is no longer busy..
H A Dmsm_drv.h105 /* callbacks deferred until bo is inactive: */
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
H A Dvmwgfx_dmabuf.c45 * Flushes and unpins the query bo to avoid failures.
55 struct ttm_buffer_object *bo = &buf->base; vmw_dmabuf_to_placement() local
64 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); vmw_dmabuf_to_placement()
68 ret = ttm_bo_validate(bo, placement, interruptible, false); vmw_dmabuf_to_placement()
70 ttm_bo_unreserve(bo); vmw_dmabuf_to_placement()
83 * Flushes and unpins the query bo if @pin == true to avoid failures.
97 struct ttm_buffer_object *bo = &buf->base; vmw_dmabuf_to_vram_or_gmr() local
108 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); vmw_dmabuf_to_vram_or_gmr()
124 ret = ttm_bo_validate(bo, placement, interruptible, false); vmw_dmabuf_to_vram_or_gmr()
139 ret = ttm_bo_validate(bo, placement, interruptible, false); vmw_dmabuf_to_vram_or_gmr()
142 ttm_bo_unreserve(bo); vmw_dmabuf_to_vram_or_gmr()
185 * Flushes and unpins the query bo if @pin == true to avoid failures.
199 struct ttm_buffer_object *bo = &buf->base; vmw_dmabuf_to_start_of_vram() local
208 place.lpfn = bo->num_pages; vmw_dmabuf_to_start_of_vram()
221 ret = ttm_bo_reserve(bo, interruptible, false, false, NULL); vmw_dmabuf_to_start_of_vram()
226 if (bo->mem.mem_type == TTM_PL_VRAM && vmw_dmabuf_to_start_of_vram()
227 bo->mem.start < bo->num_pages && vmw_dmabuf_to_start_of_vram()
228 bo->mem.start > 0) vmw_dmabuf_to_start_of_vram()
229 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false); vmw_dmabuf_to_start_of_vram()
231 ret = ttm_bo_validate(bo, &placement, interruptible, false); vmw_dmabuf_to_start_of_vram()
234 WARN_ON(ret == 0 && bo->offset != 0); vmw_dmabuf_to_start_of_vram()
236 ttm_bo_unreserve(bo); vmw_dmabuf_to_start_of_vram()
277 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
280 void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, vmw_bo_get_guest_ptr() argument
283 if (bo->mem.mem_type == TTM_PL_VRAM) { vmw_bo_get_guest_ptr()
285 ptr->offset = bo->offset; vmw_bo_get_guest_ptr()
287 ptr->gmrId = bo->mem.start; vmw_bo_get_guest_ptr()
296 * @bo: The buffer object. Must be reserved.
300 void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin) vmw_bo_pin() argument
304 uint32_t old_mem_type = bo->mem.mem_type; vmw_bo_pin()
307 lockdep_assert_held(&bo->resv->lock.base); vmw_bo_pin()
320 ret = ttm_bo_validate(bo, &placement, false, true); vmw_bo_pin()
322 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); vmw_bo_pin()
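
    vmw_bo_pin() requires the bo to be reserved, and vmw_bo_get_guest_ptr() then reports either a VRAM offset or a GMR id depending on where the bo currently lives. A sketch of how the two compose; illustrative only, and the SVGAGuestPtr parameter type is assumed to be the guest-pointer type the driver uses elsewhere:

    static int vmw_pin_and_get_guest_ptr(struct ttm_buffer_object *bo,
                                         SVGAGuestPtr *ptr)
    {
            int ret;

            ret = ttm_bo_reserve(bo, true, false, false, NULL);
            if (unlikely(ret != 0))
                    return ret;

            vmw_bo_pin(bo, true);           /* flips the placement to no-evict */
            vmw_bo_get_guest_ptr(bo, ptr);  /* VRAM offset or GMR id */
            ttm_bo_unreserve(bo);
            return 0;
    }
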
H A Dvmwgfx_resource.c73 vmw_dma_buffer(struct ttm_buffer_object *bo) vmw_dma_buffer() argument
75 return container_of(bo, struct vmw_dma_buffer, base); vmw_dma_buffer()
79 vmw_user_dma_buffer(struct ttm_buffer_object *bo) vmw_user_dma_buffer() argument
81 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); vmw_user_dma_buffer()
128 struct ttm_buffer_object *bo = &res->backup->base; vmw_resource_release() local
130 ttm_bo_reserve(bo, false, false, false, NULL); vmw_resource_release()
135 val_buf.bo = bo; vmw_resource_release()
141 ttm_bo_unreserve(bo); vmw_resource_release()
398 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) vmw_dmabuf_bo_free() argument
400 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); vmw_dmabuf_bo_free()
405 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) vmw_user_dmabuf_destroy() argument
407 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); vmw_user_dmabuf_destroy()
416 void (*bo_free) (struct ttm_buffer_object *bo)) vmw_dmabuf_init()
441 struct ttm_buffer_object *bo; vmw_user_dmabuf_release() local
450 bo = &vmw_user_bo->dma.base; vmw_user_dmabuf_release()
451 ttm_bo_unref(&bo); vmw_user_dmabuf_release()
535 * @bo: Pointer to the buffer object being accessed
538 int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo, vmw_user_dmabuf_verify_access() argument
543 if (unlikely(bo->destroy != vmw_user_dmabuf_destroy)) vmw_user_dmabuf_verify_access()
546 vmw_user_bo = vmw_user_dma_buffer(bo); vmw_user_dmabuf_verify_access()
571 struct ttm_buffer_object *bo = &user_bo->dma.base; vmw_user_dmabuf_synccpu_grab() local
580 return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY; vmw_user_dmabuf_synccpu_grab()
582 lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT); vmw_user_dmabuf_synccpu_grab()
591 (bo, !!(flags & drm_vmw_synccpu_dontblock)); vmw_user_dmabuf_synccpu_grab()
1138 val_buf->bo != NULL) || vmw_resource_do_validate()
1139 (!func->needs_backup && val_buf->bo != NULL))) { vmw_resource_do_validate()
1235 val_buf->bo = ttm_bo_reference(&res->backup->base); vmw_resource_check_buffer()
1258 ttm_bo_unref(&val_buf->bo); vmw_resource_check_buffer()
1305 if (likely(val_buf->bo == NULL)) vmw_resource_backoff_reservation()
1311 ttm_bo_unref(&val_buf->bo); vmw_resource_backoff_reservation()
1329 val_buf.bo = NULL; vmw_resource_do_evict()
1375 val_buf.bo = NULL; vmw_resource_validate()
1378 val_buf.bo = &res->backup->base; vmw_resource_validate()
1432 * @bo: Pointer to the struct ttm_buffer_object to fence.
1440 void vmw_fence_single_bo(struct ttm_buffer_object *bo, vmw_fence_single_bo() argument
1443 struct ttm_bo_device *bdev = bo->bdev; vmw_fence_single_bo()
1450 reservation_object_add_excl_fence(bo->resv, &fence->base); vmw_fence_single_bo()
1453 reservation_object_add_excl_fence(bo->resv, &fence->base); vmw_fence_single_bo()
1459 * @bo: The TTM buffer object about to move.
1467 * resource that remain static while bo::res is !NULL and
1468 * while we have @bo reserved. struct resource::backup is *not* a
1470 * to set @bo::res to NULL, while having @bo reserved when the
1471 * buffer is no longer bound to the resource, so @bo:res can be
1475 void vmw_resource_move_notify(struct ttm_buffer_object *bo, vmw_resource_move_notify() argument
1483 if (bo->destroy != vmw_dmabuf_bo_free && vmw_resource_move_notify()
1484 bo->destroy != vmw_user_dmabuf_destroy) vmw_resource_move_notify()
1487 dma_buf = container_of(bo, struct vmw_dma_buffer, base); vmw_resource_move_notify()
1493 val_buf.bo = bo; vmw_resource_move_notify()
1507 (void) ttm_bo_wait(bo, false, false, false); vmw_resource_move_notify()
412 vmw_dmabuf_init(struct vmw_private *dev_priv, struct vmw_dma_buffer *vmw_bo, size_t size, struct ttm_placement *placement, bool interruptible, void (*bo_free) (struct ttm_buffer_object *bo)) vmw_dmabuf_init() argument
H A Dvmwgfx_buffer.c494 * @bo: Pointer to a struct ttm_buffer_object
501 int vmw_bo_map_dma(struct ttm_buffer_object *bo) vmw_bo_map_dma() argument
504 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_bo_map_dma()
513 * @bo: Pointer to a struct ttm_buffer_object
518 void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) vmw_bo_unmap_dma() argument
521 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_bo_unmap_dma()
531 * @bo: Pointer to a struct ttm_buffer_object
538 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) vmw_bo_sg_table() argument
541 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_bo_sg_table()
737 * one slot per bo. There is an upper limit of the number of vmw_init_mem_type()
738 * slots as well as the bo size. vmw_init_mem_type()
753 static void vmw_evict_flags(struct ttm_buffer_object *bo, vmw_evict_flags() argument
759 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) vmw_verify_access() argument
764 return vmw_user_dmabuf_verify_access(bo, tfile); vmw_verify_access()
799 static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) vmw_ttm_fault_reserve_notify() argument
807 * @bo: The TTM buffer object about to move.
814 static void vmw_move_notify(struct ttm_buffer_object *bo, vmw_move_notify() argument
817 vmw_resource_move_notify(bo, mem); vmw_move_notify()
824 * @bo: The TTM buffer object about to be swapped out.
826 static void vmw_swap_notify(struct ttm_buffer_object *bo) vmw_swap_notify() argument
828 ttm_bo_wait(bo, false, false, false); vmw_swap_notify()
H A Dvmwgfx_mob.c184 struct ttm_buffer_object *bo; vmw_takedown_otable_base() local
189 bo = otable->page_table->pt_bo; vmw_takedown_otable_base()
206 if (bo) { vmw_takedown_otable_base()
209 ret = ttm_bo_reserve(bo, false, true, false, NULL); vmw_takedown_otable_base()
212 vmw_fence_single_bo(bo, NULL); vmw_takedown_otable_base()
213 ttm_bo_unreserve(bo); vmw_takedown_otable_base()
321 struct ttm_buffer_object *bo = dev_priv->otable_bo; vmw_otables_takedown() local
328 ret = ttm_bo_reserve(bo, false, true, false, NULL); vmw_otables_takedown()
331 vmw_fence_single_bo(bo, NULL); vmw_otables_takedown()
332 ttm_bo_unreserve(bo); vmw_otables_takedown()
500 struct ttm_buffer_object *bo = mob->pt_bo; vmw_mob_pt_setup() local
506 ret = ttm_bo_reserve(bo, false, true, false, NULL); vmw_mob_pt_setup()
509 vsgt = vmw_bo_sg_table(bo); vmw_mob_pt_setup()
524 ttm_bo_unreserve(bo); vmw_mob_pt_setup()
553 struct ttm_buffer_object *bo = mob->pt_bo; vmw_mob_unbind() local
555 if (bo) { vmw_mob_unbind()
556 ret = ttm_bo_reserve(bo, false, true, false, NULL); vmw_mob_unbind()
573 if (bo) { vmw_mob_unbind()
574 vmw_fence_single_bo(bo, NULL); vmw_mob_unbind()
575 ttm_bo_unreserve(bo); vmw_mob_unbind()
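
    vmw_takedown_otable_base(), vmw_otables_takedown() and vmw_mob_unbind() above all retire a page-table bo the same way: take a no-wait reservation, attach an exclusive fence via vmw_fence_single_bo(), and unreserve. A minimal sketch of that teardown step; the wrapper name is illustrative:

    static void vmw_fence_and_unreserve(struct ttm_buffer_object *bo)
    {
            int ret;

            ret = ttm_bo_reserve(bo, false, true, false, NULL);
            BUG_ON(ret != 0);       /* no-wait reserve; the callers expect success */

            /* attaches an exclusive fence to bo->resv (see vmw_fence_single_bo()) */
            vmw_fence_single_bo(bo, NULL);
            ttm_bo_unreserve(bo);
    }
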
H A Dvmwgfx_gmrid_manager.c48 struct ttm_buffer_object *bo, vmw_gmrid_man_get_node()
62 gman->used_gmr_pages += bo->num_pages; vmw_gmrid_man_get_node()
86 mem->num_pages = bo->num_pages; vmw_gmrid_man_get_node()
96 gman->used_gmr_pages -= bo->num_pages; vmw_gmrid_man_get_node()
47 vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_mem_reg *mem) vmw_gmrid_man_get_node() argument
H A Dvmwgfx_drv.h626 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
631 void (*bo_free) (struct ttm_buffer_object *bo));
632 extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
650 extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
652 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
667 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
669 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
677 struct vmw_dma_buffer *bo,
687 struct vmw_dma_buffer *bo,
690 struct vmw_dma_buffer *bo,
694 extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
757 extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
758 extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
760 vmw_bo_sg_table(struct ttm_buffer_object *bo);
894 struct ttm_buffer_object *bo,
1104 struct ttm_buffer_object *bo = &tmp_buf->base; vmw_dmabuf_unreference() local
1106 ttm_bo_unref(&bo); vmw_dmabuf_unreference()
H A Dvmwgfx_fifo.c548 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; vmw_fifo_emit_dummy_legacy_query() local
566 if (bo->mem.mem_type == TTM_PL_VRAM) { vmw_fifo_emit_dummy_legacy_query()
568 cmd->body.guestResult.offset = bo->offset; vmw_fifo_emit_dummy_legacy_query()
570 cmd->body.guestResult.gmrId = bo->mem.start; vmw_fifo_emit_dummy_legacy_query()
597 struct ttm_buffer_object *bo = dev_priv->dummy_query_bo; vmw_fifo_emit_dummy_gb_query() local
614 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); vmw_fifo_emit_dummy_gb_query()
615 cmd->body.mobid = bo->mem.start; vmw_fifo_emit_dummy_gb_query()
H A Dvmwgfx_execbuf.c299 * vmw_bo_to_validate_list - add a bo to a validate list
302 * @bo: The buffer object to add.
311 struct ttm_buffer_object *bo, vmw_bo_to_validate_list()
321 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo, vmw_bo_to_validate_list()
339 vval_buf->hash.key = (unsigned long) bo; vmw_bo_to_validate_list()
348 val_buf->bo = ttm_bo_reference(bo); vmw_bo_to_validate_list()
383 struct ttm_buffer_object *bo = &res->backup->base; vmw_resources_reserve() local
386 (sw_context, bo, vmw_resources_reserve()
885 struct ttm_buffer_object *bo; vmw_translate_mob_ptr() local
897 bo = &vmw_bo->base; vmw_translate_mob_ptr()
910 ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index); vmw_translate_mob_ptr()
948 struct ttm_buffer_object *bo; vmw_translate_guest_ptr() local
960 bo = &vmw_bo->base; vmw_translate_guest_ptr()
972 ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index); vmw_translate_guest_ptr()
2159 struct ttm_buffer_object *bo; vmw_apply_relocations() local
2164 bo = validate->bo; vmw_apply_relocations()
2165 switch (bo->mem.mem_type) { vmw_apply_relocations()
2167 reloc->location->offset += bo->offset; vmw_apply_relocations()
2171 reloc->location->gmrId = bo->mem.start; vmw_apply_relocations()
2174 *reloc->mob_loc = bo->mem.start; vmw_apply_relocations()
2217 ttm_bo_unref(&entry->base.bo); vmw_clear_validations()
2228 struct ttm_buffer_object *bo, vmw_validate_single_buffer()
2238 if (bo == dev_priv->pinned_bo || vmw_validate_single_buffer()
2239 (bo == dev_priv->dummy_query_bo && vmw_validate_single_buffer()
2244 return ttm_bo_validate(bo, &vmw_mob_placement, true, false); vmw_validate_single_buffer()
2253 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); vmw_validate_single_buffer()
2263 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); vmw_validate_single_buffer()
2274 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo, vmw_validate_buffers()
2641 * query bo.
2648 * This function should be used to unpin the pinned query bo, or
2677 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); __vmw_execbuf_release_pinned_bo()
2681 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); __vmw_execbuf_release_pinned_bo()
2715 ttm_bo_unref(&query_val.bo); __vmw_execbuf_release_pinned_bo()
2716 ttm_bo_unref(&pinned_val.bo); __vmw_execbuf_release_pinned_bo()
2725 ttm_bo_unref(&query_val.bo); __vmw_execbuf_release_pinned_bo()
2726 ttm_bo_unref(&pinned_val.bo); __vmw_execbuf_release_pinned_bo()
2732 * query bo.
2736 * This function should be used to unpin the pinned query bo, or
310 vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, struct ttm_buffer_object *bo, bool validate_as_mob, uint32_t *p_val_node) vmw_bo_to_validate_list() argument
2227 vmw_validate_single_buffer(struct vmw_private *dev_priv, struct ttm_buffer_object *bo, bool validate_as_mob) vmw_validate_single_buffer() argument
H A Dvmwgfx_drv.c285 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
295 * Returns an error if bo creation or initialization fails.
300 struct ttm_buffer_object *bo; vmw_dummy_query_bo_create() local
306 * Create the bo as pinned, so that a tryreserve will vmw_dummy_query_bo_create()
308 * user of the bo currently. vmw_dummy_query_bo_create()
315 &bo); vmw_dummy_query_bo_create()
320 ret = ttm_bo_reserve(bo, false, true, false, NULL); vmw_dummy_query_bo_create()
323 ret = ttm_bo_kmap(bo, 0, 1, &map); vmw_dummy_query_bo_create()
331 vmw_bo_pin(bo, false); vmw_dummy_query_bo_create()
332 ttm_bo_unreserve(bo); vmw_dummy_query_bo_create()
336 ttm_bo_unref(&bo); vmw_dummy_query_bo_create()
338 dev_priv->dummy_query_bo = bo; vmw_dummy_query_bo_create()
380 * the pinned bo. vmw_release_device()
H A Dvmwgfx_context.c302 struct ttm_buffer_object *bo = val_buf->bo; vmw_gb_context_bind() local
304 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); vmw_gb_context_bind()
316 cmd->body.mobid = bo->mem.start; vmw_gb_context_bind()
329 struct ttm_buffer_object *bo = val_buf->bo; vmw_gb_context_unbind() local
346 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); vmw_gb_context_unbind()
384 vmw_fence_single_bo(bo, fence); vmw_gb_context_unbind()
H A Dvmwgfx_shader.c186 struct ttm_buffer_object *bo = val_buf->bo; vmw_gb_shader_bind() local
188 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); vmw_gb_shader_bind()
200 cmd->body.mobid = bo->mem.start; vmw_gb_shader_bind()
242 vmw_fence_single_bo(val_buf->bo, fence); vmw_gb_shader_unbind()
H A Dvmwgfx_surface.c446 BUG_ON(val_buf->bo == NULL); vmw_legacy_srf_dma()
455 vmw_bo_get_guest_ptr(val_buf->bo, &ptr); vmw_legacy_srf_dma()
467 vmw_fence_single_bo(val_buf->bo, fence); vmw_legacy_srf_dma()
1093 struct ttm_buffer_object *bo = val_buf->bo; vmw_gb_surface_bind() local
1095 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); vmw_gb_surface_bind()
1109 cmd1->body.mobid = bo->mem.start; vmw_gb_surface_bind()
1127 struct ttm_buffer_object *bo = val_buf->bo; vmw_gb_surface_unbind() local
1146 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); vmw_gb_surface_unbind()
1184 vmw_fence_single_bo(val_buf->bo, fence); vmw_gb_surface_unbind()
H A Dvmwgfx_fb.c565 struct ttm_buffer_object *bo; vmw_fb_close() local
572 bo = &par->vmw_bo->base; vmw_fb_close()
580 ttm_bo_unref(&bo); vmw_fb_close()
H A Dvmwgfx_scrn.c205 struct ttm_buffer_object *bo; vmw_sou_backing_free() local
210 bo = &sou->buffer->base; vmw_sou_backing_free()
211 ttm_bo_unref(&bo); vmw_sou_backing_free()
H A Dvmwgfx_kms.c291 struct ttm_buffer_object *bo, vmw_kms_cursor_snoop()
346 ret = ttm_bo_reserve(bo, true, false, false, NULL); vmw_kms_cursor_snoop()
352 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); vmw_kms_cursor_snoop()
381 ttm_bo_unreserve(bo); vmw_kms_cursor_snoop()
1139 struct vmw_dma_buffer *bo = NULL; vmw_kms_fb_create() local
1186 &surface, &bo); vmw_kms_fb_create()
1191 if (bo) vmw_kms_fb_create()
1192 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, vmw_kms_fb_create()
1202 if (bo) vmw_kms_fb_create()
1203 vmw_dmabuf_unreference(&bo); vmw_kms_fb_create()
289 vmw_kms_cursor_snoop(struct vmw_surface *srf, struct ttm_object_file *tfile, struct ttm_buffer_object *bo, SVGA3dCmdHeader *header) vmw_kms_cursor_snoop() argument
/linux-4.1.27/include/trace/events/
H A Dhost1x.h84 TP_PROTO(const char *name, struct host1x_bo *bo,
87 TP_ARGS(name, bo, words, offset, cmdbuf),
91 __field(struct host1x_bo *, bo)
105 __entry->bo = bo;
110 TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]",
111 __entry->name, __entry->bo,
226 TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh,
229 TP_ARGS(bo, offset, syncpt_id, thresh, min),
232 __field(struct host1x_bo *, bo)
240 __entry->bo = bo;
247 TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d",
248 __entry->bo, __entry->offset,
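The host1x.h hits above are fragments of TRACE_EVENT definitions: TP_PROTO/TP_ARGS give the tracepoint's C prototype, the __field() lines declare the per-event record layout, the assignments fill that record, and TP_printk formats it. The sketch below shows how such fragments fit together; it is illustrative only, with a hypothetical event name and header guard, and the real host1x definitions live in include/trace/events/host1x.h.

```c
/*
 * Illustrative sketch only: how the TP_PROTO/TP_ARGS/__field/TP_printk
 * fragments listed above combine into one TRACE_EVENT definition.
 * "sample_bo_event" and the guard names are hypothetical.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

struct host1x_bo;

TRACE_EVENT(sample_bo_event,
	/* prototype and argument names, as in the TP_PROTO/TP_ARGS lines */
	TP_PROTO(const char *name, struct host1x_bo *bo, u32 offset),
	TP_ARGS(name, bo, offset),

	/* per-event record layout, as in the __field() lines */
	TP_STRUCT__entry(
		__string(name, name)
		__field(struct host1x_bo *, bo)
		__field(u32, offset)
	),

	/* copy the arguments into the trace record */
	TP_fast_assign(
		__assign_str(name, name);
		__entry->bo = bo;
		__entry->offset = offset;
	),

	/* human-readable form, as in the TP_printk lines */
	TP_printk("name=%s, bo=%p, offset=%05x",
		  __get_str(name), __entry->bo, __entry->offset)
);

#endif /* _TRACE_SAMPLE_H */

/* must stay outside the multi-read protection */
#include <trace/define_trace.h>
```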
/linux-4.1.27/include/drm/ttm/
H A Dttm_bo_driver.h185 * @bo: Pointer to the buffer object we're allocating space for.
210 struct ttm_buffer_object *bo,
374 * @bo: the buffer object to be evicted
376 * Return the bo flags for a buffer which is not mapped to the hardware.
378 * finished, they'll end up in bo->mem.flags
381 void(*evict_flags) (struct ttm_buffer_object *bo,
386 * @bo: the buffer to move
396 int (*move) (struct ttm_buffer_object *bo,
404 * @bo: Pointer to a buffer object.
413 int (*verify_access) (struct ttm_buffer_object *bo,
418 void (*move_notify)(struct ttm_buffer_object *bo,
422 int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
425 * notify the driver that we're about to swap out this bo
427 void (*swap_notify) (struct ttm_buffer_object *bo);
457 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
515 * Constant after bo device init / atomic.
678 * @bo: Pointer to a struct ttm_buffer_object. the data of which
685 * Allocate memory space for the buffer object pointed to by @bo, using
694 extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
700 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
702 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
716 * @mapping: The address space to use for this bo.
734 * @bo: tear down the virtual mappings for this BO
736 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
741 * @bo: tear down the virtual mappings for this BO
745 extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
747 extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
748 extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
753 extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
754 extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
759 * @bo: A pointer to a struct ttm_buffer_object.
762 * @use_ticket: If @bo is already reserved, Only sleep waiting for
770 * Release all buffer reservations, wait for @bo to become unreserved and
778 static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, __ttm_bo_reserve() argument
790 success = ww_mutex_trylock(&bo->resv->lock); __ttm_bo_reserve()
795 ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket); __ttm_bo_reserve()
797 ret = ww_mutex_lock(&bo->resv->lock, ticket); __ttm_bo_reserve()
806 * @bo: A pointer to a struct ttm_buffer_object.
809 * @use_ticket: If @bo is already reserved, Only sleep waiting for
833 * release all its buffer reservations, wait for @bo to become unreserved, and
840 * Release all buffer reservations, wait for @bo to become unreserved and
848 static inline int ttm_bo_reserve(struct ttm_buffer_object *bo, ttm_bo_reserve() argument
855 WARN_ON(!atomic_read(&bo->kref.refcount)); ttm_bo_reserve()
857 ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket); ttm_bo_reserve()
859 ttm_bo_del_sub_from_lru(bo); ttm_bo_reserve()
866 * @bo: A pointer to a struct ttm_buffer_object.
868 * @sequence: Set (@bo)->sequence to this value after lock
874 static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo, ttm_bo_reserve_slowpath() argument
880 WARN_ON(!atomic_read(&bo->kref.refcount)); ttm_bo_reserve_slowpath()
883 ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, ttm_bo_reserve_slowpath()
886 ww_mutex_lock_slow(&bo->resv->lock, ticket); ttm_bo_reserve_slowpath()
889 ttm_bo_del_sub_from_lru(bo); ttm_bo_reserve_slowpath()
898 * @bo: A pointer to a struct ttm_buffer_object.
900 * Unreserve a previous reservation of @bo where the buffer object is
903 static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo) __ttm_bo_unreserve() argument
905 ww_mutex_unlock(&bo->resv->lock); __ttm_bo_unreserve()
911 * @bo: A pointer to a struct ttm_buffer_object.
913 * Unreserve a previous reservation of @bo.
915 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) ttm_bo_unreserve() argument
917 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { ttm_bo_unreserve()
918 spin_lock(&bo->glob->lru_lock); ttm_bo_unreserve()
919 ttm_bo_add_to_lru(bo); ttm_bo_unreserve()
920 spin_unlock(&bo->glob->lru_lock); ttm_bo_unreserve()
922 __ttm_bo_unreserve(bo); ttm_bo_unreserve()
927 * @bo: A pointer to a struct ttm_buffer_object.
930 * Unreserve a previous reservation of @bo made with @ticket.
932 static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, ttm_bo_unreserve_ticket() argument
935 ttm_bo_unreserve(bo); ttm_bo_unreserve_ticket()
949 * @bo: A pointer to a struct ttm_buffer_object.
957 * and update the (@bo)->mem placement flags. If unsuccessful, the old
964 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
971 * @bo: A pointer to a struct ttm_buffer_object.
979 * and update the (@bo)->mem placement flags. If unsuccessful, the old
986 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
993 * @bo: A pointer to a struct ttm_buffer_object.
997 extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
1002 * @bo: A pointer to a struct ttm_buffer_object.
1016 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
H A Dttm_bo_api.h155 * @type: The bo type.
215 * Members protected by the bo::resv::reserved lock.
224 * Members protected by the bo::reserved lock only when written to.
239 * Members protected by a bo reservation.
248 * and the bo::lock when written to. Can be read with
269 * Object describing a kernel mapping. Since a TTM bo may be located
285 struct ttm_buffer_object *bo; member in struct:ttm_bo_kmap_obj
291 * @bo: The buffer object.
297 ttm_bo_reference(struct ttm_buffer_object *bo) ttm_bo_reference() argument
299 kref_get(&bo->kref); ttm_bo_reference()
300 return bo; ttm_bo_reference()
306 * @bo: The buffer object.
310 * This function must be called with the bo::mutex held, and makes
317 extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
322 * @bo: The buffer object.
335 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
343 * @bo: The buffer object.
347 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
353 * @bo: The buffer object.
354 * @count: The number of references with which to decrease @bo::list_kref;
359 extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
365 * @bo: The buffer object.
367 * Add this bo to the relevant mem type lru and, if it's backed by
370 * is typically called immediately prior to unreserving a bo.
372 extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
377 * @bo: The buffer object.
379 * Remove this bo from all lru lists used to lookup and reserve an object.
381 * and is usually called just immediately after the bo has been reserved to
384 extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
407 * @bo: The buffer object:
419 ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
424 * @bo : The buffer object.
428 extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
450 * @bo: Pointer to a ttm_buffer_object to be initialized.
472 * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
482 struct ttm_buffer_object *bo,
618 * @bo: The buffer object.
632 extern int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
649 * @bo: The bo backing the address space. The address space will
650 * have the same size as the bo, and start at offset 0.
653 * if the fbdev address space is to be backed by a bo.
657 struct ttm_buffer_object *bo);
667 * if the device address space is to be backed by the bo manager.
699 extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
H A Dttm_execbuf_util.h41 * @bo: refcounted buffer object pointer.
47 struct ttm_buffer_object *bo; member in struct:ttm_validate_buffer
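The ttm_bo_driver.h/ttm_bo_api.h hits above document the reservation (ww_mutex) and kmap helpers that the vmwgfx results near the top of this page call. Below is a kernel-context sketch of the usual sequence, built only from the 4.1-era signatures shown above; write_first_page() is a hypothetical helper, the bo is assumed to be already populated and backed by system RAM (an io-mapped bo would need iowrite32()), and this is not a standalone program.

```c
/*
 * Kernel-context sketch (not standalone): reserve -> kmap -> access ->
 * kunmap -> unreserve, using the helpers listed above.  Assumes the
 * mapping is system RAM rather than I/O memory.
 */
#include <linux/types.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static int write_first_page(struct ttm_buffer_object *bo, u32 value)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	u32 *ptr;
	int ret;

	/* Take the per-bo ww_mutex: interruptible, no ticket. */
	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	if (ret)
		return ret;

	/* Map one page, starting at page 0. */
	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		goto out_unreserve;

	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
	ptr[0] = value;		/* assumes !is_iomem */

	ttm_bo_kunmap(&map);
out_unreserve:
	/* Puts the bo back on its LRU list (unless NO_EVICT) and unlocks. */
	ttm_bo_unreserve(bo);
	return ret;
}
```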
/linux-4.1.27/drivers/gpu/host1x/
H A Djob.h25 struct host1x_bo *bo; member in struct:host1x_job_gather
38 struct host1x_bo *bo; member in struct:host1x_waitchk
45 struct host1x_bo *bo; member in struct:host1x_job_unpin_data
H A Djob.c100 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, host1x_job_add_gather() argument
106 cur_gather->bo = bo; host1x_job_add_gather()
155 if (patch != wait->bo) do_waitchks()
158 trace_host1x_syncpt_wait_check(wait->bo, wait->offset, do_waitchks()
171 wait->bo = NULL; do_waitchks()
188 reloc->target.bo = host1x_bo_get(reloc->target.bo); pin_job()
189 if (!reloc->target.bo) pin_job()
192 phys_addr = host1x_bo_pin(reloc->target.bo, &sgt); pin_job()
197 job->unpins[job->num_unpins].bo = reloc->target.bo; pin_job()
207 g->bo = host1x_bo_get(g->bo); pin_job()
208 if (!g->bo) pin_job()
211 phys_addr = host1x_bo_pin(g->bo, &sgt); pin_job()
216 job->unpins[job->num_unpins].bo = g->bo; pin_job()
242 if (cmdbuf != reloc->cmdbuf.bo) do_relocs()
275 if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset) check_reloc()
393 fw->cmdbuf = g->bo; validate()
485 gather = host1x_bo_mmap(g->bo); copy_gathers()
488 host1x_bo_munmap(g->bo, gather); copy_gathers()
542 if (job->gathers[j].bo == g->bo) host1x_job_pin()
545 err = do_relocs(job, g->bo); host1x_job_pin()
549 err = do_waitchks(job, host, g->bo); host1x_job_pin()
575 host1x_bo_unpin(unpin->bo, unpin->sgt); host1x_job_unpin()
576 host1x_bo_put(unpin->bo); host1x_job_unpin()
/linux-4.1.27/drivers/crypto/vmx/
H A Dppc-xlate.pl106 my $bo = $f=~/[\+\-]/ ? 16+9 : 16; # optional "to be taken" hint
107 " bc $bo,0,".shift;
111 my $bo = $f=~/\-/ ? 12+2 : 12; # optional "not to be taken" hint
113 " .long ".sprintf "0x%x",19<<26|$bo<<21|16<<1 :
114 " bclr $bo,0";
118 my $bo = $f=~/\-/ ? 4+2 : 4; # optional "not to be taken" hint
120 " .long ".sprintf "0x%x",19<<26|$bo<<21|2<<16|16<<1 :
121 " bclr $bo,2";
125 my $bo = $f=~/-/ ? 12+2 : 12; # optional "not to be taken" hint
127 " .long ".sprintf "0x%X",19<<26|$bo<<21|2<<16|16<<1 :
128 " bclr $bo,2";
/linux-4.1.27/drivers/gpu/drm/omapdrm/
H A Domap_fbdev.c38 struct drm_gem_object *bo; member in struct:omap_fbdev
55 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages); pan_worker()
141 /* allocate backing bo */ omap_fbdev_create()
146 fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC); omap_fbdev_create()
147 if (!fbdev->bo) { omap_fbdev_create()
153 fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo); omap_fbdev_create()
157 * to unref the bo: omap_fbdev_create()
159 drm_gem_object_unreference(fbdev->bo); omap_fbdev_create()
164 /* note: this keeps the bo pinned.. which is perhaps not ideal, omap_fbdev_create()
172 ret = omap_gem_get_paddr(fbdev->bo, &paddr, true); omap_fbdev_create()
212 fbi->screen_base = omap_gem_vaddr(fbdev->bo); omap_fbdev_create()
213 fbi->screen_size = fbdev->bo->size; omap_fbdev_create()
215 fbi->fix.smem_len = fbdev->bo->size; omap_fbdev_create()
332 omap_gem_put_paddr(fbdev->bo); omap_fbdev_free()
H A Domap_fb.c79 struct drm_gem_object *bo; member in struct:plane
100 omap_fb->planes[0].bo, handle); omap_framebuffer_create_handle()
114 if (plane->bo) omap_framebuffer_destroy()
115 drm_gem_object_unreference_unlocked(plane->bo); omap_framebuffer_destroy()
168 if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) { omap_framebuffer_update_scanout()
206 omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr); omap_framebuffer_update_scanout()
208 info->screen_width = omap_gem_tiled_stride(plane->bo, orient); omap_framebuffer_update_scanout()
236 WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED)); omap_framebuffer_update_scanout()
237 omap_gem_rotated_paddr(plane->bo, orient, omap_framebuffer_update_scanout()
260 ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true); omap_framebuffer_pin()
263 omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE); omap_framebuffer_pin()
273 omap_gem_put_paddr(plane->bo); omap_framebuffer_pin()
293 ret = omap_gem_put_paddr(plane->bo); omap_framebuffer_unpin()
310 return omap_fb->planes[p].bo; omap_framebuffer_bo()
353 omap_gem_describe(plane->bo, m); omap_framebuffer_describe()
443 plane->bo = bos[i]; omap_framebuffer_init()
H A Domap_crtc.c582 struct drm_gem_object *bo; page_flip_worker() local
591 bo = omap_framebuffer_bo(crtc->primary->fb, 0); page_flip_worker()
592 drm_gem_object_unreference_unlocked(bo); page_flip_worker()
613 struct drm_gem_object *bo; omap_crtc_page_flip_locked() local
635 * and takes the reference to the bo. This avoids it omap_crtc_page_flip_locked()
638 bo = omap_framebuffer_bo(fb, 0); omap_crtc_page_flip_locked()
639 drm_gem_object_reference(bo); omap_crtc_page_flip_locked()
641 omap_gem_op_async(bo, OMAP_GEM_READ, page_flip_cb, crtc); omap_crtc_page_flip_locked()
H A Domap_dmm_tiler.h114 /* GEM bo flags -> tiler fmt */ gem2fmt()
H A Domap_plane.c61 /* for deferring bo unpin's until next post_apply(): */
/linux-4.1.27/net/can/
H A Dbcm.c163 struct bcm_sock *bo = bcm_sk(sk); bcm_proc_show() local
168 seq_printf(m, " / bo %pK", bo); bcm_proc_show()
169 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); bcm_proc_show()
170 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); bcm_proc_show()
173 list_for_each_entry(op, &bo->rx_ops, list) { bcm_proc_show()
204 list_for_each_entry(op, &bo->tx_ops, list) { bcm_proc_show()
340 struct bcm_sock *bo = bcm_sk(sk); bcm_send_to_user() local
344 bo->dropped_usr_msgs++; bcm_send_to_user()
831 struct bcm_sock *bo = bcm_sk(sk); bcm_tx_setup() local
845 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex); bcm_tx_setup()
932 list_add(&op->list, &bo->tx_ops); bcm_tx_setup()
934 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ bcm_tx_setup()
988 struct bcm_sock *bo = bcm_sk(sk); bcm_rx_setup() local
1010 op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex); bcm_rx_setup()
1106 list_add(&op->list, &bo->rx_ops); bcm_rx_setup()
1111 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */ bcm_rx_setup()
1239 struct bcm_sock *bo = bcm_sk(sk); bcm_sendmsg() local
1240 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */ bcm_sendmsg()
1244 if (!bo->bound) bcm_sendmsg()
1301 if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex)) bcm_sendmsg()
1308 if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex)) bcm_sendmsg()
1317 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex); bcm_sendmsg()
1323 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex); bcm_sendmsg()
1351 struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier); bcm_notifier() local
1352 struct sock *sk = &bo->sk; bcm_notifier()
1368 list_for_each_entry(op, &bo->rx_ops, list) bcm_notifier()
1373 if (bo->bound && bo->ifindex == dev->ifindex) { bcm_notifier()
1374 bo->bound = 0; bcm_notifier()
1375 bo->ifindex = 0; bcm_notifier()
1389 if (bo->bound && bo->ifindex == dev->ifindex) { bcm_notifier()
1404 struct bcm_sock *bo = bcm_sk(sk); bcm_init() local
1406 bo->bound = 0; bcm_init()
1407 bo->ifindex = 0; bcm_init()
1408 bo->dropped_usr_msgs = 0; bcm_init()
1409 bo->bcm_proc_read = NULL; bcm_init()
1411 INIT_LIST_HEAD(&bo->tx_ops); bcm_init()
1412 INIT_LIST_HEAD(&bo->rx_ops); bcm_init()
1415 bo->notifier.notifier_call = bcm_notifier; bcm_init()
1417 register_netdevice_notifier(&bo->notifier); bcm_init()
1428 struct bcm_sock *bo; bcm_release() local
1434 bo = bcm_sk(sk); bcm_release()
1438 unregister_netdevice_notifier(&bo->notifier); bcm_release()
1442 list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_release()
1445 list_for_each_entry_safe(op, next, &bo->rx_ops, list) { bcm_release()
1474 if (proc_dir && bo->bcm_proc_read) bcm_release()
1475 remove_proc_entry(bo->procname, proc_dir); bcm_release()
1478 if (bo->bound) { bcm_release()
1479 bo->bound = 0; bcm_release()
1480 bo->ifindex = 0; bcm_release()
1497 struct bcm_sock *bo = bcm_sk(sk); bcm_connect() local
1502 if (bo->bound) bcm_connect()
1518 bo->ifindex = dev->ifindex; bcm_connect()
1523 bo->ifindex = 0; bcm_connect()
1526 bo->bound = 1; bcm_connect()
1530 sprintf(bo->procname, "%lu", sock_i_ino(sk)); bcm_connect()
1531 bo->bcm_proc_read = proc_create_data(bo->procname, 0644, bcm_connect()
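The bcm.c hits above are the kernel side of the CAN Broadcast Manager, where bo is the per-socket bcm_sock with its tx_ops/rx_ops lists. The userspace sketch below exercises those paths: connecting a CAN_BCM socket runs bcm_connect(), and writing a TX_SETUP message runs bcm_sendmsg()/bcm_tx_setup(). The interface name "can0", the CAN ID and the payload are assumptions; it needs a Linux host with SocketCAN.

```c
/*
 * Userspace sketch (assumes a SocketCAN interface named "can0"):
 * install one cyclic transmit job through the BCM code listed above.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

struct bcm_tx_msg {
	struct bcm_msg_head head;
	struct can_frame frame;
};

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct bcm_tx_msg msg;
	int s;

	s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
	if (s < 0) {
		perror("socket");
		return 1;
	}

	addr.can_ifindex = if_nametoindex("can0");
	if (connect(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");		/* handled by bcm_connect() */
		return 1;
	}

	memset(&msg, 0, sizeof(msg));
	msg.head.opcode = TX_SETUP;		/* handled by bcm_tx_setup() */
	msg.head.flags = SETTIMER | STARTTIMER;
	msg.head.count = 0;			/* cyclic forever, period = ival2 */
	msg.head.ival2.tv_sec = 1;		/* once per second */
	msg.head.can_id = 0x123;
	msg.head.nframes = 1;
	msg.frame.can_id = 0x123;
	msg.frame.can_dlc = 2;
	msg.frame.data[0] = 0xde;
	msg.frame.data[1] = 0xad;

	if (write(s, &msg, sizeof(msg)) < 0)	/* enters bcm_sendmsg() */
		perror("write");

	pause();				/* keep the cyclic job alive */
	close(s);
	return 0;
}
```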
/linux-4.1.27/drivers/gpu/host1x/hw/
H A Dchannel_hw.c32 static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo, trace_write_gather() argument
39 mem = host1x_bo_mmap(bo); trace_write_gather()
51 trace_host1x_cdma_push_gather(dev_name(dev), bo, trace_write_gather() local
56 host1x_bo_munmap(bo, mem); trace_write_gather()
69 trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff); submit_gathers()
H A Ddebug_hw.c159 mapped = host1x_bo_mmap(g->bo); show_channel_gathers()
173 host1x_bo_munmap(g->bo, mapped); show_channel_gathers()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/gr/
H A Dctxgf117.c195 u32 bo = 0; gf117_grctx_generate_attrib() local
196 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; gf117_grctx_generate_attrib()
210 mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); gf117_grctx_generate_attrib()
211 mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); gf117_grctx_generate_attrib()
212 bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; gf117_grctx_generate_attrib()
H A Dctxgf108.c743 u32 bo = 0; gf108_grctx_generate_attrib() local
744 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; gf108_grctx_generate_attrib()
758 mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo); gf108_grctx_generate_attrib()
759 mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo); gf108_grctx_generate_attrib()
760 bo += impl->attrib_nr_max; gf108_grctx_generate_attrib()
H A Dctxgf100.c1064 u32 bo = 0; gf100_grctx_generate_attrib() local
1073 mmio_skip(info, o, (attrib << 16) | ++bo); gf100_grctx_generate_attrib()
1074 mmio_wr32(info, o, (attrib << 16) | --bo); gf100_grctx_generate_attrib()
1075 bo += impl->attrib_nr_max; gf100_grctx_generate_attrib()
H A Dctxgm107.c907 u32 bo = 0; gm107_grctx_generate_attrib() local
908 u32 ao = bo + impl->attrib_nr_max * priv->tpc_total; gm107_grctx_generate_attrib()
924 mmio_wr32(info, o + 0xf4, bo); gm107_grctx_generate_attrib()
925 bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc]; gm107_grctx_generate_attrib()
/linux-4.1.27/crypto/
H A Daes_generic.c1300 #define f_rn(bo, bi, n, k) do { \
1301 bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^ \
1307 #define f_nround(bo, bi, k) do {\
1308 f_rn(bo, bi, 0, k); \
1309 f_rn(bo, bi, 1, k); \
1310 f_rn(bo, bi, 2, k); \
1311 f_rn(bo, bi, 3, k); \
1315 #define f_rl(bo, bi, n, k) do { \
1316 bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^ \
1322 #define f_lround(bo, bi, k) do {\
1323 f_rl(bo, bi, 0, k); \
1324 f_rl(bo, bi, 1, k); \
1325 f_rl(bo, bi, 2, k); \
1326 f_rl(bo, bi, 3, k); \
1372 #define i_rn(bo, bi, n, k) do { \
1373 bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^ \
1379 #define i_nround(bo, bi, k) do {\
1380 i_rn(bo, bi, 0, k); \
1381 i_rn(bo, bi, 1, k); \
1382 i_rn(bo, bi, 2, k); \
1383 i_rn(bo, bi, 3, k); \
1387 #define i_rl(bo, bi, n, k) do { \
1388 bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^ \
1394 #define i_lround(bo, bi, k) do {\
1395 i_rl(bo, bi, 0, k); \
1396 i_rl(bo, bi, 1, k); \
1397 i_rl(bo, bi, 2, k); \
1398 i_rl(bo, bi, 3, k); \
/linux-4.1.27/include/uapi/drm/
H A Dqxl_drm.h62 * dest is the bo we are writing the relocation into
63 * src is bo we are relocating.
H A Dmsm_drm.h159 * avoid kernel needing to map/access the cmdstream bo in the common
191 * APIs without requiring a dummy bo to synchronize on.
H A Dvmwgfx_drm.h1027 * while the buffer is synced for CPU. This is similar to the GEM bo idle
/linux-4.1.27/net/ieee802154/
H A Dtrace.h37 #define BOOL_TO_STR(bo) (bo) ? "true" : "false"
/linux-4.1.27/arch/mips/include/asm/xtalk/
H A Dxwidget.h133 unsigned bo:1; member in struct:__anon2117
/linux-4.1.27/include/drm/
H A Ddrm_flip_work.h36 * bo's, etc until after vblank. The APIs are all thread-safe.
H A Ddrm_crtc.h189 * cleanup (like releasing the reference(s) on the backing GEM bo(s))
/linux-4.1.27/kernel/trace/
H A Dtrace_probe.c473 unsigned long bw, bo; __parse_bitfield_probe_arg() local
492 bo = simple_strtoul(bf, &tail, 0); __parse_bitfield_probe_arg()
497 bprm->hi_shift = BYTES_TO_BITS(t->size) - (bw + bo); __parse_bitfield_probe_arg()
498 bprm->low_shift = bprm->hi_shift + bo; __parse_bitfield_probe_arg()
500 return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0; __parse_bitfield_probe_arg()
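In __parse_bitfield_probe_arg() above, bw is the bit width and bo the bit offset of a b<width>@<offset> probe argument, and the two shifts are chosen so that (value << hi_shift) >> low_shift extracts exactly that field. A standalone check of the arithmetic, assuming an 8-byte fetch:

```c
/*
 * Standalone sketch of the shifts computed above:
 *   hi_shift  = size*8 - (bw + bo)
 *   low_shift = hi_shift + bo
 * so that (value << hi_shift) >> low_shift yields the bw-bit field that
 * starts bo bits above the LSB of a size-byte value.
 */
#include <stdio.h>
#include <stdint.h>

#define BYTES_TO_BITS(n) ((n) * 8)

static uint64_t extract(uint64_t value, unsigned int bw, unsigned int bo,
			unsigned int size)
{
	unsigned int hi_shift = BYTES_TO_BITS(size) - (bw + bo);
	unsigned int low_shift = hi_shift + bo;

	return (value << hi_shift) >> low_shift;
}

int main(void)
{
	/* 8-bit field, 4 bits above the LSB of a u64: prints 0xab. */
	printf("0x%llx\n",
	       (unsigned long long)extract(0xcafab1ULL, 8, 4, 8));
	return 0;
}
```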
/linux-4.1.27/drivers/gpu/drm/nouveau/dispnv04/
H A Doverlay.c139 nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset); nv10_update_plane()
159 nv_fb->nvbo->bo.offset + fb->offsets[1]); nv10_update_plane()
388 nv_fb->nvbo->bo.offset); nv04_update_plane()
H A Dcrtc.c852 nv_crtc->fb.offset = fb->nvbo->bo.offset; nv04_crtc_do_mode_set_base()
1020 nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset; nv04_crtc_cursor_set()
/linux-4.1.27/drivers/gpu/ipu-v3/
H A Dipu-cpmem.c295 int bpp = 0, npb = 0, ro, go, bo, to; ipu_cpmem_set_format_rgb() local
299 bo = rgb->bits_per_pixel - rgb->blue.length - rgb->blue.offset; ipu_cpmem_set_format_rgb()
307 ipu_ch_param_write_field(ch, IPU_FIELD_OFS2, bo); ipu_cpmem_set_format_rgb()
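In ipu_cpmem_set_format_rgb() above, bo is the blue component's offset counted from the most-significant bit of the pixel (bits_per_pixel - length - offset), which is the form the IPU's OFS fields expect. A standalone check of that arithmetic for RGB565 follows; only the blue line appears in the listing, so the red/green values here assume the same formula.

```c
/*
 * Standalone sketch of the component-offset arithmetic above:
 *   offset_from_msb = bits_per_pixel - length - offset_from_lsb
 * evaluated for RGB565 (red 11..15, green 5..10, blue 0..4).
 */
#include <stdio.h>

struct comp { int offset, length; };

static int ofs_from_msb(int bpp, struct comp c)
{
	return bpp - c.length - c.offset;
}

int main(void)
{
	const int bpp = 16;
	struct comp red   = { .offset = 11, .length = 5 };
	struct comp green = { .offset = 5,  .length = 6 };
	struct comp blue  = { .offset = 0,  .length = 5 };

	/* prints ro=0 go=5 bo=11 */
	printf("ro=%d go=%d bo=%d\n",
	       ofs_from_msb(bpp, red),
	       ofs_from_msb(bpp, green),
	       ofs_from_msb(bpp, blue));
	return 0;
}
```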
/linux-4.1.27/drivers/media/usb/dvb-usb/
H A Dtechnisat-usb2.c407 u8 bo = offset & 0xff; technisat_usb2_eeprom_lrc_read() local
411 .buf = &bo, technisat_usb2_eeprom_lrc_read()
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp4/
H A Dmdp4_kms.c509 dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); mdp4_kms_init()
517 dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); mdp4_kms_init()
H A Dmdp4_kms.h51 /* empty/blank cursor bo to use when cursor is "disabled" */
H A Dmdp4_crtc.c58 /* for unref'ing cursor bo's after scanout completes: */
/linux-4.1.27/drivers/gpu/drm/gma500/
H A Dpsb_intel_drv.h97 size_t(*bo_offset) (struct drm_device *dev, void *bo);
H A Dgma_display.c440 /* unpin the old bo */ gma_crtc_cursor_set()
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_userptr.c613 * pages they need to create a new bo to point to the new vma. i915_gem_userptr_get_pages()
617 * synchronisation issue as a regular bo, except that this time i915_gem_userptr_get_pages()
762 * 3. We only allow a bo as large as we could in theory map into the GTT,
764 * 4. The bo is marked as being snoopable. The backing pages are left
H A Dintel_overlay.c1381 DRM_ERROR("failed to pin overlay register bo\n"); intel_setup_overlay()
1388 DRM_ERROR("failed to move overlay register bo into the GTT\n"); intel_setup_overlay()
1432 /* The bo's should be free'd by the generic code already. intel_cleanup_overlay()
H A Dintel_ringbuffer.c1732 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
1766 * stable batch scratch bo area (so that the CS never i830_dispatch_execbuffer()
2504 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n"); intel_init_render_ring_buffer()
2511 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n"); intel_init_render_ring_buffer()
2610 DRM_ERROR("Failed to allocate batch bo\n"); intel_init_render_ring_buffer()
2617 DRM_ERROR("Failed to ping batch bo\n"); intel_init_render_ring_buffer()
H A Di915_dma.c880 * requests (and thus managing bo) once the task has been completed i915_driver_load()
883 * bo. i915_driver_load()
H A Dintel_fbdev.c273 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", intelfb_create()
H A Di915_gem_execbuffer.c562 * contained within a mmaped bo. For in such a case we, the page i915_gem_execbuffer_relocate()
675 /* avoid costly ping-pong once a batch bo ended up non-mappable */ eb_vma_misplaced()
H A Di915_drv.h1883 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
H A Di915_gem.c4141 "bo is already pinned in %s with incorrect alignment:" i915_gem_object_do_pin()
H A Dintel_display.c2390 "Y tiling bo slipped through, driver bug!\n")) intel_pin_and_fence_fb_obj()
2404 * bo. We currently fill all unused PTE with the shadow page and so intel_pin_and_fence_fb_obj()
/linux-4.1.27/arch/powerpc/lib/
H A Dsstep.c65 unsigned int bo = (instr >> 21) & 0x1f; branch_taken() local
68 if ((bo & 4) == 0) { branch_taken()
71 if (((bo >> 1) & 1) ^ (regs->ctr == 0)) branch_taken()
74 if ((bo & 0x10) == 0) { branch_taken()
77 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1)) branch_taken()
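branch_taken() above decodes the same BO field that the ppc-xlate.pl and ppc-opc.c hits encode: a clear BO[2] (bo & 4) means the CTR is decremented and tested, and a clear BO[0] (bo & 0x10) means a CR bit is tested against BO[1]. The standalone sketch below restates that predicate with regs reduced to ctr and ccr; the BI extraction from instruction bits 16..20 is standard PowerPC field layout rather than something visible in the listing.

```c
/*
 * Standalone sketch of branch_taken() above: decide whether a PowerPC
 * conditional branch is taken, given only ctr and ccr.
 */
#include <stdio.h>

struct mini_regs {
	unsigned long ctr;	/* count register */
	unsigned long ccr;	/* condition register, bit 0 at the MSB end */
};

static int branch_taken(unsigned int instr, struct mini_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi = (instr >> 16) & 0x1f;

	if ((bo & 4) == 0) {		/* BO[2] clear: decrement and test CTR */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {		/* BO[0] clear: test CR bit BI against BO[1] */
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

int main(void)
{
	struct mini_regs regs = { .ctr = 1, .ccr = 0 };
	unsigned int bdnz = 16u << 21;	/* BO = 16: "bdnz"-style, no CR test */

	/* ctr goes 1 -> 0, so the branch is not taken here. */
	printf("taken=%d, ctr=%lu\n", branch_taken(bdnz, &regs), regs.ctr);
	return 0;
}
```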
/linux-4.1.27/drivers/usb/wusbcore/
H A Dcrypto.c128 u8 *bo = _bo; bytewise_xor() local
132 bo[itr] = bi1[itr] ^ bi2[itr]; bytewise_xor()
/linux-4.1.27/drivers/dma-buf/
H A Dreservation.c4 * Based on bo.c which bears the following copyright notice,
/linux-4.1.27/drivers/gpu/drm/msm/adreno/
H A Dadreno_gpu.c60 ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova); adreno_hw_init()
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp5/
H A Dmdp5_crtc.c53 /* for unref'ing cursor bo's after scanout completes: */
/linux-4.1.27/arch/powerpc/xmon/
H A Dppc-opc.c1562 #define BBO(op, bo, aa, lk) (B ((op), (aa), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21))
1576 #define BBOCB(op, bo, cb, aa, lk) \
1577 (BBO ((op), (bo), (aa), (lk)) | ((((unsigned long)(cb)) & 0x3) << 16))
1755 #define XLO(op, bo, xop, lk) \
1756 (XLLK ((op), (xop), (lk)) | ((((unsigned long)(bo)) & 0x1f) << 21))
1766 #define XLOCB(op, bo, cb, xop, lk) \
1767 (XLO ((op), (bo), (xop), (lk)) | ((((unsigned long)(cb)) & 3) << 16))
/linux-4.1.27/net/wireless/
H A Dtrace.h188 #define BOOL_TO_STR(bo) (bo) ? "true" : "false"
/linux-4.1.27/arch/mips/include/asm/sn/sn0/
H A Dhubio.h882 bo: 1, /* 31: barrier op set in xtalk rqst*/ member in struct:icrbp_a::__anon2086
/linux-4.1.27/drivers/net/ethernet/8390/
H A Daxnet_cs.c674 pr_debug("%s: [bo=%d]\n", dev->name, count); block_output()
H A Dpcnet_cs.c1222 netif_dbg(ei_local, tx_queued, dev, "[bo=%d]\n", count); dma_block_output()
/linux-4.1.27/drivers/usb/host/
H A Doxu210hp-hcd.c3696 static const char * const bo[] = { oxu_verify_id() local
3711 bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT], oxu_verify_id()
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_main.c1140 "t:%u bo:%lu\n", toggle, c->bit_offset); fill_bitmap_rle_bits()

Completed in 3828 milliseconds