Searched refs:msm_obj (Results 1 - 5 of 5) sorted by relevance

/linux-4.4.14/drivers/gpu/drm/msm/
msm_gem.c
    29  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [physaddr(), local]
    31  return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +  [physaddr()]
    37  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [use_pages(), local]
    38  return !msm_obj->vram_node;  [use_pages()]
    45  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [get_pages_vram(), local]
    55  ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,  [get_pages_vram()]
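
The physaddr(), use_pages() and get_pages_vram() hits above all hinge on one field: a non-NULL vram_node marks a buffer carved out of a drm_mm-managed VRAM carveout rather than backed by shmem pages. A minimal sketch of that split, assuming to_msm_bo() is the usual container_of() wrapper (struct layout abridged, VRAM base field assumed):

    #include <drm/drmP.h>
    #include <drm/drm_mm.h>

    struct msm_gem_object {
        struct drm_gem_object base;
        struct drm_mm_node *vram_node;      /* NULL => shmem-backed */
        /* ... remaining fields elided ... */
    };

    #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

    /* shmem pages are used only when no VRAM carveout node exists */
    static bool use_pages(struct drm_gem_object *obj)
    {
        return !to_msm_bo(obj)->vram_node;
    }

    /* carveout BOs are addressed by page offset from the VRAM base */
    static dma_addr_t physaddr(struct drm_gem_object *obj)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_drm_private *priv = obj->dev->dev_private;

        return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                priv->vram.paddr;           /* base-address field assumed */
    }
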
    74  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [get_pages(), local]
    76  if (!msm_obj->pages) {  [get_pages()]
    92  msm_obj->sgt = drm_prime_pages_to_sg(p, npages);  [get_pages()]
    93  if (IS_ERR(msm_obj->sgt)) {  [get_pages()]
    95  return ERR_CAST(msm_obj->sgt);  [get_pages()]
    98  msm_obj->pages = p;  [get_pages()]
   103  if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))  [get_pages()]
   104  dma_map_sg(dev->dev, msm_obj->sgt->sgl,  [get_pages()]
   105  msm_obj->sgt->nents, DMA_BIDIRECTIONAL);  [get_pages()]
   108  return msm_obj->pages;  [get_pages()]
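
Taken together, the get_pages() hits trace the shmem allocation path: pin the pages, build a scatter/gather table with drm_prime_pages_to_sg(), and eagerly create the device mapping for write-combined or uncached BOs. A condensed reconstruction under the same assumptions as above (error-path cleanup and the VRAM branch are elided):

    static struct page **get_pages(struct drm_gem_object *obj)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages) {
            int npages = obj->size >> PAGE_SHIFT;
            struct page **p = drm_gem_get_pages(obj);  /* shmem branch only */

            if (IS_ERR(p))
                return p;

            msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
            if (IS_ERR(msm_obj->sgt))
                return ERR_CAST(msm_obj->sgt);  /* real code also unpins p */

            msm_obj->pages = p;

            /* non-cached BOs get their device-side mapping up front */
            if (msm_obj->flags & (MSM_BO_WC | MSM_BO_UNCACHED))
                dma_map_sg(obj->dev->dev, msm_obj->sgt->sgl,
                        msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        }

        return msm_obj->pages;
    }
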
   113  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [put_pages(), local]
   115  if (msm_obj->pages) {  [put_pages()]
   119  if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))  [put_pages()]
   120  dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,  [put_pages()]
   121  msm_obj->sgt->nents, DMA_BIDIRECTIONAL);  [put_pages()]
   122  sg_free_table(msm_obj->sgt);  [put_pages()]
   123  kfree(msm_obj->sgt);  [put_pages()]
   126  drm_gem_put_pages(obj, msm_obj->pages, true, false);  [put_pages()]
   128  drm_mm_remove_node(msm_obj->vram_node);  [put_pages()]
   129  drm_free_large(msm_obj->pages);  [put_pages()]
   132  msm_obj->pages = NULL;  [put_pages()]
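
The put_pages() hits are the exact mirror image, torn down in reverse: unmap the device-side mapping, free the sg table, then return the pages to shmem or release the carveout node. A sketch under the same assumed fields:

    static void put_pages(struct drm_gem_object *obj)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->pages)
            return;

        /* undo the mapping established in get_pages() */
        if (msm_obj->flags & (MSM_BO_WC | MSM_BO_UNCACHED))
            dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                    msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
        sg_free_table(msm_obj->sgt);
        kfree(msm_obj->sgt);

        if (use_pages(obj)) {
            /* dirty=true, accessed=false, matching the hit at line 126 */
            drm_gem_put_pages(obj, msm_obj->pages, true, false);
        } else {
            drm_mm_remove_node(msm_obj->vram_node);
            drm_free_large(msm_obj->pages);
        }

        msm_obj->pages = NULL;
    }
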
   154  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_mmap_obj(), local]
   159  if (msm_obj->flags & MSM_BO_WC) {  [msm_gem_mmap_obj()]
   161  } else if (msm_obj->flags & MSM_BO_UNCACHED) {  [msm_gem_mmap_obj()]
   286  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_get_iova_locked(), local]
   289  if (!msm_obj->domain[id].iova) {  [msm_gem_get_iova_locked()]
   304  ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,  [msm_gem_get_iova_locked()]
   306  msm_obj->domain[id].iova = offset;  [msm_gem_get_iova_locked()]
   308  msm_obj->domain[id].iova = physaddr(obj);  [msm_gem_get_iova_locked()]
   313  *iova = msm_obj->domain[id].iova;  [msm_gem_get_iova_locked()]
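
The msm_gem_get_iova_locked() hits show lazy, per-address-space mapping: each MMU id caches its iova in msm_obj->domain[id], mapping through the MMU when one is present and falling back to the VRAM physical address otherwise. A sketch of that flow; the priv->mmus[] lookup, the mmap_offset() address source, and the prot flags are assumptions from this era of the driver:

    static int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
            uint32_t *iova)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;

        if (!msm_obj->domain[id].iova) {
            struct msm_drm_private *priv = obj->dev->dev_private;
            struct msm_mmu *mmu = priv->mmus[id];       /* lookup assumed */

            if (mmu) {
                /* iova taken from the GEM mmap offset (assumed) */
                uint32_t offset = (uint32_t)mmap_offset(obj);

                ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
                        obj->size, IOMMU_READ | IOMMU_WRITE);
                if (!ret)
                    msm_obj->domain[id].iova = offset;
            } else {
                /* no IOMMU: the carveout is addressed physically */
                msm_obj->domain[id].iova = physaddr(obj);
            }
        }

        if (!ret)
            *iova = msm_obj->domain[id].iova;
        return ret;
    }
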
   321  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_get_iova(), local]
   327  if (msm_obj->domain[id].iova) {  [msm_gem_get_iova()]
   328  *iova = msm_obj->domain[id].iova;  [msm_gem_get_iova()]
   343  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_iova(), local]
   344  WARN_ON(!msm_obj->domain[id].iova);  [msm_gem_iova()]
   345  return msm_obj->domain[id].iova;  [msm_gem_iova()]
   390  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_vaddr_locked(), local]
   392  if (!msm_obj->vaddr) {  [msm_gem_vaddr_locked()]
   396  msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,  [msm_gem_vaddr_locked()]
   399  return msm_obj->vaddr;  [msm_gem_vaddr_locked()]
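
msm_gem_vaddr_locked() caches a kernel mapping of the whole page array, built once with vmap(); the write-combined pgprot here is an assumption, consistent with the MSM_BO_WC handling elsewhere in the hits:

    static void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        if (!msm_obj->vaddr) {
            struct page **pages = get_pages(obj);

            if (IS_ERR(pages))
                return ERR_CAST(pages);

            /* one mapping for the life of the BO, torn down by vunmap() */
            msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        }

        return msm_obj->vaddr;
    }
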
   417  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_queue_inactive_cb(), local]
   418  uint32_t fence = msm_gem_fence(msm_obj,  [msm_gem_queue_inactive_cb()]
   426  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_move_to_active(), local]
   427  msm_obj->gpu = gpu;  [msm_gem_move_to_active()]
   429  msm_obj->write_fence = fence;  [msm_gem_move_to_active()]
   431  msm_obj->read_fence = fence;  [msm_gem_move_to_active()]
   432  list_del_init(&msm_obj->mm_list);  [msm_gem_move_to_active()]
   433  list_add_tail(&msm_obj->mm_list, &gpu->active_list);  [msm_gem_move_to_active()]
   440  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_move_to_inactive(), local]
   444  msm_obj->gpu = NULL;  [msm_gem_move_to_inactive()]
   445  msm_obj->read_fence = 0;  [msm_gem_move_to_inactive()]
   446  msm_obj->write_fence = 0;  [msm_gem_move_to_inactive()]
   447  list_del_init(&msm_obj->mm_list);  [msm_gem_move_to_inactive()]
   448  list_add_tail(&msm_obj->mm_list, &priv->inactive_list);  [msm_gem_move_to_inactive()]
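
The move_to_active()/move_to_inactive() hits show the retire-side bookkeeping: the object records its owning GPU and the last read/write fence, and migrates between gpu->active_list and priv->inactive_list. A sketch using list_move_tail() in place of the original list_del_init() + list_add_tail() pair (behaviorally equivalent):

    void msm_gem_move_to_active(struct drm_gem_object *obj,
            struct msm_gpu *gpu, bool write, uint32_t fence)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        msm_obj->gpu = gpu;             /* makes is_active() true */
        if (write)
            msm_obj->write_fence = fence;
        else
            msm_obj->read_fence = fence;
        list_move_tail(&msm_obj->mm_list, &gpu->active_list);
    }

    void msm_gem_move_to_inactive(struct drm_gem_object *obj)
    {
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        msm_obj->gpu = NULL;
        msm_obj->read_fence = 0;
        msm_obj->write_fence = 0;
        list_move_tail(&msm_obj->mm_list, &priv->inactive_list);
    }
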
   454  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_cpu_prep(), local]
   457  if (is_active(msm_obj)) {  [msm_gem_cpu_prep()]
   458  uint32_t fence = msm_gem_fence(msm_obj, op);  [msm_gem_cpu_prep()]
   481  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_describe(), local]
   486  msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',  [msm_gem_describe()]
   487  msm_obj->read_fence, msm_obj->write_fence,  [msm_gem_describe()]
   489  off, msm_obj->vaddr, obj->size);  [msm_gem_describe()]
   494  struct msm_gem_object *msm_obj;  [msm_gem_describe_objects(), local]
   498  list_for_each_entry(msm_obj, list, mm_list) {  [msm_gem_describe_objects()]
   499  struct drm_gem_object *obj = &msm_obj->base;  [msm_gem_describe_objects()]
   514  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_free_object(), local]
   520  WARN_ON(is_active(msm_obj));  [msm_gem_free_object()]
   522  list_del(&msm_obj->mm_list);  [msm_gem_free_object()]
   524  for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {  [msm_gem_free_object()]
   526  if (mmu && msm_obj->domain[id].iova) {  [msm_gem_free_object()]
   527  uint32_t offset = msm_obj->domain[id].iova;  [msm_gem_free_object()]
   528  mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);  [msm_gem_free_object()]
   533  if (msm_obj->vaddr)  [msm_gem_free_object()]
   534  dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);  [msm_gem_free_object()]
   539  if (msm_obj->pages)  [msm_gem_free_object()]
   540  drm_free_large(msm_obj->pages);  [msm_gem_free_object()]
   542  drm_prime_gem_destroy(obj, msm_obj->sgt);  [msm_gem_free_object()]
   544  vunmap(msm_obj->vaddr);  [msm_gem_free_object()]
   548  if (msm_obj->resv == &msm_obj->_resv)  [msm_gem_free_object()]
   549  reservation_object_fini(msm_obj->resv);  [msm_gem_free_object()]
   553  kfree(msm_obj);  [msm_gem_free_object()]
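
The msm_gem_free_object() hits fix the teardown order: unmap every per-MMU iova, then branch on obj->import_attach (imported dma-bufs hand their pages/sgt back via drm_prime_gem_destroy(), native BOs go through vunmap() + put_pages()), and finally drop the reservation object if it was embedded. A sketch; the MMU lookup is a placeholder, and locking plus drm_gem_object_release() are elided:

    void msm_gem_free_object(struct drm_gem_object *obj)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int id;

        WARN_ON(is_active(msm_obj));    /* must already be retired */
        list_del(&msm_obj->mm_list);

        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
            struct msm_mmu *mmu = get_mmu(obj->dev, id);    /* placeholder */

            if (mmu && msm_obj->domain[id].iova)
                mmu->funcs->unmap(mmu, msm_obj->domain[id].iova,
                        msm_obj->sgt, obj->size);
        }

        if (obj->import_attach) {
            /* imported dma-buf: pages and sgt belong to the exporter */
            if (msm_obj->vaddr)
                dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
            if (msm_obj->pages)
                drm_free_large(msm_obj->pages);
            drm_prime_gem_destroy(obj, msm_obj->sgt);
        } else {
            vunmap(msm_obj->vaddr);
            put_pages(obj);
        }

        if (msm_obj->resv == &msm_obj->_resv)
            reservation_object_fini(msm_obj->resv);

        kfree(msm_obj);
    }
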
   587  struct msm_gem_object *msm_obj;  [msm_gem_new_impl(), local]
   610  sz = sizeof(*msm_obj);  [msm_gem_new_impl()]
   614  msm_obj = kzalloc(sz, GFP_KERNEL);  [msm_gem_new_impl()]
   615  if (!msm_obj)  [msm_gem_new_impl()]
   619  msm_obj->vram_node = (void *)&msm_obj[1];  [msm_gem_new_impl()]
   621  msm_obj->flags = flags;  [msm_gem_new_impl()]
   623  msm_obj->resv = &msm_obj->_resv;  [msm_gem_new_impl()]
   624  reservation_object_init(msm_obj->resv);  [msm_gem_new_impl()]
   626  INIT_LIST_HEAD(&msm_obj->submit_entry);  [msm_gem_new_impl()]
   627  list_add_tail(&msm_obj->mm_list, &priv->inactive_list);  [msm_gem_new_impl()]
   629  *obj = &msm_obj->base;  [msm_gem_new_impl()]
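
Line 619 in msm_gem_new_impl() is the subtle one: when the VRAM carveout is in use, the drm_mm_node is allocated in the same kzalloc() as the object itself, and vram_node points just past the struct. A sketch (the use_vram test via iommu_present() is an assumption):

    static int msm_gem_new_impl(struct drm_device *dev, uint32_t size,
            uint32_t flags, struct drm_gem_object **obj)
    {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj;
        bool use_vram = !iommu_present(&platform_bus_type); /* assumed */
        unsigned sz = sizeof(*msm_obj);

        if (use_vram)
            sz += sizeof(struct drm_mm_node);   /* tail allocation */

        msm_obj = kzalloc(sz, GFP_KERNEL);
        if (!msm_obj)
            return -ENOMEM;

        if (use_vram)
            msm_obj->vram_node = (void *)&msm_obj[1];   /* line 619 */

        msm_obj->flags = flags;

        msm_obj->resv = &msm_obj->_resv;
        reservation_object_init(msm_obj->resv);

        INIT_LIST_HEAD(&msm_obj->submit_entry);
        list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

        *obj = &msm_obj->base;
        return 0;
    }
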
   668  struct msm_gem_object *msm_obj;  [msm_gem_import(), local]
   688  msm_obj = to_msm_bo(obj);  [msm_gem_import()]
   689  msm_obj->sgt = sgt;  [msm_gem_import()]
   690  msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));  [msm_gem_import()]
   691  if (!msm_obj->pages) {  [msm_gem_import()]
   696  ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);  [msm_gem_import()]

msm_gem_submit.c
    69  struct msm_gem_object *msm_obj;  [submit_lookup_objects(), local]
    99  msm_obj = to_msm_bo(obj);  [submit_lookup_objects()]
   101  if (!list_empty(&msm_obj->submit_entry)) {  [submit_lookup_objects()]
   110  submit->bos[i].obj = msm_obj;  [submit_lookup_objects()]
   112  list_add_tail(&msm_obj->submit_entry, &submit->bo_list);  [submit_lookup_objects()]
   124  struct msm_gem_object *msm_obj = submit->bos[i].obj;  [submit_unlock_unpin_bo(), local]
   127  msm_gem_put_iova(&msm_obj->base, submit->gpu->id);  [submit_unlock_unpin_bo()]
   130  ww_mutex_unlock(&msm_obj->resv->lock);  [submit_unlock_unpin_bo()]
   147  struct msm_gem_object *msm_obj = submit->bos[i].obj;  [submit_validate_objects(), local]
   156  ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,  [submit_validate_objects()]
   165  ret = msm_gem_get_iova_locked(&msm_obj->base,  [submit_validate_objects()]
   200  struct msm_gem_object *msm_obj = submit->bos[contended].obj;  [submit_validate_objects(), local]
   202  ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,  [submit_validate_objects()]
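
The submit_validate_objects() hits at lines 156 and 202 are the two halves of the wound/wait mutex protocol: optimistically take each BO's reservation lock, and on -EDEADLK back off completely, sleep on the contended lock with the _slow variant, and retry. A simplified sketch of just the locking loop (the real function also pins iovas and tracks per-BO state flags; the caller is assumed to have done ww_acquire_init() on submit->ticket):

    static int lock_all_bos(struct msm_gem_submit *submit)
    {
        int contended = -1, slow_locked = -1, i, ret = 0;

    retry:
        for (i = 0; i < submit->nr_bos; i++) {
            struct msm_gem_object *msm_obj = submit->bos[i].obj;

            contended = i;

            if (i == slow_locked) {
                slow_locked = -1;   /* taken by the slow path below */
                continue;
            }

            ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
                    &submit->ticket);
            if (ret)
                goto fail;
        }
        return 0;

    fail:
        /* back off: drop everything taken so far */
        while (--i >= 0)
            ww_mutex_unlock(&submit->bos[i].obj->resv->lock);
        /* the slow-path lock may lie past the failure point; drop it too */
        if (slow_locked >= 0)
            ww_mutex_unlock(&submit->bos[slow_locked].obj->resv->lock);

        if (ret == -EDEADLK) {
            struct msm_gem_object *msm_obj = submit->bos[contended].obj;

            /* lost the acquire race: sleep on the contended lock, retry */
            ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
                    &submit->ticket);
            if (!ret) {
                slow_locked = contended;
                goto retry;
            }
        }
        return ret;
    }
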
   310  struct msm_gem_object *msm_obj = submit->bos[i].obj;  [submit_cleanup(), local]
   312  list_del_init(&msm_obj->submit_entry);  [submit_cleanup()]
   313  drm_gem_object_unreference(&msm_obj->base);  [submit_cleanup()]
   361  struct msm_gem_object *msm_obj;  [msm_ioctl_gem_submit(), local]
   383  &msm_obj, &iova, NULL);  [msm_ioctl_gem_submit()]
   395  msm_obj->base.size) {  [msm_ioctl_gem_submit()]
   409  ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,  [msm_ioctl_gem_submit()]

msm_gem_prime.c
    25  struct msm_gem_object *msm_obj = to_msm_bo(obj);  [msm_gem_prime_get_sg_table(), local]
    28  if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */  [msm_gem_prime_get_sg_table()]
    31  return drm_prime_pages_to_sg(msm_obj->pages, npages);  [msm_gem_prime_get_sg_table()]
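
The prime export hook is short enough that the hits nearly reconstruct it whole: exporting requires the pages to already be pinned, and then drm_prime_pages_to_sg() builds a fresh table for the importer (the exporter keeps its own msm_obj->sgt). A sketch consistent with the excerpt:

    struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
    {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int npages = obj->size >> PAGE_SHIFT;

        if (WARN_ON(!msm_obj->pages))   /* should have already pinned! */
            return NULL;

        return drm_prime_pages_to_sg(msm_obj->pages, npages);
    }
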

msm_gem.h
    71  static inline bool is_active(struct msm_gem_object *msm_obj)  [is_active(), argument]
    73  return msm_obj->gpu != NULL;  [is_active()]
    76  static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,  [msm_gem_fence(), argument]
    82  fence = msm_obj->write_fence;  [msm_gem_fence()]
    84  fence = max(fence, msm_obj->read_fence);  [msm_gem_fence()]
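
The two msm_gem.h helpers can be restated nearly in full from the hits: is_active() keys off the cached gpu pointer, and msm_gem_fence() folds the read/write fences into the single fence a given access must wait for (readers wait on the last writer; writers wait on readers and the writer both). The MSM_PREP_* flag names are from the msm UAPI:

    static inline bool is_active(struct msm_gem_object *msm_obj)
    {
        return msm_obj->gpu != NULL;
    }

    static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
            uint32_t op)
    {
        uint32_t fence = 0;

        if (op & MSM_PREP_READ)
            fence = msm_obj->write_fence;   /* reads wait for writes */
        if (op & MSM_PREP_WRITE)
            fence = max(fence, msm_obj->read_fence);  /* writes wait for all */

        return fence;
    }
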

msm_gpu.c
   523  struct msm_gem_object *msm_obj = submit->bos[i].obj;  [msm_gpu_submit(), local]
   528  WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));  [msm_gpu_submit()]
   530  if (!is_active(msm_obj)) {  [msm_gpu_submit()]
   534  drm_gem_object_reference(&msm_obj->base);  [msm_gpu_submit()]
   535  msm_gem_get_iova_locked(&msm_obj->base,  [msm_gpu_submit()]
   540  msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);  [msm_gpu_submit()]
   543  msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);  [msm_gpu_submit()]
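
Finally, the msm_gpu_submit() hits spell out the submit-side lifetime rule: a BO gains a GEM reference and an iova pin only on its first transition to active (both dropped again at retire), and every BO is tagged with the submit fence, as a write fence or a read fence depending on the submit flags. A condensed sketch of that loop:

    static void submit_attach_bos(struct msm_gpu *gpu,
            struct msm_gem_submit *submit)
    {
        int i;

        for (i = 0; i < submit->nr_bos; i++) {
            struct msm_gem_object *msm_obj = submit->bos[i].obj;
            uint32_t iova;

            /* a BO may be active on at most one GPU at a time */
            WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

            if (!is_active(msm_obj)) {
                /* ref + pin live until the submit retires */
                drm_gem_object_reference(&msm_obj->base);
                msm_gem_get_iova_locked(&msm_obj->base, gpu->id, &iova);
            }

            if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                msm_gem_move_to_active(&msm_obj->base, gpu, true,
                        submit->fence);
            else
                msm_gem_move_to_active(&msm_obj->base, gpu, false,
                        submit->fence);
        }
    }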

Completed in 226 milliseconds