Lines matching refs: msm_obj (each entry: source-file line number, matching code fragment, enclosing function)
29 struct msm_gem_object *msm_obj = to_msm_bo(obj); in physaddr() local
31 return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + in physaddr()
37 struct msm_gem_object *msm_obj = to_msm_bo(obj); in use_pages() local
38 return !msm_obj->vram_node; in use_pages()
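For context, lines 29-38 belong to two small helpers that decide how a buffer object is backed. A sketch of how the listed fragments fit together; the declarations between them (priv, the vram.paddr field) are assumptions based on the msm driver of this vintage:

    static dma_addr_t physaddr(struct drm_gem_object *obj)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            struct msm_drm_private *priv = obj->dev->dev_private;

            /* vram_node->start counts pages; shift to bytes and offset
             * from the base of the VRAM carveout: */
            return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
                            priv->vram.paddr;
    }

    /* true when the BO is shmem-backed rather than carved out of VRAM */
    static bool use_pages(struct drm_gem_object *obj)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            return !msm_obj->vram_node;
    }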
45 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_pages_vram() local
55 ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, in get_pages_vram()
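get_pages_vram() (lines 45-55) allocates from the VRAM carveout through drm_mm. A hedged sketch: the drm_malloc_ab() call and the phys_to_page() fill loop are assumptions, only the insert-node line is confirmed by the listing:

    static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            struct msm_drm_private *priv = obj->dev->dev_private;
            dma_addr_t paddr;
            struct page **p;
            int ret, i;

            p = drm_malloc_ab(npages, sizeof(struct page *));
            if (!p)
                    return ERR_PTR(-ENOMEM);

            /* reserve a contiguous range in the carveout allocator: */
            ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
                            npages, 0, DRM_MM_SEARCH_DEFAULT);
            if (ret) {
                    drm_free_large(p);
                    return ERR_PTR(ret);
            }

            /* synthesize a page array covering the contiguous range: */
            paddr = physaddr(obj);
            for (i = 0; i < npages; i++) {
                    p[i] = phys_to_page(paddr);
                    paddr += PAGE_SIZE;
            }

            return p;
    }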
74 struct msm_gem_object *msm_obj = to_msm_bo(obj); in get_pages() local
76 if (!msm_obj->pages) { in get_pages()
92 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); in get_pages()
93 if (IS_ERR(msm_obj->sgt)) { in get_pages()
95 return ERR_CAST(msm_obj->sgt); in get_pages()
98 msm_obj->pages = p; in get_pages()
103 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) in get_pages()
104 dma_map_sg(dev->dev, msm_obj->sgt->sgl, in get_pages()
105 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); in get_pages()
108 return msm_obj->pages; in get_pages()
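get_pages() (lines 74-108) is the lazy backing-store allocator: the page array and sg_table are built on first use and cached in the object. A sketch assembling the fragments; the use_pages()/get_pages_vram() dispatch and the error prints are inferred, not in the listing:

    static struct page **get_pages(struct drm_gem_object *obj)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);

            if (!msm_obj->pages) {
                    struct drm_device *dev = obj->dev;
                    struct page **p;
                    int npages = obj->size >> PAGE_SHIFT;

                    /* shmem-backed BOs come from drm_gem_get_pages(),
                     * carveout BOs from get_pages_vram(): */
                    if (use_pages(obj))
                            p = drm_gem_get_pages(obj);
                    else
                            p = get_pages_vram(obj, npages);

                    if (IS_ERR(p))
                            return p;

                    msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
                    if (IS_ERR(msm_obj->sgt)) {
                            dev_err(dev->dev, "failed to allocate sgt\n");
                            return ERR_CAST(msm_obj->sgt);
                    }

                    msm_obj->pages = p;

                    /* non-cached buffers must start out clean, since the
                     * GPU and display are not coherent with the CPU: */
                    if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                            dma_map_sg(dev->dev, msm_obj->sgt->sgl,
                                            msm_obj->sgt->nents,
                                            DMA_BIDIRECTIONAL);
            }

            return msm_obj->pages;
    }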
113 struct msm_gem_object *msm_obj = to_msm_bo(obj); in put_pages() local
115 if (msm_obj->pages) { in put_pages()
119 if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) in put_pages()
120 dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, in put_pages()
121 msm_obj->sgt->nents, DMA_BIDIRECTIONAL); in put_pages()
122 sg_free_table(msm_obj->sgt); in put_pages()
123 kfree(msm_obj->sgt); in put_pages()
126 drm_gem_put_pages(obj, msm_obj->pages, true, false); in put_pages()
128 drm_mm_remove_node(msm_obj->vram_node); in put_pages()
129 drm_free_large(msm_obj->pages); in put_pages()
132 msm_obj->pages = NULL; in put_pages()
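put_pages() (lines 113-132) mirrors get_pages(), tearing down in reverse order. A sketch; the if/else around the two release paths is inferred from lines 126-129:

    static void put_pages(struct drm_gem_object *obj)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);

            if (msm_obj->pages) {
                    /* undo the clean-for-DMA mapping done in get_pages(): */
                    if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                            dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
                                            msm_obj->sgt->nents,
                                            DMA_BIDIRECTIONAL);
                    sg_free_table(msm_obj->sgt);
                    kfree(msm_obj->sgt);

                    if (use_pages(obj)) {
                            drm_gem_put_pages(obj, msm_obj->pages, true, false);
                    } else {
                            /* carveout case: release the drm_mm range and
                             * the page array built by get_pages_vram(): */
                            drm_mm_remove_node(msm_obj->vram_node);
                            drm_free_large(msm_obj->pages);
                    }

                    msm_obj->pages = NULL;
            }
    }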
154 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_mmap_obj() local
159 if (msm_obj->flags & MSM_BO_WC) { in msm_gem_mmap_obj()
161 } else if (msm_obj->flags & MSM_BO_UNCACHED) { in msm_gem_mmap_obj()
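msm_gem_mmap_obj() (lines 154-161) selects userspace mapping attributes from the BO's cache flags. A minimal sketch of the branch, assuming the usual vma parameter; the cached else branch is an assumption, not present in the listing:

    if (msm_obj->flags & MSM_BO_WC) {
            vma->vm_page_prot =
                    pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
    } else if (msm_obj->flags & MSM_BO_UNCACHED) {
            vma->vm_page_prot =
                    pgprot_noncached(vm_get_page_prot(vma->vm_flags));
    } else {
            /* cached: keep the default protection bits */
            vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
    }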
286 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_iova_locked() local
289 if (!msm_obj->domain[id].iova) { in msm_gem_get_iova_locked()
304 ret = mmu->funcs->map(mmu, offset, msm_obj->sgt, in msm_gem_get_iova_locked()
306 msm_obj->domain[id].iova = offset; in msm_gem_get_iova_locked()
308 msm_obj->domain[id].iova = physaddr(obj); in msm_gem_get_iova_locked()
313 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova_locked()
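msm_gem_get_iova_locked() (lines 286-313) lazily maps the BO into the per-id address space, falling back to the physical carveout address when there is no IOMMU. A sketch; iommu_present(), priv->mmus[id], and the driver-local mmap_offset() helper are assumptions drawn from msm drivers of this vintage:

    int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                    uint32_t *iova)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            int ret = 0;

            if (!msm_obj->domain[id].iova) {
                    struct msm_drm_private *priv = obj->dev->dev_private;
                    struct page **pages = get_pages(obj);

                    if (IS_ERR(pages))
                            return PTR_ERR(pages);

                    if (iommu_present(&platform_bus_type)) {
                            struct msm_mmu *mmu = priv->mmus[id];
                            uint32_t offset = (uint32_t)mmap_offset(obj);

                            ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
                                            obj->size,
                                            IOMMU_READ | IOMMU_WRITE);
                            msm_obj->domain[id].iova = offset;
                    } else {
                            /* no IOMMU: the carveout physical address is
                             * the device address */
                            msm_obj->domain[id].iova = physaddr(obj);
                    }
            }

            if (!ret)
                    *iova = msm_obj->domain[id].iova;

            return ret;
    }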
321 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_get_iova() local
327 if (msm_obj->domain[id].iova) { in msm_gem_get_iova()
328 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova()
343 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_iova() local
344 WARN_ON(!msm_obj->domain[id].iova); in msm_gem_iova()
345 return msm_obj->domain[id].iova; in msm_gem_iova()
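Lines 321-345 cover the two callers' views of the mapping: msm_gem_get_iova() takes the lock only on the slow path, and msm_gem_iova() assumes a mapping already exists. A sketch; the locking around the slow path is inferred:

    int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            int ret;

            /* unlocked fast path: safe because mappings are not torn
             * down until the BO is deleted: */
            if (msm_obj->domain[id].iova) {
                    *iova = msm_obj->domain[id].iova;
                    return 0;
            }

            mutex_lock(&obj->dev->struct_mutex);
            ret = msm_gem_get_iova_locked(obj, id, iova);
            mutex_unlock(&obj->dev->struct_mutex);
            return ret;
    }

    /* for callers that already did msm_gem_get_iova(): */
    uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);
            WARN_ON(!msm_obj->domain[id].iova);
            return msm_obj->domain[id].iova;
    }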
390 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_vaddr_locked() local
392 if (!msm_obj->vaddr) { in msm_gem_vaddr_locked()
396 msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, in msm_gem_vaddr_locked()
399 return msm_obj->vaddr; in msm_gem_vaddr_locked()
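msm_gem_vaddr_locked() (lines 390-399) builds a cached kernel mapping on first use. A sketch; the get_pages() call and error check between the listed lines are assumptions:

    void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);

            if (!msm_obj->vaddr) {
                    struct page **pages = get_pages(obj);

                    if (IS_ERR(pages))
                            return ERR_CAST(pages);

                    /* map the whole object write-combined into the kernel: */
                    msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
            }
            return msm_obj->vaddr;
    }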
417 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_queue_inactive_cb() local
418 uint32_t fence = msm_gem_fence(msm_obj, in msm_gem_queue_inactive_cb()
426 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_move_to_active() local
427 msm_obj->gpu = gpu; in msm_gem_move_to_active()
429 msm_obj->write_fence = fence; in msm_gem_move_to_active()
431 msm_obj->read_fence = fence; in msm_gem_move_to_active()
432 list_del_init(&msm_obj->mm_list); in msm_gem_move_to_active()
433 list_add_tail(&msm_obj->mm_list, &gpu->active_list); in msm_gem_move_to_active()
440 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_move_to_inactive() local
444 msm_obj->gpu = NULL; in msm_gem_move_to_inactive()
445 msm_obj->read_fence = 0; in msm_gem_move_to_inactive()
446 msm_obj->write_fence = 0; in msm_gem_move_to_inactive()
447 list_del_init(&msm_obj->mm_list); in msm_gem_move_to_inactive()
448 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_move_to_inactive()
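Lines 426-448 track GPU activity: a BO moves onto the GPU's active list along with the fence it is busy until, and back onto the driver-wide inactive list when retired. A sketch assembling the fragments; the function signatures and the write/read branch are inferred from the two fence fields at lines 429 and 431:

    void msm_gem_move_to_active(struct drm_gem_object *obj,
                    struct msm_gpu *gpu, bool write, uint32_t fence)
    {
            struct msm_gem_object *msm_obj = to_msm_bo(obj);

            msm_obj->gpu = gpu;
            if (write)
                    msm_obj->write_fence = fence;
            else
                    msm_obj->read_fence = fence;
            list_del_init(&msm_obj->mm_list);
            list_add_tail(&msm_obj->mm_list, &gpu->active_list);
    }

    void msm_gem_move_to_inactive(struct drm_gem_object *obj)
    {
            struct msm_drm_private *priv = obj->dev->dev_private;
            struct msm_gem_object *msm_obj = to_msm_bo(obj);

            msm_obj->gpu = NULL;
            msm_obj->read_fence = 0;
            msm_obj->write_fence = 0;
            list_del_init(&msm_obj->mm_list);
            list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
    }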
455 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_cpu_prep() local
458 if (is_active(msm_obj)) { in msm_gem_cpu_prep()
459 uint32_t fence = msm_gem_fence(msm_obj, op); in msm_gem_cpu_prep()
482 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_describe() local
487 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', in msm_gem_describe()
488 msm_obj->read_fence, msm_obj->write_fence, in msm_gem_describe()
490 off, msm_obj->vaddr, obj->size); in msm_gem_describe()
495 struct msm_gem_object *msm_obj; in msm_gem_describe_objects() local
499 list_for_each_entry(msm_obj, list, mm_list) { in msm_gem_describe_objects()
500 struct drm_gem_object *obj = &msm_obj->base; in msm_gem_describe_objects()
515 struct msm_gem_object *msm_obj = to_msm_bo(obj); in msm_gem_free_object() local
521 WARN_ON(is_active(msm_obj)); in msm_gem_free_object()
523 list_del(&msm_obj->mm_list); in msm_gem_free_object()
525 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { in msm_gem_free_object()
527 if (mmu && msm_obj->domain[id].iova) { in msm_gem_free_object()
528 uint32_t offset = msm_obj->domain[id].iova; in msm_gem_free_object()
529 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); in msm_gem_free_object()
534 if (msm_obj->vaddr) in msm_gem_free_object()
535 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); in msm_gem_free_object()
540 if (msm_obj->pages) in msm_gem_free_object()
541 drm_free_large(msm_obj->pages); in msm_gem_free_object()
544 vunmap(msm_obj->vaddr); in msm_gem_free_object()
548 if (msm_obj->resv == &msm_obj->_resv) in msm_gem_free_object()
549 reservation_object_fini(msm_obj->resv); in msm_gem_free_object()
553 kfree(msm_obj); in msm_gem_free_object()
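msm_gem_free_object() (lines 515-553) unmaps the BO from every address space, then splits on whether the pages were imported via dma-buf (not ours to free) or allocated locally. A sketch of the core; priv->mmus[id], the import_attach test, and drm_prime_gem_destroy() are assumptions based on this era of the driver:

    for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
            struct msm_mmu *mmu = priv->mmus[id];

            if (mmu && msm_obj->domain[id].iova) {
                    uint32_t offset = msm_obj->domain[id].iova;
                    mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
            }
    }

    if (obj->import_attach) {
            if (msm_obj->vaddr)
                    dma_buf_vunmap(obj->import_attach->dmabuf,
                                    msm_obj->vaddr);

            /* imported pages are not ours to free, only the array is: */
            if (msm_obj->pages)
                    drm_free_large(msm_obj->pages);

            drm_prime_gem_destroy(obj, msm_obj->sgt);
    } else {
            vunmap(msm_obj->vaddr);
            put_pages(obj);
    }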
587 struct msm_gem_object *msm_obj; in msm_gem_new_impl() local
610 sz = sizeof(*msm_obj); in msm_gem_new_impl()
614 msm_obj = kzalloc(sz, GFP_KERNEL); in msm_gem_new_impl()
615 if (!msm_obj) in msm_gem_new_impl()
619 msm_obj->vram_node = (void *)&msm_obj[1]; in msm_gem_new_impl()
621 msm_obj->flags = flags; in msm_gem_new_impl()
623 msm_obj->resv = &msm_obj->_resv; in msm_gem_new_impl()
624 reservation_object_init(msm_obj->resv); in msm_gem_new_impl()
626 INIT_LIST_HEAD(&msm_obj->submit_entry); in msm_gem_new_impl()
627 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); in msm_gem_new_impl()
629 *obj = &msm_obj->base; in msm_gem_new_impl()
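msm_gem_new_impl() (lines 587-629) shows a space-saving trick at line 619: when the BO will live in the VRAM carveout, the struct drm_mm_node is folded into the same kzalloc() as the object, so msm_obj[1] points directly at it. A sketch of the allocation path, assuming a use_vram flag computed earlier in the function:

    sz = sizeof(*msm_obj);
    if (use_vram)
            sz += sizeof(struct drm_mm_node);

    msm_obj = kzalloc(sz, GFP_KERNEL);
    if (!msm_obj)
            return -ENOMEM;

    /* the drm_mm_node lives in the same allocation, immediately
     * after the object: */
    if (use_vram)
            msm_obj->vram_node = (void *)&msm_obj[1];

    msm_obj->flags = flags;

    msm_obj->resv = &msm_obj->_resv;
    reservation_object_init(msm_obj->resv);

    INIT_LIST_HEAD(&msm_obj->submit_entry);
    list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

    *obj = &msm_obj->base;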
668 struct msm_gem_object *msm_obj; in msm_gem_import() local
688 msm_obj = to_msm_bo(obj); in msm_gem_import()
689 msm_obj->sgt = sgt; in msm_gem_import()
690 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in msm_gem_import()
691 if (!msm_obj->pages) { in msm_gem_import()
696 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); in msm_gem_import()
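msm_gem_import() (lines 668-696) adopts an externally provided sg_table and rebuilds a page array from it, rather than allocating backing pages of its own. A sketch of the listed steps; the surrounding error handling with goto fail is an assumption:

    msm_obj = to_msm_bo(obj);
    msm_obj->sgt = sgt;
    msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
    if (!msm_obj->pages) {
            ret = -ENOMEM;
            goto fail;
    }

    /* translate the imported sg_table into a page array so the rest
     * of the driver can treat it like a locally allocated BO: */
    ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
    if (ret)
            goto fail;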