Searched refs:ttm (Results 1 - 82 of 82) sorted by relevance

/linux-4.1.27/drivers/gpu/drm/ttm/
ttm_tt.c
43 #include <drm/ttm/ttm_module.h>
44 #include <drm/ttm/ttm_bo_driver.h>
45 #include <drm/ttm/ttm_placement.h>
46 #include <drm/ttm/ttm_page_alloc.h>
49 * Allocates storage for pointers to the pages that back the ttm.
51 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) ttm_tt_alloc_page_directory() argument
53 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*)); ttm_tt_alloc_page_directory()
56 static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) ttm_dma_tt_alloc_page_directory() argument
58 ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, ttm_dma_tt_alloc_page_directory()
59 sizeof(*ttm->ttm.pages) + ttm_dma_tt_alloc_page_directory()
60 sizeof(*ttm->dma_address) + ttm_dma_tt_alloc_page_directory()
61 sizeof(*ttm->cpu_address)); ttm_dma_tt_alloc_page_directory()
62 ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); ttm_dma_tt_alloc_page_directory()
63 ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages); ttm_dma_tt_alloc_page_directory()
103 * for range of pages in a ttm.
106 static int ttm_tt_set_caching(struct ttm_tt *ttm, ttm_tt_set_caching() argument
113 if (ttm->caching_state == c_state) ttm_tt_set_caching()
116 if (ttm->state == tt_unpopulated) { ttm_tt_set_caching()
118 ttm->caching_state = c_state; ttm_tt_set_caching()
122 if (ttm->caching_state == tt_cached) ttm_tt_set_caching()
123 drm_clflush_pages(ttm->pages, ttm->num_pages); ttm_tt_set_caching()
125 for (i = 0; i < ttm->num_pages; ++i) { ttm_tt_set_caching()
126 cur_page = ttm->pages[i]; ttm_tt_set_caching()
129 ttm->caching_state, ttm_tt_set_caching()
136 ttm->caching_state = c_state; ttm_tt_set_caching()
142 cur_page = ttm->pages[j]; ttm_tt_set_caching()
145 ttm->caching_state); ttm_tt_set_caching()
152 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) ttm_tt_set_placement_caching() argument
163 return ttm_tt_set_caching(ttm, state); ttm_tt_set_placement_caching()
167 void ttm_tt_destroy(struct ttm_tt *ttm) ttm_tt_destroy() argument
169 if (unlikely(ttm == NULL)) ttm_tt_destroy()
172 if (ttm->state == tt_bound) { ttm_tt_destroy()
173 ttm_tt_unbind(ttm); ttm_tt_destroy()
176 if (ttm->state == tt_unbound) ttm_tt_destroy()
177 ttm_tt_unpopulate(ttm); ttm_tt_destroy()
179 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && ttm_tt_destroy()
180 ttm->swap_storage) ttm_tt_destroy()
181 fput(ttm->swap_storage); ttm_tt_destroy()
183 ttm->swap_storage = NULL; ttm_tt_destroy()
184 ttm->func->destroy(ttm); ttm_tt_destroy()
187 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev, ttm_tt_init() argument
191 ttm->bdev = bdev; ttm_tt_init()
192 ttm->glob = bdev->glob; ttm_tt_init()
193 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ttm_tt_init()
194 ttm->caching_state = tt_cached; ttm_tt_init()
195 ttm->page_flags = page_flags; ttm_tt_init()
196 ttm->dummy_read_page = dummy_read_page; ttm_tt_init()
197 ttm->state = tt_unpopulated; ttm_tt_init()
198 ttm->swap_storage = NULL; ttm_tt_init()
200 ttm_tt_alloc_page_directory(ttm); ttm_tt_init()
201 if (!ttm->pages) { ttm_tt_init()
202 ttm_tt_destroy(ttm); ttm_tt_init()
210 void ttm_tt_fini(struct ttm_tt *ttm) ttm_tt_fini() argument
212 drm_free_large(ttm->pages); ttm_tt_fini()
213 ttm->pages = NULL; ttm_tt_fini()
221 struct ttm_tt *ttm = &ttm_dma->ttm; ttm_dma_tt_init() local
223 ttm->bdev = bdev; ttm_dma_tt_init()
224 ttm->glob = bdev->glob; ttm_dma_tt_init()
225 ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; ttm_dma_tt_init()
226 ttm->caching_state = tt_cached; ttm_dma_tt_init()
227 ttm->page_flags = page_flags; ttm_dma_tt_init()
228 ttm->dummy_read_page = dummy_read_page; ttm_dma_tt_init()
229 ttm->state = tt_unpopulated; ttm_dma_tt_init()
230 ttm->swap_storage = NULL; ttm_dma_tt_init()
234 if (!ttm->pages) { ttm_dma_tt_init()
235 ttm_tt_destroy(ttm); ttm_dma_tt_init()
245 struct ttm_tt *ttm = &ttm_dma->ttm; ttm_dma_tt_fini() local
247 drm_free_large(ttm->pages); ttm_dma_tt_fini()
248 ttm->pages = NULL; ttm_dma_tt_fini()
254 void ttm_tt_unbind(struct ttm_tt *ttm) ttm_tt_unbind() argument
258 if (ttm->state == tt_bound) { ttm_tt_unbind()
259 ret = ttm->func->unbind(ttm); ttm_tt_unbind()
261 ttm->state = tt_unbound; ttm_tt_unbind()
265 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) ttm_tt_bind() argument
269 if (!ttm) ttm_tt_bind()
272 if (ttm->state == tt_bound) ttm_tt_bind()
275 ret = ttm->bdev->driver->ttm_tt_populate(ttm); ttm_tt_bind()
279 ret = ttm->func->bind(ttm, bo_mem); ttm_tt_bind()
283 ttm->state = tt_bound; ttm_tt_bind()
289 int ttm_tt_swapin(struct ttm_tt *ttm) ttm_tt_swapin() argument
298 swap_storage = ttm->swap_storage; ttm_tt_swapin()
303 for (i = 0; i < ttm->num_pages; ++i) { ttm_tt_swapin()
309 to_page = ttm->pages[i]; ttm_tt_swapin()
317 if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) ttm_tt_swapin()
319 ttm->swap_storage = NULL; ttm_tt_swapin()
320 ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; ttm_tt_swapin()
327 int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) ttm_tt_swapout() argument
336 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); ttm_tt_swapout()
337 BUG_ON(ttm->caching_state != tt_cached); ttm_tt_swapout()
340 swap_storage = shmem_file_setup("ttm swap", ttm_tt_swapout()
341 ttm->num_pages << PAGE_SHIFT, ttm_tt_swapout()
352 for (i = 0; i < ttm->num_pages; ++i) { ttm_tt_swapout()
353 from_page = ttm->pages[i]; ttm_tt_swapout()
367 ttm_tt_unpopulate(ttm); ttm_tt_swapout()
368 ttm->swap_storage = swap_storage; ttm_tt_swapout()
369 ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; ttm_tt_swapout()
371 ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP; ttm_tt_swapout()
381 static void ttm_tt_clear_mapping(struct ttm_tt *ttm) ttm_tt_clear_mapping() argument
384 struct page **page = ttm->pages; ttm_tt_clear_mapping()
386 if (ttm->page_flags & TTM_PAGE_FLAG_SG) ttm_tt_clear_mapping()
389 for (i = 0; i < ttm->num_pages; ++i) { ttm_tt_clear_mapping()
395 void ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_tt_unpopulate() argument
397 if (ttm->state == tt_unpopulated) ttm_tt_unpopulate()
400 ttm_tt_clear_mapping(ttm); ttm_tt_unpopulate()
401 ttm->bdev->driver->ttm_tt_unpopulate(ttm); ttm_tt_unpopulate()
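The ttm_tt.c hits above cover the whole ttm_tt state machine: ttm_tt_init() leaves the object tt_unpopulated, ttm_tt_bind() populates it through the driver hook and then binds it, and ttm_tt_destroy() unbinds, unpopulates and frees whatever is left. A minimal sketch of that call order, assuming a driver-supplied bdev, bo_mem and dummy page; the demo_* name is invented for illustration:

#include <drm/ttm/ttm_bo_driver.h>

static int demo_tt_lifecycle(struct ttm_bo_device *bdev,
			     struct ttm_mem_reg *bo_mem,
			     struct page *dummy_read_page)
{
	struct ttm_tt *tt;
	int ret;

	/* tt_unpopulated: page directory allocated, no backing pages yet */
	tt = bdev->driver->ttm_tt_create(bdev, 16 * PAGE_SIZE, 0,
					 dummy_read_page);
	if (!tt)
		return -ENOMEM;

	/* populate via driver->ttm_tt_populate() (-> tt_unbound),
	 * then tt->func->bind() (-> tt_bound) */
	ret = ttm_tt_bind(tt, bo_mem);
	if (ret) {
		ttm_tt_destroy(tt);
		return ret;
	}

	ttm_tt_unbind(tt);	/* tt_bound -> tt_unbound */
	ttm_tt_destroy(tt);	/* unpopulates, then tt->func->destroy() */
	return 0;
}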
Makefile
5 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
10 obj-$(CONFIG_DRM_TTM) += ttm.o
ttm_agp_backend.c
34 #include <drm/ttm/ttm_module.h>
35 #include <drm/ttm/ttm_bo_driver.h>
36 #include <drm/ttm/ttm_page_alloc.h>
38 #include <drm/ttm/ttm_placement.h>
46 struct ttm_tt ttm; member in struct:ttm_agp_backend
51 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) ttm_agp_bind() argument
53 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); ttm_agp_bind()
59 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); ttm_agp_bind()
64 for (i = 0; i < ttm->num_pages; i++) { ttm_agp_bind()
65 struct page *page = ttm->pages[i]; ttm_agp_bind()
68 page = ttm->dummy_read_page; ttm_agp_bind()
84 static int ttm_agp_unbind(struct ttm_tt *ttm) ttm_agp_unbind() argument
86 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); ttm_agp_unbind()
97 static void ttm_agp_destroy(struct ttm_tt *ttm) ttm_agp_destroy() argument
99 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); ttm_agp_destroy()
102 ttm_agp_unbind(ttm); ttm_agp_destroy()
103 ttm_tt_fini(ttm); ttm_agp_destroy()
126 agp_be->ttm.func = &ttm_agp_func; ttm_agp_tt_create()
128 if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) { ttm_agp_tt_create()
133 return &agp_be->ttm; ttm_agp_tt_create()
137 int ttm_agp_tt_populate(struct ttm_tt *ttm) ttm_agp_tt_populate() argument
139 if (ttm->state != tt_unpopulated) ttm_agp_tt_populate()
142 return ttm_pool_populate(ttm); ttm_agp_tt_populate()
146 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm) ttm_agp_tt_unpopulate() argument
148 ttm_pool_unpopulate(ttm); ttm_agp_tt_unpopulate()
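ttm_agp_backend.c above is the reference ttm_backend_func implementation. The same pattern, condensed into a hedged sketch; mydrv_* is a hypothetical driver and the bind/unbind bodies are stubs where real aperture programming would go:

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>

struct mydrv_ttm_tt {
	struct ttm_tt ttm;	/* embedded, like agp_be->ttm above */
};

static int mydrv_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	/* program the aperture with ttm->pages[0..num_pages) here */
	return 0;
}

static int mydrv_ttm_unbind(struct ttm_tt *ttm)
{
	/* tear the aperture mapping back down here */
	return 0;
}

static void mydrv_ttm_destroy(struct ttm_tt *ttm)
{
	struct mydrv_ttm_tt *tt = container_of(ttm, struct mydrv_ttm_tt, ttm);

	ttm_tt_fini(ttm);
	kfree(tt);
}

static struct ttm_backend_func mydrv_backend_func = {
	.bind = mydrv_ttm_bind,
	.unbind = mydrv_ttm_unbind,
	.destroy = mydrv_ttm_destroy,
};

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
					  unsigned long size,
					  uint32_t page_flags,
					  struct page *dummy_read_page)
{
	struct mydrv_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	tt->ttm.func = &mydrv_backend_func;
	if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;
}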
ttm_bo_util.c
31 #include <drm/ttm/ttm_bo_driver.h>
32 #include <drm/ttm/ttm_placement.h>
51 struct ttm_tt *ttm = bo->ttm; ttm_bo_move_ttm() local
56 ttm_tt_unbind(ttm); ttm_bo_move_ttm()
63 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); ttm_bo_move_ttm()
68 ret = ttm_tt_bind(ttm, new_mem); ttm_bo_move_ttm()
250 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, ttm_copy_io_ttm_page() argument
254 struct page *d = ttm->pages[page]; ttm_copy_io_ttm_page()
287 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, ttm_copy_ttm_io_page() argument
291 struct page *s = ttm->pages[page]; ttm_copy_ttm_io_page()
329 struct ttm_tt *ttm = bo->ttm; ttm_bo_move_memcpy() local
357 (ttm == NULL || (ttm->state == tt_unpopulated && ttm_bo_move_memcpy()
358 !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) { ttm_bo_move_memcpy()
366 if (ttm && ttm->state == tt_unpopulated) { ttm_bo_move_memcpy()
367 ret = ttm->bdev->driver->ttm_tt_populate(ttm); ttm_bo_move_memcpy()
386 ret = ttm_copy_ttm_io_page(ttm, new_iomap, page, ttm_bo_move_memcpy()
391 ret = ttm_copy_io_ttm_page(ttm, old_iomap, page, ttm_bo_move_memcpy()
404 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { ttm_bo_move_memcpy()
405 ttm_tt_unbind(ttm); ttm_bo_move_memcpy()
406 ttm_tt_destroy(ttm); ttm_bo_move_memcpy()
407 bo->ttm = NULL; ttm_bo_move_memcpy()
534 struct ttm_tt *ttm = bo->ttm; ttm_bo_kmap_ttm() local
537 BUG_ON(!ttm); ttm_bo_kmap_ttm()
539 if (ttm->state == tt_unpopulated) { ttm_bo_kmap_ttm()
540 ret = ttm->bdev->driver->ttm_tt_populate(ttm); ttm_bo_kmap_ttm()
552 map->page = ttm->pages[start_page]; ttm_bo_kmap_ttm()
561 map->virtual = vmap(ttm->pages + start_page, num_pages, ttm_bo_kmap_ttm()
652 (bo->ttm != NULL)) { ttm_bo_move_accel_cleanup()
653 ttm_tt_unbind(bo->ttm); ttm_bo_move_accel_cleanup()
654 ttm_tt_destroy(bo->ttm); ttm_bo_move_accel_cleanup()
655 bo->ttm = NULL; ttm_bo_move_accel_cleanup()
682 ghost_obj->ttm = NULL; ttm_bo_move_accel_cleanup()
684 bo->ttm = NULL; ttm_bo_move_accel_cleanup()
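ttm_bo_kmap_ttm() above is the system-memory half of the generic ttm_bo_kmap() interface. A hedged sketch of CPU access through it, assuming the BO is already reserved; the zero-fill is just an example payload:

#include <linux/io.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>

static int demo_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virt;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);	/* map page 0 only */
	if (ret)
		return ret;

	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virt, 0, PAGE_SIZE);
	else
		memset(virt, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}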
ttm_page_alloc.c
48 #include <drm/ttm/ttm_bo_driver.h>
49 #include <drm/ttm/ttm_page_alloc.h>
255 * Select the right pool or requested caching state and ttm flags. */ ttm_get_pool()
864 int ttm_pool_populate(struct ttm_tt *ttm) ttm_pool_populate() argument
866 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; ttm_pool_populate()
870 if (ttm->state != tt_unpopulated) ttm_pool_populate()
873 for (i = 0; i < ttm->num_pages; ++i) { ttm_pool_populate()
874 ret = ttm_get_pages(&ttm->pages[i], 1, ttm_pool_populate()
875 ttm->page_flags, ttm_pool_populate()
876 ttm->caching_state); ttm_pool_populate()
878 ttm_pool_unpopulate(ttm); ttm_pool_populate()
882 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], ttm_pool_populate()
885 ttm_pool_unpopulate(ttm); ttm_pool_populate()
890 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { ttm_pool_populate()
891 ret = ttm_tt_swapin(ttm); ttm_pool_populate()
893 ttm_pool_unpopulate(ttm); ttm_pool_populate()
898 ttm->state = tt_unbound; ttm_pool_populate()
903 void ttm_pool_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate() argument
907 for (i = 0; i < ttm->num_pages; ++i) { ttm_pool_unpopulate()
908 if (ttm->pages[i]) { ttm_pool_unpopulate()
909 ttm_mem_global_free_page(ttm->glob->mem_glob, ttm_pool_unpopulate()
910 ttm->pages[i]); ttm_pool_unpopulate()
911 ttm_put_pages(&ttm->pages[i], 1, ttm_pool_unpopulate()
912 ttm->page_flags, ttm_pool_unpopulate()
913 ttm->caching_state); ttm_pool_unpopulate()
916 ttm->state = tt_unpopulated; ttm_pool_unpopulate()
ttm_bo_vm.c
33 #include <ttm/ttm_module.h>
34 #include <ttm/ttm_bo_driver.h>
35 #include <ttm/ttm_placement.h>
94 struct ttm_tt *ttm = NULL; ttm_bo_vm_fault() local
136 if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { ttm_bo_vm_fault()
199 ttm = bo->ttm; ttm_bo_vm_fault()
204 if (ttm->bdev->driver->ttm_tt_populate(ttm)) { ttm_bo_vm_fault()
218 page = ttm->pages[page_offset]; ttm_bo_vm_fault()
ttm_page_alloc_dma.c
51 #include <drm/ttm/ttm_bo_driver.h>
52 #include <drm/ttm/ttm_page_alloc.h>
849 struct ttm_tt *ttm = &ttm_dma->ttm; ttm_dma_pool_get_pages() local
857 ttm->pages[index] = d_page->p; ttm_dma_pool_get_pages()
875 struct ttm_tt *ttm = &ttm_dma->ttm; ttm_dma_populate() local
876 struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; ttm_dma_populate()
883 if (ttm->state != tt_unpopulated) ttm_dma_populate()
886 type = ttm_to_type(ttm->page_flags, ttm->caching_state); ttm_dma_populate()
887 if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) ttm_dma_populate()
891 if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) ttm_dma_populate()
903 for (i = 0; i < ttm->num_pages; ++i) { ttm_dma_populate()
910 ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], ttm_dma_populate()
918 if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { ttm_dma_populate()
919 ret = ttm_tt_swapin(ttm); ttm_dma_populate()
926 ttm->state = tt_unbound; ttm_dma_populate()
934 struct ttm_tt *ttm = &ttm_dma->ttm; ttm_dma_unpopulate() local
942 type = ttm_to_type(ttm->page_flags, ttm->caching_state); ttm_dma_unpopulate()
948 ttm_to_type(ttm->page_flags, tt_cached)) == pool); ttm_dma_unpopulate()
952 ttm->pages[count] = d_page->p; ttm_dma_unpopulate()
976 ttm_mem_global_free_page(ttm->glob->mem_glob, ttm_dma_unpopulate()
982 ttm_mem_global_free_page(ttm->glob->mem_glob, ttm_dma_unpopulate()
983 ttm->pages[i]); ttm_dma_unpopulate()
988 for (i = 0; i < ttm->num_pages; i++) { ttm_dma_unpopulate()
989 ttm->pages[i] = NULL; ttm_dma_unpopulate()
997 ttm->state = tt_unpopulated; ttm_dma_unpopulate()
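When SWIOTLB is in play, the drivers further down (nouveau, radeon) route population through this DMA pool instead of the plain page pool. A sketch of the driver-side call, assuming CONFIG_SWIOTLB provides ttm_dma_populate() and that the tt really is embedded in a ttm_dma_tt; the demo name is invented:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

static int demo_dma_tt_populate(struct ttm_tt *ttm, struct device *dev)
{
	struct ttm_dma_tt *dma_tt = container_of(ttm, struct ttm_dma_tt, ttm);

	if (ttm->state != tt_unpopulated)
		return 0;

	/* fills both dma_tt->ttm.pages[] and dma_tt->dma_address[] */
	return ttm_dma_populate(dma_tt, dev);
}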
ttm_module.c
34 #include <drm/ttm/ttm_module.h>
41 .name = "ttm",
69 ret = dev_set_name(&ttm_drm_class_device, "ttm"); ttm_init()
ttm_bo.c
33 #include <drm/ttm/ttm_module.h>
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
150 if (bo->ttm) ttm_bo_release_list()
151 ttm_tt_destroy(bo->ttm); ttm_bo_release_list()
179 if (bo->ttm != NULL) { ttm_bo_add_to_lru()
242 bo->ttm = NULL; ttm_bo_add_ttm()
252 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, ttm_bo_add_ttm()
254 if (unlikely(bo->ttm == NULL)) ttm_bo_add_ttm()
258 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, ttm_bo_add_ttm()
261 if (unlikely(bo->ttm == NULL)) { ttm_bo_add_ttm()
265 bo->ttm->sg = bo->sg; ttm_bo_add_ttm()
298 * Create and bind a ttm if required. ttm_bo_handle_move_mem()
302 if (bo->ttm == NULL) { ttm_bo_handle_move_mem()
309 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); ttm_bo_handle_move_mem()
314 ret = ttm_tt_bind(bo->ttm, mem); ttm_bo_handle_move_mem()
374 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { ttm_bo_handle_move_mem()
375 ttm_tt_unbind(bo->ttm); ttm_bo_handle_move_mem()
376 ttm_tt_destroy(bo->ttm); ttm_bo_handle_move_mem()
377 bo->ttm = NULL; ttm_bo_handle_move_mem()
396 if (bo->ttm) { ttm_bo_cleanup_memtype_use()
397 ttm_tt_unbind(bo->ttm); ttm_bo_cleanup_memtype_use()
398 ttm_tt_destroy(bo->ttm); ttm_bo_cleanup_memtype_use()
399 bo->ttm = NULL; ttm_bo_cleanup_memtype_use()
1066 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ttm_bo_validate()
1673 * anyone tries to access a ttm page. ttm_bo_swapout()
1679 ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); ttm_bo_swapout()
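ttm_bo_handle_move_mem() above is normally reached via ttm_bo_validate(). A minimal sketch of forcing a BO into cached system memory, assuming the caller already holds the BO's reservation:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

static int demo_move_to_system(struct ttm_buffer_object *bo)
{
	struct ttm_place place = {
		.fpfn = 0,
		.lpfn = 0,	/* 0 == no upper page limit */
		.flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	};
	struct ttm_placement placement = {
		.num_placement = 1,
		.placement = &place,
		.num_busy_placement = 1,
		.busy_placement = &place,
	};

	return ttm_bo_validate(bo, &placement, true /* interruptible */,
			       false /* no_wait_gpu */);
}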
ttm_bo_manager.c
31 #include <drm/ttm/ttm_module.h>
32 #include <drm/ttm/ttm_bo_driver.h>
33 #include <drm/ttm/ttm_placement.h>
ttm_execbuf_util.c
28 #include <drm/ttm/ttm_execbuf_util.h>
29 #include <drm/ttm/ttm_bo_driver.h>
30 #include <drm/ttm/ttm_placement.h>
ttm_lock.c
31 #include <drm/ttm/ttm_lock.h>
32 #include <drm/ttm/ttm_module.h>
ttm_object.c
39 * ttm objects. Implements reference counting, minimal security checks
61 #include <drm/ttm/ttm_object.h>
62 #include <drm/ttm/ttm_module.h>
86 * This is the per-device data structure needed for ttm object management.
647 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
ttm_memory.c
30 #include <drm/ttm/ttm_memory.h>
31 #include <drm/ttm/ttm_module.h>
32 #include <drm/ttm/ttm_page_alloc.h>
/linux-4.1.27/drivers/gpu/drm/nouveau/
nouveau_sgdma.c
11 struct ttm_dma_tt ttm; member in struct:nouveau_sgdma_be
16 nouveau_sgdma_destroy(struct ttm_tt *ttm) nouveau_sgdma_destroy() argument
18 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; nouveau_sgdma_destroy()
20 if (ttm) { nouveau_sgdma_destroy()
21 ttm_dma_tt_fini(&nvbe->ttm); nouveau_sgdma_destroy()
27 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) nv04_sgdma_bind() argument
29 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; nv04_sgdma_bind()
32 if (ttm->sg) { nv04_sgdma_bind()
33 node->sg = ttm->sg; nv04_sgdma_bind()
37 node->pages = nvbe->ttm.dma_address; nv04_sgdma_bind()
47 nv04_sgdma_unbind(struct ttm_tt *ttm) nv04_sgdma_unbind() argument
49 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; nv04_sgdma_unbind()
61 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem) nv50_sgdma_bind() argument
63 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; nv50_sgdma_bind()
67 if (ttm->sg) { nv50_sgdma_bind()
68 node->sg = ttm->sg; nv50_sgdma_bind()
72 node->pages = nvbe->ttm.dma_address; nv50_sgdma_bind()
79 nv50_sgdma_unbind(struct ttm_tt *ttm) nv50_sgdma_unbind() argument
104 nvbe->ttm.ttm.func = &nv04_sgdma_backend; nouveau_sgdma_create_ttm()
106 nvbe->ttm.ttm.func = &nv50_sgdma_backend; nouveau_sgdma_create_ttm()
108 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) nouveau_sgdma_create_ttm()
115 return &nvbe->ttm.ttm; nouveau_sgdma_create_ttm()
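The nvbe->ttm.ttm spelling above follows from struct ttm_dma_tt embedding a struct ttm_tt as its first member; the TTM core only ever sees the inner object. A hedged sketch of the same construction with invented demo_* names (the backend_func would carry bind/unbind/destroy as usual):

#include <linux/slab.h>
#include <drm/ttm/ttm_bo_driver.h>

struct demo_sgdma_be {
	struct ttm_dma_tt dma;	/* dma.ttm is the embedded ttm_tt */
};

static struct ttm_backend_func demo_sgdma_backend;	/* filled in elsewhere */

static struct ttm_tt *demo_sgdma_create_ttm(struct ttm_bo_device *bdev,
					    unsigned long size,
					    uint32_t page_flags,
					    struct page *dummy_read_page)
{
	struct demo_sgdma_be *be = kzalloc(sizeof(*be), GFP_KERNEL);

	if (!be)
		return NULL;
	be->dma.ttm.func = &demo_sgdma_backend;
	if (ttm_dma_tt_init(&be->dma, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(be);
		return NULL;
	}
	return &be->dma.ttm;	/* hand the inner ttm_tt to the core */
}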
nouveau_ttm.c
290 return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); nouveau_ttm_mmap()
311 global_ref = &drm->ttm.mem_global_ref; nouveau_ttm_global_init()
320 drm->ttm.mem_global_ref.release = NULL; nouveau_ttm_global_init()
324 drm->ttm.bo_global_ref.mem_glob = global_ref->object; nouveau_ttm_global_init()
325 global_ref = &drm->ttm.bo_global_ref.ref; nouveau_ttm_global_init()
334 drm_global_item_unref(&drm->ttm.mem_global_ref); nouveau_ttm_global_init()
335 drm->ttm.mem_global_ref.release = NULL; nouveau_ttm_global_init()
345 if (drm->ttm.mem_global_ref.release == NULL) nouveau_ttm_global_release()
348 drm_global_item_unref(&drm->ttm.bo_global_ref.ref); nouveau_ttm_global_release()
349 drm_global_item_unref(&drm->ttm.mem_global_ref); nouveau_ttm_global_release()
350 drm->ttm.mem_global_ref.release = NULL; nouveau_ttm_global_release()
381 ret = ttm_bo_device_init(&drm->ttm.bdev, nouveau_ttm_init()
382 drm->ttm.bo_global_ref.ref.object, nouveau_ttm_init()
395 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, nouveau_ttm_init()
402 drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1), nouveau_ttm_init()
412 ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT, nouveau_ttm_init()
428 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); nouveau_ttm_fini()
429 ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); nouveau_ttm_fini()
432 ttm_bo_device_release(&drm->ttm.bdev); nouveau_ttm_fini()
436 arch_phys_wc_del(drm->ttm.mtrr); nouveau_ttm_fini()
437 drm->ttm.mtrr = 0; nouveau_ttm_fini()
nouveau_ttm.h
7 return container_of(bd, struct nouveau_drm, ttm.bdev); nouveau_bdev()
nouveau_drm.h
40 #include <drm/ttm/ttm_bo_api.h>
41 #include <drm/ttm/ttm_bo_driver.h>
42 #include <drm/ttm/ttm_placement.h>
43 #include <drm/ttm/ttm_memory.h>
44 #include <drm/ttm/ttm_module.h>
45 #include <drm/ttm/ttm_page_alloc.h>
133 } ttm; member in struct:nouveau_drm
nouveau_bo.c
215 nvbo->bo.bdev = &drm->ttm.bdev; nouveau_bo_new()
230 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, nouveau_bo_new()
233 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, nouveau_bo_new()
238 /* ttm will call nouveau_bo_del_ttm if it fails.. */ nouveau_bo_new()
463 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; nouveau_bo_sync_for_device()
473 for (i = 0; i < ttm_dma->ttm.num_pages; i++) nouveau_bo_sync_for_device()
483 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; nouveau_bo_sync_for_cpu()
493 for (i = 0; i < ttm_dma->ttm.num_pages; i++) nouveau_bo_sync_for_cpu()
527 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm; _nouveau_bo_mem_index()
1066 struct nouveau_channel *chan = drm->ttm.chan; nouveau_bo_move_m2mf()
1072 * old nvkm_mem node, these will get cleaned up after ttm has nouveau_bo_move_m2mf()
1084 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); nouveau_bo_move_m2mf()
1143 &drm->ttm.copy); nouveau_bo_move_init()
1145 ret = mthd->init(chan, drm->ttm.copy.handle); nouveau_bo_move_init()
1147 nvif_object_fini(&drm->ttm.copy); nouveau_bo_move_init()
1151 drm->ttm.move = mthd->exec; nouveau_bo_move_init()
1152 drm->ttm.chan = chan; nouveau_bo_move_init()
1183 ret = ttm_tt_bind(bo->ttm, &tmp_mem); nouveau_bo_move_flipd()
1238 /* ttm can now (stupidly) pass the driver bos it didn't create... */ nouveau_bo_move_ntfy()
1308 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { nouveau_bo_move()
1316 if (drm->ttm.move) { nouveau_bo_move()
1466 nouveau_ttm_tt_populate(struct ttm_tt *ttm) nouveau_ttm_tt_populate() argument
1468 struct ttm_dma_tt *ttm_dma = (void *)ttm; nouveau_ttm_tt_populate()
1475 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); nouveau_ttm_tt_populate()
1477 if (ttm->state != tt_unpopulated) nouveau_ttm_tt_populate()
1480 if (slave && ttm->sg) { nouveau_ttm_tt_populate()
1482 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, nouveau_ttm_tt_populate()
1483 ttm_dma->dma_address, ttm->num_pages); nouveau_ttm_tt_populate()
1484 ttm->state = tt_unbound; nouveau_ttm_tt_populate()
1488 drm = nouveau_bdev(ttm->bdev); nouveau_ttm_tt_populate()
1498 ttm->caching_state == tt_uncached) nouveau_ttm_tt_populate()
1503 return ttm_agp_tt_populate(ttm); nouveau_ttm_tt_populate()
1509 return ttm_dma_populate((void *)ttm, dev->dev); nouveau_ttm_tt_populate()
1513 r = ttm_pool_populate(ttm); nouveau_ttm_tt_populate()
1518 for (i = 0; i < ttm->num_pages; i++) { nouveau_ttm_tt_populate()
1521 addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE, nouveau_ttm_tt_populate()
1530 ttm_pool_unpopulate(ttm); nouveau_ttm_tt_populate()
1540 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) nouveau_ttm_tt_unpopulate() argument
1542 struct ttm_dma_tt *ttm_dma = (void *)ttm; nouveau_ttm_tt_unpopulate()
1548 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); nouveau_ttm_tt_unpopulate()
1553 drm = nouveau_bdev(ttm->bdev); nouveau_ttm_tt_unpopulate()
1563 ttm->caching_state == tt_uncached) { nouveau_ttm_tt_unpopulate()
1570 ttm_agp_tt_unpopulate(ttm); nouveau_ttm_tt_unpopulate()
1577 ttm_dma_unpopulate((void *)ttm, dev->dev); nouveau_ttm_tt_unpopulate()
1582 for (i = 0; i < ttm->num_pages; i++) { nouveau_ttm_tt_unpopulate()
1589 ttm_pool_unpopulate(ttm); nouveau_ttm_tt_unpopulate()
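The tail of nouveau_ttm_tt_populate() above falls back to mapping each pooled page individually when neither AGP nor SWIOTLB applies. A sketch of that map-and-unwind loop; the function name and error code are illustrative only:

#include <linux/dma-mapping.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

static int demo_map_tt_pages(struct device *dev, struct ttm_tt *ttm,
			     dma_addr_t *dma_address)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; i++) {
		dma_address[i] = dma_map_page(dev, ttm->pages[i], 0,
					      PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_address[i])) {
			while (i--)	/* unwind everything mapped so far */
				dma_unmap_page(dev, dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}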
nouveau_prime.c
36 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); nouveau_gem_prime_get_sg_table()
80 * to the caller, instead of a normal nouveau_bo ttm reference. */ nouveau_gem_prime_import_sg_table()
nouveau_bo.h
38 /* protect by the ttm reservation lock */
nouveau_drm.c
140 nvif_object_fini(&drm->ttm.copy); nouveau_accel_fini()
564 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); nouveau_do_suspend()
nouveau_gem.c
211 * to the caller, instead of a normal nouveau_bo ttm reference. */ nouveau_gem_new()
/linux-4.1.27/drivers/gpu/drm/radeon/
radeon_ttm.c
32 #include <ttm/ttm_bo_api.h>
33 #include <ttm/ttm_bo_driver.h>
34 #include <ttm/ttm_placement.h>
35 #include <ttm/ttm_module.h>
36 #include <ttm/ttm_page_alloc.h>
238 if (radeon_ttm_tt_has_userptr(bo->ttm)) radeon_verify_access()
337 r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); radeon_move_vram_ram()
342 r = ttm_tt_bind(bo->ttm, &tmp_mem); radeon_move_vram_ram()
406 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { radeon_bo_move()
520 struct ttm_dma_tt ttm; member in struct:radeon_ttm_tt
530 static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) radeon_ttm_tt_pin_userptr() argument
532 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_pin_userptr()
533 struct radeon_ttm_tt *gtt = (void *)ttm; radeon_ttm_tt_pin_userptr()
547 unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; radeon_ttm_tt_pin_userptr()
555 unsigned num_pages = ttm->num_pages - pinned; radeon_ttm_tt_pin_userptr()
557 struct page **pages = ttm->pages + pinned; radeon_ttm_tt_pin_userptr()
566 } while (pinned < ttm->num_pages); radeon_ttm_tt_pin_userptr()
568 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, radeon_ttm_tt_pin_userptr()
569 ttm->num_pages << PAGE_SHIFT, radeon_ttm_tt_pin_userptr()
575 nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); radeon_ttm_tt_pin_userptr()
576 if (nents != ttm->sg->nents) radeon_ttm_tt_pin_userptr()
579 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, radeon_ttm_tt_pin_userptr()
580 gtt->ttm.dma_address, ttm->num_pages); radeon_ttm_tt_pin_userptr()
585 kfree(ttm->sg); radeon_ttm_tt_pin_userptr()
588 release_pages(ttm->pages, pinned, 0); radeon_ttm_tt_pin_userptr()
592 static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) radeon_ttm_tt_unpin_userptr() argument
594 struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_unpin_userptr()
595 struct radeon_ttm_tt *gtt = (void *)ttm; radeon_ttm_tt_unpin_userptr()
603 if (!ttm->sg->sgl) radeon_ttm_tt_unpin_userptr()
607 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); radeon_ttm_tt_unpin_userptr()
609 for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { radeon_ttm_tt_unpin_userptr()
618 sg_free_table(ttm->sg); radeon_ttm_tt_unpin_userptr()
621 static int radeon_ttm_backend_bind(struct ttm_tt *ttm, radeon_ttm_backend_bind() argument
624 struct radeon_ttm_tt *gtt = (void*)ttm; radeon_ttm_backend_bind()
630 radeon_ttm_tt_pin_userptr(ttm); radeon_ttm_backend_bind()
635 if (!ttm->num_pages) { radeon_ttm_backend_bind()
637 ttm->num_pages, bo_mem, ttm); radeon_ttm_backend_bind()
639 if (ttm->caching_state == tt_cached) radeon_ttm_backend_bind()
641 r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages, radeon_ttm_backend_bind()
642 ttm->pages, gtt->ttm.dma_address, flags); radeon_ttm_backend_bind()
645 ttm->num_pages, (unsigned)gtt->offset); radeon_ttm_backend_bind()
651 static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) radeon_ttm_backend_unbind() argument
653 struct radeon_ttm_tt *gtt = (void *)ttm; radeon_ttm_backend_unbind()
655 radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); radeon_ttm_backend_unbind()
658 radeon_ttm_tt_unpin_userptr(ttm); radeon_ttm_backend_unbind()
663 static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) radeon_ttm_backend_destroy() argument
665 struct radeon_ttm_tt *gtt = (void *)ttm; radeon_ttm_backend_destroy()
667 ttm_dma_tt_fini(&gtt->ttm); radeon_ttm_backend_destroy()
696 gtt->ttm.ttm.func = &radeon_backend_func; radeon_ttm_tt_create()
698 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { radeon_ttm_tt_create()
702 return &gtt->ttm.ttm; radeon_ttm_tt_create()
705 static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm) radeon_ttm_tt_to_gtt() argument
707 if (!ttm || ttm->func != &radeon_backend_func) radeon_ttm_tt_to_gtt()
709 return (struct radeon_ttm_tt *)ttm; radeon_ttm_tt_to_gtt()
712 static int radeon_ttm_tt_populate(struct ttm_tt *ttm) radeon_ttm_tt_populate() argument
714 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); radeon_ttm_tt_populate()
718 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); radeon_ttm_tt_populate()
720 if (ttm->state != tt_unpopulated) radeon_ttm_tt_populate()
724 ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); radeon_ttm_tt_populate()
725 if (!ttm->sg) radeon_ttm_tt_populate()
728 ttm->page_flags |= TTM_PAGE_FLAG_SG; radeon_ttm_tt_populate()
729 ttm->state = tt_unbound; radeon_ttm_tt_populate()
733 if (slave && ttm->sg) { radeon_ttm_tt_populate()
734 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, radeon_ttm_tt_populate()
735 gtt->ttm.dma_address, ttm->num_pages); radeon_ttm_tt_populate()
736 ttm->state = tt_unbound; radeon_ttm_tt_populate()
740 rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_populate()
743 return ttm_agp_tt_populate(ttm); radeon_ttm_tt_populate()
749 return ttm_dma_populate(&gtt->ttm, rdev->dev); radeon_ttm_tt_populate()
753 r = ttm_pool_populate(ttm); radeon_ttm_tt_populate()
758 for (i = 0; i < ttm->num_pages; i++) { radeon_ttm_tt_populate()
759 gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], radeon_ttm_tt_populate()
762 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { radeon_ttm_tt_populate()
764 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], radeon_ttm_tt_populate()
766 gtt->ttm.dma_address[i] = 0; radeon_ttm_tt_populate()
768 ttm_pool_unpopulate(ttm); radeon_ttm_tt_populate()
775 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) radeon_ttm_tt_unpopulate() argument
778 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); radeon_ttm_tt_unpopulate()
780 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); radeon_ttm_tt_unpopulate()
783 kfree(ttm->sg); radeon_ttm_tt_unpopulate()
784 ttm->page_flags &= ~TTM_PAGE_FLAG_SG; radeon_ttm_tt_unpopulate()
791 rdev = radeon_get_rdev(ttm->bdev); radeon_ttm_tt_unpopulate()
794 ttm_agp_tt_unpopulate(ttm); radeon_ttm_tt_unpopulate()
801 ttm_dma_unpopulate(&gtt->ttm, rdev->dev); radeon_ttm_tt_unpopulate()
806 for (i = 0; i < ttm->num_pages; i++) { radeon_ttm_tt_unpopulate()
807 if (gtt->ttm.dma_address[i]) { radeon_ttm_tt_unpopulate()
808 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], radeon_ttm_tt_unpopulate()
813 ttm_pool_unpopulate(ttm); radeon_ttm_tt_unpopulate()
816 int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, radeon_ttm_tt_set_userptr() argument
819 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); radeon_ttm_tt_set_userptr()
830 bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) radeon_ttm_tt_has_userptr() argument
832 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); radeon_ttm_tt_has_userptr()
840 bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm) radeon_ttm_tt_is_readonly() argument
842 struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); radeon_ttm_tt_is_readonly()
949 DRM_INFO("radeon: ttm finalized\n"); radeon_ttm_fini()
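radeon_ttm_tt_pin_userptr() above pins a user range chunk by chunk until every page is resident. A hedged sketch of that loop against the 4.1-era get_user_pages() signature; as in radeon, the caller is assumed to hold mmap_sem for read, and the demo name is invented:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

static long demo_pin_user_range(unsigned long userptr, int write,
				unsigned long num_pages, struct page **pages)
{
	unsigned long pinned = 0;
	long r;

	do {
		unsigned long n = num_pages - pinned;

		r = get_user_pages(current, current->mm,
				   userptr + pinned * PAGE_SIZE, n,
				   write, 0, pages + pinned, NULL);
		if (r < 0) {
			release_pages(pages, pinned, 0);
			return r;
		}
		pinned += r;
	} while (pinned < num_pages);

	return 0;
}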
radeon_prime.c
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); radeon_gem_prime_get_sg_table()
124 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) radeon_gem_prime_export()
rv770_dma.c
39 * Used by the radeon ttm implementation to move pages if
radeon_mn.c
145 if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) radeon_mn_invalidate_range_start()
radeon_object.h
36 * @mem_type: ttm memory type
38 * Returns corresponding domain of the ttm mem_type
evergreen_dma.c
104 * Used by the radeon ttm implementation to move pages if
radeon_gem.c
323 r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); radeon_gem_userptr_ioctl()
412 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { radeon_mode_dumb_mmap()
707 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) radeon_gem_op_ioctl()
si_dma.c
228 * Used by the radeon ttm implementation to move pages if
radeon.h
71 #include <ttm/ttm_bo_api.h>
72 #include <ttm/ttm_bo_driver.h>
73 #include <ttm/ttm_placement.h>
74 #include <ttm/ttm_module.h>
75 #include <ttm/ttm_execbuf_util.h>
2987 extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2989 extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
2990 extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
radeon_object.c
327 if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) radeon_bo_pin_restricted()
436 /* this should unref the ttm bo */ radeon_bo_force_delete()
r600_dma.c
436 * Used by the radeon ttm implementation to move pages if
radeon_vm.c
906 * @mem: ttm mem
937 if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm)) radeon_vm_bo_update()
radeon_cs.c
154 if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { radeon_cs_parser_relocs()
cik_sdma.c
576 * Used by the radeon ttm implementation to move pages if
r600.c
2911 * Used by the radeon ttm implementation to move pages if
cik.c
4039 * Used by the radeon ttm implementation to move pages if
/linux-4.1.27/drivers/gpu/drm/qxl/
qxl_ttm.c
26 #include <ttm/ttm_bo_api.h>
27 #include <ttm/ttm_bo_driver.h>
28 #include <ttm/ttm_placement.h>
29 #include <ttm/ttm_page_alloc.h>
30 #include <ttm/ttm_module.h>
258 struct ttm_dma_tt ttm; member in struct:qxl_ttm_tt
263 static int qxl_ttm_backend_bind(struct ttm_tt *ttm, qxl_ttm_backend_bind() argument
266 struct qxl_ttm_tt *gtt = (void *)ttm; qxl_ttm_backend_bind()
269 if (!ttm->num_pages) { qxl_ttm_backend_bind()
271 ttm->num_pages, bo_mem, ttm); qxl_ttm_backend_bind()
277 static int qxl_ttm_backend_unbind(struct ttm_tt *ttm) qxl_ttm_backend_unbind() argument
283 static void qxl_ttm_backend_destroy(struct ttm_tt *ttm) qxl_ttm_backend_destroy() argument
285 struct qxl_ttm_tt *gtt = (void *)ttm; qxl_ttm_backend_destroy()
287 ttm_dma_tt_fini(&gtt->ttm); qxl_ttm_backend_destroy()
297 static int qxl_ttm_tt_populate(struct ttm_tt *ttm) qxl_ttm_tt_populate() argument
301 if (ttm->state != tt_unpopulated) qxl_ttm_tt_populate()
304 r = ttm_pool_populate(ttm); qxl_ttm_tt_populate()
311 static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm) qxl_ttm_tt_unpopulate() argument
313 ttm_pool_unpopulate(ttm); qxl_ttm_tt_unpopulate()
327 gtt->ttm.ttm.func = &qxl_backend_func; qxl_ttm_tt_create()
329 if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, qxl_ttm_tt_create()
334 return &gtt->ttm.ttm; qxl_ttm_tt_create()
353 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { qxl_bo_move()
441 DRM_INFO("qxl: ttm finalized\n"); qxl_ttm_fini()
qxl_drv.h
41 #include <ttm/ttm_bo_api.h>
42 #include <ttm/ttm_bo_driver.h>
43 #include <ttm/ttm_placement.h>
44 #include <ttm/ttm_module.h>
49 #include <ttm/ttm_execbuf_util.h>
437 /* qxl ttm */
qxl_object.c
285 /* this should unref the ttm bo */ qxl_bo_force_delete()
/linux-4.1.27/drivers/gpu/drm/ast/
ast_ttm.c
30 #include <ttm/ttm_page_alloc.h>
35 return container_of(bd, struct ast_private, ttm.bdev); ast_bdev()
55 global_ref = &ast->ttm.mem_global_ref; ast_ttm_global_init()
67 ast->ttm.bo_global_ref.mem_glob = ast_ttm_global_init()
68 ast->ttm.mem_global_ref.object; ast_ttm_global_init()
69 global_ref = &ast->ttm.bo_global_ref.ref; ast_ttm_global_init()
77 drm_global_item_unref(&ast->ttm.mem_global_ref); ast_ttm_global_init()
86 if (ast->ttm.mem_global_ref.release == NULL) ast_ttm_global_release()
89 drm_global_item_unref(&ast->ttm.bo_global_ref.ref); ast_ttm_global_release()
90 drm_global_item_unref(&ast->ttm.mem_global_ref); ast_ttm_global_release()
91 ast->ttm.mem_global_ref.release = NULL; ast_ttm_global_release()
228 static int ast_ttm_tt_populate(struct ttm_tt *ttm) ast_ttm_tt_populate() argument
230 return ttm_pool_populate(ttm); ast_ttm_tt_populate()
233 static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm) ast_ttm_tt_unpopulate() argument
235 ttm_pool_unpopulate(ttm); ast_ttm_tt_unpopulate()
254 struct ttm_bo_device *bdev = &ast->ttm.bdev; ast_mm_init()
260 ret = ttm_bo_device_init(&ast->ttm.bdev, ast_mm_init()
261 ast->ttm.bo_global_ref.ref.object, ast_mm_init()
274 DRM_ERROR("Failed ttm VRAM init: %d\n", ret); ast_mm_init()
286 ttm_bo_device_release(&ast->ttm.bdev); ast_mm_fini()
332 astbo->bo.bdev = &ast->ttm.bdev; ast_bo_create()
336 acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size, ast_bo_create()
339 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, ast_bo_create()
434 return ttm_bo_mmap(filp, vma, &ast->ttm.bdev); ast_mmap()
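The ast block above and the cirrus, mgag200 and bochs blocks below are four copies of the same mem/bo global-reference boilerplate. Condensed into one hedged sketch; the demo_* names are placeholders for the per-driver wrappers:

#include <drm/drm_global.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h>

static int demo_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void demo_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int demo_ttm_global_init(struct drm_global_reference *mem_ref,
				struct ttm_bo_global_ref *bo_ref)
{
	int r;

	mem_ref->global_type = DRM_GLOBAL_TTM_MEM;
	mem_ref->size = sizeof(struct ttm_mem_global);
	mem_ref->init = &demo_mem_global_init;
	mem_ref->release = &demo_mem_global_release;
	r = drm_global_item_ref(mem_ref);
	if (r)
		return r;

	bo_ref->mem_glob = mem_ref->object;
	bo_ref->ref.global_type = DRM_GLOBAL_TTM_BO;
	bo_ref->ref.size = sizeof(struct ttm_bo_global);
	bo_ref->ref.init = &ttm_bo_global_init;
	bo_ref->ref.release = &ttm_bo_global_release;
	r = drm_global_item_ref(&bo_ref->ref);
	if (r)
		drm_global_item_unref(mem_ref);
	return r;
}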
ast_drv.h
33 #include <drm/ttm/ttm_bo_api.h>
34 #include <drm/ttm/ttm_bo_driver.h>
35 #include <drm/ttm/ttm_placement.h>
36 #include <drm/ttm/ttm_memory.h>
37 #include <drm/ttm/ttm_module.h>
107 } ttm; member in struct:ast_private
/linux-4.1.27/drivers/gpu/drm/cirrus/
cirrus_ttm.c
30 #include <ttm/ttm_page_alloc.h>
35 return container_of(bd, struct cirrus_device, ttm.bdev); cirrus_bdev()
55 global_ref = &cirrus->ttm.mem_global_ref; cirrus_ttm_global_init()
67 cirrus->ttm.bo_global_ref.mem_glob = cirrus_ttm_global_init()
68 cirrus->ttm.mem_global_ref.object; cirrus_ttm_global_init()
69 global_ref = &cirrus->ttm.bo_global_ref.ref; cirrus_ttm_global_init()
77 drm_global_item_unref(&cirrus->ttm.mem_global_ref); cirrus_ttm_global_init()
86 if (cirrus->ttm.mem_global_ref.release == NULL) cirrus_ttm_global_release()
89 drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref); cirrus_ttm_global_release()
90 drm_global_item_unref(&cirrus->ttm.mem_global_ref); cirrus_ttm_global_release()
91 cirrus->ttm.mem_global_ref.release = NULL; cirrus_ttm_global_release()
228 static int cirrus_ttm_tt_populate(struct ttm_tt *ttm) cirrus_ttm_tt_populate() argument
230 return ttm_pool_populate(ttm); cirrus_ttm_tt_populate()
233 static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm) cirrus_ttm_tt_unpopulate() argument
235 ttm_pool_unpopulate(ttm); cirrus_ttm_tt_unpopulate()
254 struct ttm_bo_device *bdev = &cirrus->ttm.bdev; cirrus_mm_init()
260 ret = ttm_bo_device_init(&cirrus->ttm.bdev, cirrus_mm_init()
261 cirrus->ttm.bo_global_ref.ref.object, cirrus_mm_init()
274 DRM_ERROR("Failed ttm VRAM init: %d\n", ret); cirrus_mm_init()
290 ttm_bo_device_release(&cirrus->ttm.bdev); cirrus_mm_fini()
336 cirrusbo->bo.bdev = &cirrus->ttm.bdev; cirrus_bo_create()
340 acc_size = ttm_bo_dma_acc_size(&cirrus->ttm.bdev, size, cirrus_bo_create()
343 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, cirrus_bo_create()
418 return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev); cirrus_mmap()
cirrus_drv.h
18 #include <drm/ttm/ttm_bo_api.h>
19 #include <drm/ttm/ttm_bo_driver.h>
20 #include <drm/ttm/ttm_placement.h>
21 #include <drm/ttm/ttm_memory.h>
22 #include <drm/ttm/ttm_module.h>
148 } ttm; member in struct:cirrus_device
/linux-4.1.27/drivers/gpu/drm/mgag200/
mgag200_ttm.c
30 #include <ttm/ttm_page_alloc.h>
35 return container_of(bd, struct mga_device, ttm.bdev); mgag200_bdev()
55 global_ref = &ast->ttm.mem_global_ref; mgag200_ttm_global_init()
67 ast->ttm.bo_global_ref.mem_glob = mgag200_ttm_global_init()
68 ast->ttm.mem_global_ref.object; mgag200_ttm_global_init()
69 global_ref = &ast->ttm.bo_global_ref.ref; mgag200_ttm_global_init()
77 drm_global_item_unref(&ast->ttm.mem_global_ref); mgag200_ttm_global_init()
86 if (ast->ttm.mem_global_ref.release == NULL) mgag200_ttm_global_release()
89 drm_global_item_unref(&ast->ttm.bo_global_ref.ref); mgag200_ttm_global_release()
90 drm_global_item_unref(&ast->ttm.mem_global_ref); mgag200_ttm_global_release()
91 ast->ttm.mem_global_ref.release = NULL; mgag200_ttm_global_release()
228 static int mgag200_ttm_tt_populate(struct ttm_tt *ttm) mgag200_ttm_tt_populate() argument
230 return ttm_pool_populate(ttm); mgag200_ttm_tt_populate()
233 static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm) mgag200_ttm_tt_unpopulate() argument
235 ttm_pool_unpopulate(ttm); mgag200_ttm_tt_unpopulate()
254 struct ttm_bo_device *bdev = &mdev->ttm.bdev; mgag200_mm_init()
260 ret = ttm_bo_device_init(&mdev->ttm.bdev, mgag200_mm_init()
261 mdev->ttm.bo_global_ref.ref.object, mgag200_mm_init()
273 DRM_ERROR("Failed ttm VRAM init: %d\n", ret); mgag200_mm_init()
285 ttm_bo_device_release(&mdev->ttm.bdev); mgag200_mm_fini()
332 mgabo->bo.bdev = &mdev->ttm.bdev; mgag200_bo_create()
336 acc_size = ttm_bo_dma_acc_size(&mdev->ttm.bdev, size, mgag200_bo_create()
339 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, mgag200_bo_create()
435 return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev); mgag200_mmap()
mgag200_drv.h
19 #include <drm/ttm/ttm_bo_api.h>
20 #include <drm/ttm/ttm_bo_driver.h>
21 #include <drm/ttm/ttm_placement.h>
22 #include <drm/ttm/ttm_memory.h>
23 #include <drm/ttm/ttm_module.h>
215 } ttm; member in struct:mga_device
/linux-4.1.27/include/drm/ttm/
ttm_page_alloc.h
29 #include <drm/ttm/ttm_bo_driver.h>
30 #include <drm/ttm/ttm_memory.h>
46 * @ttm: The struct ttm_tt to contain the backing pages.
48 * Add backing pages to all of @ttm
50 extern int ttm_pool_populate(struct ttm_tt *ttm);
55 * @ttm: The struct ttm_tt which to free backing pages.
57 * Free all pages of @ttm
59 extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
ttm_bo_driver.h
33 #include <ttm/ttm_bo_api.h>
34 #include <ttm/ttm_memory.h>
35 #include <ttm/ttm_module.h>
36 #include <ttm/ttm_placement.h>
49 * @ttm: Pointer to a struct ttm_tt.
57 int (*bind) (struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
62 * @ttm: Pointer to a struct ttm_tt.
67 int (*unbind) (struct ttm_tt *ttm);
72 * @ttm: Pointer to a struct ttm_tt.
77 void (*destroy) (struct ttm_tt *ttm);
104 * @be: Pointer to the ttm backend.
135 * @ttm: Base ttm_tt struct.
145 struct ttm_tt ttm; member in struct:ttm_dma_tt
339 * @ttm: The struct ttm_tt to contain the backing pages.
345 int (*ttm_tt_populate)(struct ttm_tt *ttm);
350 * @ttm: The struct ttm_tt to contain the backing pages.
354 void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
568 * @ttm: The struct ttm_tt.
579 extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
589 * @ttm: the ttm_tt structure.
593 extern void ttm_tt_fini(struct ttm_tt *ttm);
599 * @ttm: The struct ttm_tt containing backing pages.
602 * Bind the pages of @ttm to an aperture location identified by @bo_mem
604 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
609 * @ttm: The struct ttm_tt.
613 extern void ttm_tt_destroy(struct ttm_tt *ttm);
618 * @ttm: The struct ttm_tt.
622 extern void ttm_tt_unbind(struct ttm_tt *ttm);
627 * @ttm: The struct ttm_tt.
631 extern int ttm_tt_swapin(struct ttm_tt *ttm);
636 * @ttm A struct ttm_tt the backing pages of which will change caching policy.
640 * the pages backing @ttm. If changing from cached to uncached or
646 extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
647 extern int ttm_tt_swapout(struct ttm_tt *ttm,
651 * ttm_tt_unpopulate - free pages from a ttm
653 * @ttm: Pointer to the ttm_tt structure
655 * Calls the driver method to free all pages from a ttm
657 extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
1055 int ttm_agp_tt_populate(struct ttm_tt *ttm);
1056 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
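The kernel-doc above describes the hooks a driver wires into struct ttm_bo_driver. A partial, hedged sketch using the simple pool-backed populate pair that the ast/cirrus/mgag200 hits above rely on; the other mandatory hooks (ttm_tt_create, init_mem_type, evict_flags, move, verify_access, ...) are deliberately left out:

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

static int demo_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void demo_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

/* Incomplete on purpose: a real driver must fill in the remaining hooks. */
static struct ttm_bo_driver demo_bo_driver = {
	.ttm_tt_populate = demo_tt_populate,
	.ttm_tt_unpopulate = demo_tt_unpopulate,
};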
ttm_object.h
33 * ttm objects. Implements reference counting, minimal security checks
45 #include <ttm/ttm_memory.h>
72 * One entry per ttm object type.
300 * data structures needed for ttm base and ref objects.
ttm_lock.h
52 #include <ttm/ttm_object.h>
59 * @base: ttm base object used solely to release the lock if the client
ttm_execbuf_util.h
34 #include <ttm/ttm_bo_api.h>
ttm_bo_api.h
170 * @ttm: TTM structure holding system pages.
220 struct ttm_tt *ttm; member in struct:ttm_buffer_object
463 * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
646 * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
660 * ttm_bo_mmap - mmap out of the ttm device address space.
685 * This function implements read / write into ttm buffer objects, and is
/linux-4.1.27/drivers/gpu/drm/bochs/
bochs_mm.c
16 return container_of(bd, struct bochs_device, ttm.bdev); bochs_bdev()
34 global_ref = &bochs->ttm.mem_global_ref; bochs_ttm_global_init()
46 bochs->ttm.bo_global_ref.mem_glob = bochs_ttm_global_init()
47 bochs->ttm.mem_global_ref.object; bochs_ttm_global_init()
48 global_ref = &bochs->ttm.bo_global_ref.ref; bochs_ttm_global_init()
56 drm_global_item_unref(&bochs->ttm.mem_global_ref); bochs_ttm_global_init()
65 if (bochs->ttm.mem_global_ref.release == NULL) bochs_ttm_global_release()
68 drm_global_item_unref(&bochs->ttm.bo_global_ref.ref); bochs_ttm_global_release()
69 drm_global_item_unref(&bochs->ttm.mem_global_ref); bochs_ttm_global_release()
70 bochs->ttm.mem_global_ref.release = NULL; bochs_ttm_global_release()
219 struct ttm_bo_device *bdev = &bochs->ttm.bdev; bochs_mm_init()
226 ret = ttm_bo_device_init(&bochs->ttm.bdev, bochs_mm_init()
227 bochs->ttm.bo_global_ref.ref.object, bochs_mm_init()
240 DRM_ERROR("Failed ttm VRAM init: %d\n", ret); bochs_mm_init()
244 bochs->ttm.initialized = true; bochs_mm_init()
250 if (!bochs->ttm.initialized) bochs_mm_fini()
253 ttm_bo_device_release(&bochs->ttm.bdev); bochs_mm_fini()
255 bochs->ttm.initialized = false; bochs_mm_fini()
346 return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev); bochs_mmap()
369 bochsbo->bo.bdev = &bochs->ttm.bdev; bochs_bo_create()
374 acc_size = ttm_bo_dma_acc_size(&bochs->ttm.bdev, size, bochs_bo_create()
377 ret = ttm_bo_init(&bochs->ttm.bdev, &bochsbo->bo, size, bochs_bo_create()
bochs.h
12 #include <ttm/ttm_bo_driver.h>
13 #include <ttm/ttm_page_alloc.h>
80 /* ttm */
86 } ttm; member in struct:bochs_device
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
vmwgfx_buffer.c
29 #include <drm/ttm/ttm_bo_driver.h>
30 #include <drm/ttm/ttm_placement.h>
31 #include <drm/ttm/ttm_page_alloc.h>
394 vsgt->pages = vmw_tt->dma_ttm.ttm.pages; vmw_ttm_map_dma()
395 vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; vmw_ttm_map_dma()
504 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_bo_map_dma()
521 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_bo_unmap_dma()
541 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_bo_sg_table()
547 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) vmw_ttm_bind() argument
550 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_ttm_bind()
563 ttm->num_pages, vmw_be->gmr_id); vmw_ttm_bind()
567 vmw_mob_create(ttm->num_pages); vmw_ttm_bind()
573 &vmw_be->vsgt, ttm->num_pages, vmw_ttm_bind()
581 static int vmw_ttm_unbind(struct ttm_tt *ttm) vmw_ttm_unbind() argument
584 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_ttm_unbind()
604 static void vmw_ttm_destroy(struct ttm_tt *ttm) vmw_ttm_destroy() argument
607 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_ttm_destroy()
613 ttm_tt_fini(ttm); vmw_ttm_destroy()
622 static int vmw_ttm_populate(struct ttm_tt *ttm) vmw_ttm_populate() argument
625 container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); vmw_ttm_populate()
630 if (ttm->state != tt_unpopulated) vmw_ttm_populate()
635 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); vmw_ttm_populate()
644 ret = ttm_pool_populate(ttm); vmw_ttm_populate()
649 static void vmw_ttm_unpopulate(struct ttm_tt *ttm) vmw_ttm_unpopulate() argument
651 struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, vmw_ttm_unpopulate()
652 dma_ttm.ttm); vmw_ttm_unpopulate()
665 ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); vmw_ttm_unpopulate()
670 ttm_pool_unpopulate(ttm); vmw_ttm_unpopulate()
690 vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; vmw_ttm_tt_create()
698 ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags, vmw_ttm_tt_create()
703 return &vmw_be->dma_ttm.ttm; vmw_ttm_tt_create()
vmwgfx_gmrid_manager.c
32 #include <drm/ttm/ttm_module.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_placement.h>
vmwgfx_prime.c
35 #include <drm/ttm/ttm_object.h>
vmwgfx_gmr.c
30 #include <drm/ttm/ttm_bo_driver.h>
vmwgfx_drv.h
36 #include <drm/ttm/ttm_bo_driver.h>
37 #include <drm/ttm/ttm_object.h>
38 #include <drm/ttm/ttm_lock.h>
39 #include <drm/ttm/ttm_execbuf_util.h>
40 #include <drm/ttm/ttm_module.h>
vmwgfx_drv.c
32 #include <drm/ttm/ttm_placement.h>
33 #include <drm/ttm/ttm_bo_driver.h>
34 #include <drm/ttm/ttm_object.h>
35 #include <drm/ttm/ttm_module.h>
vmwgfx_mob.c
277 ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); vmw_otables_setup()
404 ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm); vmw_mob_pt_populate()
vmwgfx_dmabuf.c
28 #include <drm/ttm/ttm_placement.h>
vmwgfx_fb.c
34 #include <drm/ttm/ttm_placement.h>
vmwgfx_fifo.c
30 #include <drm/ttm/ttm_placement.h>
vmwgfx_overlay.c
32 #include <drm/ttm/ttm_placement.h>
vmwgfx_shader.c
30 #include "ttm/ttm_placement.h"
vmwgfx_context.c
30 #include "ttm/ttm_placement.h"
vmwgfx_resource.c
30 #include <drm/ttm/ttm_object.h>
31 #include <drm/ttm/ttm_placement.h>
vmwgfx_surface.c
30 #include <ttm/ttm_placement.h>
vmwgfx_execbuf.c
30 #include <drm/ttm/ttm_bo_api.h>
31 #include <drm/ttm/ttm_placement.h>
/linux-4.1.27/arch/powerpc/perf/
ppc970-pmu.c
264 unsigned int ttm, grp; p970_compute_mmcr() local
321 ttm = unitmap[i]; p970_compute_mmcr()
322 ++ttmuse[(ttm >> 2) & 1]; p970_compute_mmcr()
323 mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH; p970_compute_mmcr()
335 ttm = (unitmap[unit] >> 2) & 1; p970_compute_mmcr()
337 ttm = 2; p970_compute_mmcr()
339 ttm = 3; p970_compute_mmcr()
343 mmcr1 |= (unsigned long)ttm p970_compute_mmcr()
power4-pmu.c
363 unsigned int ttm, grp; p4_compute_mmcr() local
461 ttm = unit - 1; /* 2->1, 3->2 */ p4_compute_mmcr()
463 ttm = unit >> 2; p4_compute_mmcr()
464 mmcr1 |= (unsigned long)ttm p4_compute_mmcr()
power5+-pmu.c
460 unsigned int ttm; power5p_compute_mmcr() local
545 ttm = unit >> 2; power5p_compute_mmcr()
546 mmcr1 |= (unsigned long)ttm power5p_compute_mmcr()
power5-pmu.c
391 unsigned int ttm, grp; power5_compute_mmcr() local
485 ttm = unit >> 2; power5_compute_mmcr()
486 mmcr1 |= (unsigned long)ttm power5_compute_mmcr()
/linux-4.1.27/drivers/gpu/drm/
Makefile
37 obj-$(CONFIG_DRM_TTM) += ttm/
/linux-4.1.27/include/linux/
reservation.h
2 * Header file for reservations for dma-buf and ttm
/linux-4.1.27/arch/x86/platform/uv/
tlb_uv.c
518 cycles_t ttm; uv1_wait_completion() local
535 ttm = get_cycles(); uv1_wait_completion()
543 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { uv1_wait_completion()
614 cycles_t ttm; uv2_3_wait_completion() local
634 ttm = get_cycles(); uv2_3_wait_completion()
645 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { uv2_3_wait_completion()
660 ttm = get_cycles(); uv2_3_wait_completion()
661 if ((ttm - bcp->send_message) > bcp->timeout_interval) uv2_3_wait_completion()
/linux-4.1.27/drivers/net/ethernet/dec/tulip/
eeprom.c
117 0x00, 0x06 /* ttm bit map */ tulip_build_fake_mediatable()
de4x5.c
516 u_int ttm; /* Transmit Threshold Mode for each media */ member in struct:mii_phy
4629 lp->phy[lp->active].ttm = get_unaligned_le16(p); type1_infoblock()
4710 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2; type3_infoblock()
/linux-4.1.27/mm/
shmem.c
3431 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.

Completed in 8302 milliseconds