/linux-4.4.14/drivers/gpu/drm/i915/
D | i915_gem_execbuffer.c |
      45  struct list_head vmas;  (member)
      81  INIT_LIST_HEAD(&eb->vmas);  (in eb_create())
     154  list_add_tail(&vma->exec_list, &eb->vmas);  (in eb_lookup_vmas())
     232  while (!list_empty(&eb->vmas)) {  (in eb_destroy())
     235  vma = list_first_entry(&eb->vmas,  (in eb_destroy())
     562  list_for_each_entry(vma, &eb->vmas, exec_list) {  (in i915_gem_execbuffer_relocate())
     690  struct list_head *vmas,  (in i915_gem_execbuffer_reserve(), argument)
     703  vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;  (in i915_gem_execbuffer_reserve())
     706  while (!list_empty(vmas)) {  (in i915_gem_execbuffer_reserve())
     710  vma = list_first_entry(vmas, struct i915_vma, exec_list);  (in i915_gem_execbuffer_reserve())
    [all …]

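These i915_gem_execbuffer.c hits (and the intel_lrc.* ones below) are all the standard <linux/list.h> intrusive-list pattern: eb->vmas is the head, and each VMA is threaded onto it through its exec_list member. A minimal sketch of that pattern, using illustrative demo_* names rather than the driver's real struct i915_vma:

    #include <linux/list.h>

    struct demo_vma {
            struct list_head exec_list;     /* links this VMA into eb->vmas */
    };

    struct demo_eb {
            struct list_head vmas;          /* list head (the "member" hit) */
    };

    static void demo_eb_init(struct demo_eb *eb)
    {
            INIT_LIST_HEAD(&eb->vmas);                      /* cf. eb_create() */
    }

    static void demo_eb_add(struct demo_eb *eb, struct demo_vma *vma)
    {
            list_add_tail(&vma->exec_list, &eb->vmas);      /* cf. eb_lookup_vmas() */
    }

    static void demo_eb_walk(struct demo_eb *eb)
    {
            struct demo_vma *vma;

            list_for_each_entry(vma, &eb->vmas, exec_list) {
                    /* cf. the relocation loop */
            }
    }

    static void demo_eb_drain(struct demo_eb *eb)
    {
            while (!list_empty(&eb->vmas)) {                /* cf. eb_destroy() */
                    struct demo_vma *vma =
                            list_first_entry(&eb->vmas, struct demo_vma, exec_list);
                    list_del(&vma->exec_list);
            }
    }
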
D | intel_lrc.h |
      95  struct list_head *vmas);

D | intel_lrc.c |
     624  struct list_head *vmas)  (in execlists_move_to_gpu(), argument)
     632  list_for_each_entry(vma, vmas, exec_list) {  (in execlists_move_to_gpu())
     870  struct list_head *vmas)  (in intel_execlists_submission(), argument)
     912  ret = execlists_move_to_gpu(params->request, vmas);  (in intel_execlists_submission())
     940  i915_gem_execbuffer_move_to_active(vmas, params->request);  (in intel_execlists_submission())

D | i915_dma.c |
    1124  kmem_cache_destroy(dev_priv->vmas);  (in i915_driver_load())
    1216  kmem_cache_destroy(dev_priv->vmas);  (in i915_driver_unload())

D | i915_drv.h |
    1704  struct kmem_cache *vmas;  (member)
    1948  struct list_head *vmas);
    2786  void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
    2791  struct list_head *vmas);

D | i915_gem.c |
    4585  kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);  (in i915_gem_vma_destroy())
    4994  dev_priv->vmas =  (in i915_gem_load())

D | i915_gem_gtt.c |
    3214  vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);  (in __i915_gem_vma_create())

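Read together, the four i915 files above show one slab cache serving all VMA allocations: dev_priv->vmas is created in i915_gem_load(), each VMA is kmem_cache_zalloc()'d from it and kmem_cache_free()'d back, and the cache is destroyed on unload (and in the load error path). A sketch of that lifecycle, with illustrative demo_* names in place of the driver's types:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_vma {
            struct list_head exec_list;
            /* ... */
    };

    static struct kmem_cache *vma_cache;            /* cf. dev_priv->vmas */

    static int demo_load(void)                      /* cf. i915_gem_load() */
    {
            vma_cache = kmem_cache_create("demo_vmas", sizeof(struct demo_vma),
                                          0, SLAB_HWCACHE_ALIGN, NULL);
            return vma_cache ? 0 : -ENOMEM;
    }

    static struct demo_vma *demo_vma_create(void)
    {
            /* cf. __i915_gem_vma_create(): zeroed per-VMA allocation */
            return kmem_cache_zalloc(vma_cache, GFP_KERNEL);
    }

    static void demo_vma_destroy(struct demo_vma *vma)
    {
            kmem_cache_free(vma_cache, vma);        /* cf. i915_gem_vma_destroy() */
    }

    static void demo_unload(void)                   /* cf. i915_driver_unload() */
    {
            kmem_cache_destroy(vma_cache);
    }
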
/linux-4.4.14/mm/
D | gup.c |
     456  struct vm_area_struct **vmas, int *nonblocking)  (in __get_user_pages(), argument)
     497  i = follow_hugetlb_page(mm, vma, pages, vmas,  (in __get_user_pages())
     545  if (vmas) {  (in __get_user_pages())
     546  vmas[i] = vma;  (in __get_user_pages())
     627  struct vm_area_struct **vmas,  (in __get_user_pages_locked(), argument)
     636  BUG_ON(vmas);  (in __get_user_pages_locked())
     652  vmas, locked);  (in __get_user_pages_locked())
     857  int force, struct page **pages, struct vm_area_struct **vmas)  (in get_user_pages(), argument)
     860  pages, vmas, NULL, false, FOLL_TOUCH);  (in get_user_pages())

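These gup.c hits are the 4.4-era eight-argument get_user_pages(): the optional vmas array, when non-NULL, is filled with the vm_area_struct backing each pinned page (nommu.c and hugetlb.c below fill the same array on their paths). A sketch of a caller under that signature; pin_page_with_vma is a hypothetical helper, and the returned vma pointer is only safe to dereference while mmap_sem is held:

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int pin_page_with_vma(unsigned long uaddr, struct page **page,
                                 struct vm_area_struct **vma)
    {
            long ret;

            down_read(&current->mm->mmap_sem);
            ret = get_user_pages(current, current->mm, uaddr, 1,
                                 1 /* write */, 0 /* force */, page, vma);
            /* *vma is valid only until we drop mmap_sem; inspect it here */
            up_read(&current->mm->mmap_sem);

            return ret == 1 ? 0 : -EFAULT;  /* caller must put_page(*page) */
    }
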
D | nommu.c |
     138  struct vm_area_struct **vmas, int *nonblocking)  (in __get_user_pages(), argument)
     167  if (vmas)  (in __get_user_pages())
     168  vmas[i] = vma;  (in __get_user_pages())
     188  struct vm_area_struct **vmas)  (in get_user_pages(), argument)
     197  return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,  (in get_user_pages())

D | hugetlb.c |
    3823  struct page **pages, struct vm_area_struct **vmas,  (in follow_hugetlb_page(), argument)
    3908  if (vmas)  (in follow_hugetlb_page())
    3909  vmas[i] = vma;  (in follow_hugetlb_page())

/linux-4.4.14/drivers/video/fbdev/vermilion/
D | vermilion.h |
     224  atomic_t vmas;  (member)

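In vermilion the member is a counter, not a list: the driver only needs to know how many userspace mappings of the framebuffer are live. A sketch of that counting pattern through the vm_operations open/close hooks (demo_* names are illustrative):

    #include <linux/atomic.h>
    #include <linux/mm.h>

    struct demo_info {
            atomic_t vmas;                  /* live mappings of this device */
    };

    static void demo_vm_open(struct vm_area_struct *vma)
    {
            struct demo_info *vinfo = vma->vm_private_data;

            atomic_inc(&vinfo->vmas);       /* new mapping (mmap or fork) */
    }

    static void demo_vm_close(struct vm_area_struct *vma)
    {
            struct demo_info *vinfo = vma->vm_private_data;

            atomic_dec(&vinfo->vmas);       /* mapping torn down */
    }

    static const struct vm_operations_struct demo_vm_ops = {
            .open  = demo_vm_open,
            .close = demo_vm_close,
    };
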
/linux-4.4.14/drivers/staging/android/ion/
D | ion_priv.h |
      82  struct list_head vmas;  (member)

D | ion.c |
     242  INIT_LIST_HEAD(&buffer->vmas);  (in ion_buffer_create())
     941  list_for_each_entry(vma_list, &buffer->vmas, list) {  (in ion_buffer_sync_for_device())
     979  list_add(&vma_list->list, &buffer->vmas);  (in ion_vm_open())
     991  list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {  (in ion_vm_close())

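ion tracks the mappings themselves: ion_vm_open() links every new vma onto buffer->vmas so ion_buffer_sync_for_device() can walk them, and ion_vm_close() unlinks. A sketch of that bookkeeping under the same assumptions (demo_* names, a per-buffer mutex guarding the list):

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct demo_vma_list {
            struct list_head list;
            struct vm_area_struct *vma;
    };

    struct demo_buffer {
            struct mutex lock;
            struct list_head vmas;          /* INIT_LIST_HEAD() at creation */
    };

    static void demo_vm_open(struct vm_area_struct *vma)
    {
            struct demo_buffer *buffer = vma->vm_private_data;
            struct demo_vma_list *entry;

            entry = kmalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                    return;
            entry->vma = vma;

            mutex_lock(&buffer->lock);
            list_add(&entry->list, &buffer->vmas);          /* cf. ion_vm_open() */
            mutex_unlock(&buffer->lock);
    }

    static void demo_vm_close(struct vm_area_struct *vma)
    {
            struct demo_buffer *buffer = vma->vm_private_data;
            struct demo_vma_list *entry, *tmp;

            mutex_lock(&buffer->lock);
            /* _safe variant because we delete while iterating */
            list_for_each_entry_safe(entry, tmp, &buffer->vmas, list) {
                    if (entry->vma != vma)
                            continue;
                    list_del(&entry->list);                 /* cf. ion_vm_close() */
                    kfree(entry);
                    break;
            }
            mutex_unlock(&buffer->lock);
    }
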
/linux-4.4.14/Documentation/vm/
D | userfaultfd.txt |
      29  operations never involve heavyweight structures like vmas (in fact the
      34  Terabytes. Too many vmas would be needed for that.

D | unevictable-lru.txt |
      22  - Filtering special vmas.

/linux-4.4.14/Documentation/
D | robust-futexes.txt |
      54  FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
      65  microsecond on Linux, but with thousands (or tens of thousands) of vmas

/linux-4.4.14/include/linux/
D | mm.h |
    1195  struct vm_area_struct **vmas, int *nonblocking);
    1199  struct vm_area_struct **vmas);