/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_gem_execbuffer.c |
      45  struct list_head vmas;                                            member
      81  INIT_LIST_HEAD(&eb->vmas);                                        in eb_create()
     154  list_add_tail(&vma->exec_list, &eb->vmas);                        in eb_lookup_vmas()
     237  while (!list_empty(&eb->vmas)) {                                  in eb_destroy()
     240  vma = list_first_entry(&eb->vmas,                                 in eb_destroy()
     568  list_for_each_entry(vma, &eb->vmas, exec_list) {                  in i915_gem_execbuffer_relocate()
     684  struct list_head *vmas,                                           in i915_gem_execbuffer_reserve() argument
     696  vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;      in i915_gem_execbuffer_reserve()
     699  while (!list_empty(vmas)) {                                       in i915_gem_execbuffer_reserve()
     703  vma = list_first_entry(vmas, struct i915_vma, exec_list);         in i915_gem_execbuffer_reserve()
  [all …]
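These hits trace one lifecycle: here `vmas` is the head of an intrusive list of i915 VMAs, initialized in eb_create(), filled through each vma's exec_list node in eb_lookup_vmas(), and drained in eb_destroy(). A minimal kernel-side sketch of that pattern follows; the `*_sketch` names are illustrative stand-ins, not the driver's real types:

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Sketch only: field names mirror the hits above, the rest is illustrative. */
    struct i915_vma_sketch {
        struct list_head exec_list;   /* links this VMA into eb->vmas */
    };

    struct eb_vmas_sketch {
        struct list_head vmas;        /* list head; see "45 struct list_head vmas;" */
    };

    static void eb_init_sketch(struct eb_vmas_sketch *eb)
    {
        INIT_LIST_HEAD(&eb->vmas);    /* as in eb_create() */
    }

    static void eb_add_sketch(struct eb_vmas_sketch *eb, struct i915_vma_sketch *vma)
    {
        list_add_tail(&vma->exec_list, &eb->vmas);    /* as in eb_lookup_vmas() */
    }

    static void eb_drain_sketch(struct eb_vmas_sketch *eb)
    {
        /* as in eb_destroy(): pop entries until the list is empty */
        while (!list_empty(&eb->vmas)) {
            struct i915_vma_sketch *vma =
                list_first_entry(&eb->vmas, struct i915_vma_sketch, exec_list);
            list_del(&vma->exec_list);
            kfree(vma);
        }
    }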
|
D | intel_lrc.h | 85 struct list_head *vmas,
|
D | intel_lrc.c |
     604  struct list_head *vmas)                                           in execlists_move_to_gpu() argument
     612  list_for_each_entry(vma, vmas, exec_list) {                       in execlists_move_to_gpu()
     655  struct list_head *vmas,                                           in intel_execlists_submission() argument
     711  ret = execlists_move_to_gpu(ringbuf, ctx, vmas);                  in intel_execlists_submission()
     736  i915_gem_execbuffer_move_to_active(vmas, ring);                   in intel_execlists_submission()
|
D | i915_drv.h |
    1822  struct list_head *vmas,
    2566  void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
    2577  struct list_head *vmas,
|
/linux-4.1.27/mm/ |
D | gup.c |
     419  struct vm_area_struct **vmas, int *nonblocking)                   in __get_user_pages() argument
     460  i = follow_hugetlb_page(mm, vma, pages, vmas,                     in __get_user_pages()
     502  if (vmas) {                                                       in __get_user_pages()
     503  vmas[i] = vma;                                                    in __get_user_pages()
     584  struct vm_area_struct **vmas,                                     in __get_user_pages_locked() argument
     593  BUG_ON(vmas);                                                     in __get_user_pages_locked()
     609  vmas, locked);                                                    in __get_user_pages_locked()
     814  int force, struct page **pages, struct vm_area_struct **vmas)    in get_user_pages() argument
     817  pages, vmas, NULL, false, FOLL_TOUCH);                            in get_user_pages()
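In mm/, `vmas` is an optional output array: when a caller passes one, __get_user_pages() stores the vm_area_struct backing each pinned page (`vmas[i] = vma;`). A hedged sketch of a caller using the 4.1-era eight-argument get_user_pages(); `pin_user_address()` itself is a hypothetical helper:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Sketch only: pin one user page and also fetch the VMA backing it.
     * The call matches the eight-argument get_user_pages() indexed above;
     * pin_user_address() is hypothetical. */
    static long pin_user_address(unsigned long uaddr)
    {
        struct page *page;
        struct vm_area_struct *vma;
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr, 1 /* nr_pages */,
                             1 /* write */, 0 /* force */, &page, &vma);
        if (ret == 1) {
            /* vma is what "vmas[i] = vma;" stored; it is only stable
             * while mmap_sem is held. */
            put_page(page);    /* drop the pin when done with the page */
        }
        up_read(&current->mm->mmap_sem);
        return ret;
    }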
|
D | nommu.c |
     154  struct vm_area_struct **vmas, int *nonblocking)                   in __get_user_pages() argument
     183  if (vmas)                                                         in __get_user_pages()
     184  vmas[i] = vma;                                                    in __get_user_pages()
     204  struct vm_area_struct **vmas)                                     in get_user_pages() argument
     213  return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,  in get_user_pages()
|
D | hugetlb.c |
    3386  struct page **pages, struct vm_area_struct **vmas,                in follow_hugetlb_page() argument
    3471  if (vmas)                                                         in follow_hugetlb_page()
    3472  vmas[i] = vma;                                                    in follow_hugetlb_page()
|
/linux-4.1.27/drivers/video/fbdev/vermilion/ |
D | vermilion.h | 224 atomic_t vmas; member
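Unlike the list heads above, vermilion's `vmas` is an atomic_t, i.e. a plain count of live mappings rather than a container. A sketch of that counting idiom via vm_operations_struct open/close hooks, with illustrative names (the real driver's hooks differ in detail):

    #include <linux/atomic.h>
    #include <linux/mm.h>

    /* Sketch: count live mappings with an atomic_t, per "atomic_t vmas;".
     * All names here are illustrative. */
    struct vml_buf_sketch {
        atomic_t vmas;    /* how many vm_area_structs map this buffer */
    };

    static void vml_vm_open_sketch(struct vm_area_struct *vma)
    {
        struct vml_buf_sketch *buf = vma->vm_private_data;

        atomic_inc(&buf->vmas);    /* mapping created or duplicated (fork, split) */
    }

    static void vml_vm_close_sketch(struct vm_area_struct *vma)
    {
        struct vml_buf_sketch *buf = vma->vm_private_data;

        atomic_dec(&buf->vmas);    /* mapping torn down */
    }

    static const struct vm_operations_struct vml_vm_ops_sketch = {
        .open  = vml_vm_open_sketch,
        .close = vml_vm_close_sketch,
    };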
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_priv.h | 82 struct list_head vmas; member
|
D | ion.c |
     245  INIT_LIST_HEAD(&buffer->vmas);                                    in ion_buffer_create()
     939  list_for_each_entry(vma_list, &buffer->vmas, list) {              in ion_buffer_sync_for_device()
     977  list_add(&vma_list->list, &buffer->vmas);                         in ion_vm_open()
     989  list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {    in ion_vm_close()
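ion combines the two patterns above: each buffer keeps a list of the VMAs currently mapping it, with the head initialized at buffer creation, ion_vm_open() adding a node, and ion_vm_close() using the _safe iterator because it deletes while walking. A sketch with a hypothetical `ion_vma_node` standing in for ion's vma_list; the real driver also takes a per-buffer mutex around these, omitted here:

    #include <linux/list.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Sketch of ion's per-buffer VMA tracking; ion_vma_node is hypothetical. */
    struct ion_vma_node {
        struct list_head list;
        struct vm_area_struct *vma;
    };

    struct ion_buf_sketch {
        struct list_head vmas;    /* INIT_LIST_HEAD() at buffer creation */
    };

    static void ion_vm_open_sketch(struct vm_area_struct *vma)
    {
        struct ion_buf_sketch *buf = vma->vm_private_data;
        struct ion_vma_node *node = kmalloc(sizeof(*node), GFP_KERNEL);

        if (!node)
            return;
        node->vma = vma;
        list_add(&node->list, &buf->vmas);    /* as in ion_vm_open() */
    }

    static void ion_vm_close_sketch(struct vm_area_struct *vma)
    {
        struct ion_buf_sketch *buf = vma->vm_private_data;
        struct ion_vma_node *node, *tmp;

        /* _safe variant because we delete while iterating, as ion_vm_close() does */
        list_for_each_entry_safe(node, tmp, &buf->vmas, list) {
            if (node->vma == vma) {
                list_del(&node->list);
                kfree(node);
                break;
            }
        }
    }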
|
/linux-4.1.27/Documentation/ |
D | robust-futexes.txt |
      54  FUTEX_RECOVER. At do_exit() time, all vmas are searched to see whether
      65  microsecond on Linux, but with thousands (or tens of thousands) of vmas
|
/linux-4.1.27/Documentation/vm/ |
D | page_migration | 103 to migration entries or dropped (nonlinear vmas).
|
D | unevictable-lru.txt | 22 - Filtering special vmas.
|
/linux-4.1.27/include/linux/ |
D | mm.h |
    1210  struct vm_area_struct **vmas, int *nonblocking);
    1214  struct vm_area_struct **vmas);
|