Lines matching refs:vma in the Linux kernel's mm/gup.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark how vma is bound at that line.

19 static struct page *no_page_table(struct vm_area_struct *vma,  in no_page_table()  argument
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
35 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
38 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
45 return no_page_table(vma, flags); in follow_page_pte()
74 page = vm_normal_page(vma, address, pte); in follow_page_pte()
95 if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
127 return no_page_table(vma, flags); in follow_page_pte()
143 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
152 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
164 return no_page_table(vma, flags); in follow_page_mask()
168 return no_page_table(vma, flags); in follow_page_mask()
169 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
173 return no_page_table(vma, flags); in follow_page_mask()
176 return no_page_table(vma, flags); in follow_page_mask()
180 return no_page_table(vma, flags); in follow_page_mask()
181 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
185 return no_page_table(vma, flags); in follow_page_mask()
188 return no_page_table(vma, flags); in follow_page_mask()
191 split_huge_page_pmd(vma, address, pmd); in follow_page_mask()
192 return follow_page_pte(vma, address, pmd, flags); in follow_page_mask()
198 wait_split_huge_page(vma->anon_vma, pmd); in follow_page_mask()
200 page = follow_trans_huge_pmd(vma, address, in follow_page_mask()
209 return follow_page_pte(vma, address, pmd, flags); in follow_page_mask()
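The follow_page_pte()/follow_page_mask() lines above walk the page tables of vma->vm_mm level by level (pgd, pud, pmd, pte), bailing out through no_page_table() whenever a level is absent and handing huge or transparent-huge entries to the dedicated helpers. A loosely analogous lookup can be done from userspace through /proc/self/pagemap, which exposes one 64-bit entry per virtual page; the sketch below is illustrative only and does not touch the kernel routines listed here.

    /* Sketch: translate a virtual address to a PFN via /proc/self/pagemap.
     * Bit 63 = page present; bits 0-54 = PFN (reads as 0 without CAP_SYS_ADMIN). */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        char *buf = malloc(psize);
        if (!buf)
            return 1;
        buf[0] = 1;                         /* touch the page so it is faulted in */

        int fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
            return 1;

        uint64_t entry;
        off_t off = ((uintptr_t)buf / psize) * sizeof(entry);
        if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry))
            return 1;

        printf("present=%d pfn=%llu\n", (int)(entry >> 63),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));
        close(fd);
        return 0;
    }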
213 unsigned int gup_flags, struct vm_area_struct **vma, in get_gate_page() argument
239 *vma = get_gate_vma(mm); in get_gate_page()
242 *page = vm_normal_page(*vma, address, *pte); in get_gate_page()
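get_gate_page() covers addresses in the gate area (the x86-64 [vsyscall] page, or [vectors] on ARM), which has no vma in the mm's tree: get_gate_vma() supplies a synthetic one and vm_normal_page() resolves the backing page. The gate mapping is visible from userspace; the snippet below merely scans /proc/self/maps for it and is purely illustrative.

    /* Sketch: the gate area shows up in /proc/self/maps even though it is not
     * an ordinary vma belonging to this process. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");
        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strstr(line, "[vsyscall]") || strstr(line, "[vectors]"))
                fputs(line, stdout);
        fclose(f);
        return 0;
    }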
261 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, in faultin_page() argument
264 struct mm_struct *mm = vma->vm_mm; in faultin_page()
270 (stack_guard_page_start(vma, address) || in faultin_page()
271 stack_guard_page_end(vma, address + PAGE_SIZE))) in faultin_page()
284 ret = handle_mm_fault(mm, vma, address, fault_flags); in faultin_page()
317 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
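faultin_page() is the slow path taken when follow_page_mask() finds nothing usable: it calls handle_mm_fault() on vma->vm_mm, skips the stack guard page for FOLL_POPULATE, and after a COW break in a vma without VM_WRITE drops FOLL_WRITE so the retry can succeed. From userspace, the first store into a fresh anonymous mapping goes through the same handle_mm_fault() path; a minimal illustration, counting minor faults around the touch, follows.

    /* Sketch: the first write to an untouched anonymous mapping takes a minor
     * fault, which the kernel resolves through handle_mm_fault() on that vma. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rusage before, after;
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        getrusage(RUSAGE_SELF, &before);
        p[0] = 'x';                         /* triggers the page fault */
        getrusage(RUSAGE_SELF, &after);

        printf("minor faults taken: %ld\n", after.ru_minflt - before.ru_minflt);
        return 0;
    }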
322 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) in check_vma_flags() argument
324 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags()
423 struct vm_area_struct *vma = NULL; in __get_user_pages() local
444 if (!vma || start >= vma->vm_end) { in __get_user_pages()
445 vma = find_extend_vma(mm, start); in __get_user_pages()
446 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
449 gup_flags, &vma, in __get_user_pages()
457 if (!vma || check_vma_flags(vma, gup_flags)) in __get_user_pages()
459 if (is_vm_hugetlb_page(vma)) { in __get_user_pages()
460 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
474 page = follow_page_mask(vma, start, foll_flags, &page_mask); in __get_user_pages()
477 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
497 flush_anon_page(vma, page, start); in __get_user_pages()
503 vmas[i] = vma; in __get_user_pages()
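__get_user_pages() is the loop that ties the pieces together: find (or extend) the vma covering the current address, validate it with check_vma_flags(), hand hugetlb vmas to follow_hugetlb_page(), and otherwise alternate between follow_page_mask() and faultin_page() until the requested pages are pinned, optionally recording the vma in vmas[]. One userspace-visible consumer of this machinery is process_vm_readv(), which pins the target's pages before copying; the sketch below reads from the caller's own address space purely as a demonstration and is not part of the code listed here.

    /* Sketch: process_vm_readv() pins the remote pages via the get_user_pages
     * machinery before copying them into the local buffer. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char src[64] = "hello from the same address space";
        char dst[64] = "";
        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

        ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
        if (n < 0) {
            perror("process_vm_readv");
            return 1;
        }
        printf("copied %zd bytes: %s\n", n, dst);
        return 0;
    }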
547 struct vm_area_struct *vma; in fixup_user_fault() local
551 vma = find_extend_vma(mm, address); in fixup_user_fault()
552 if (!vma || address < vma->vm_start) in fixup_user_fault()
556 if (!(vm_flags & vma->vm_flags)) in fixup_user_fault()
559 ret = handle_mm_fault(mm, vma, address, fault_flags); in fixup_user_fault()
840 long populate_vma_page_range(struct vm_area_struct *vma, in populate_vma_page_range() argument
843 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
849 VM_BUG_ON_VMA(start < vma->vm_start, vma); in populate_vma_page_range()
850 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
859 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) in populate_vma_page_range()
866 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) in populate_vma_page_range()
888 struct vm_area_struct *vma = NULL; in __mm_populate() local
904 vma = find_vma(mm, nstart); in __mm_populate()
905 } else if (nstart >= vma->vm_end) in __mm_populate()
906 vma = vma->vm_next; in __mm_populate()
907 if (!vma || vma->vm_start >= end) in __mm_populate()
913 nend = min(end, vma->vm_end); in __mm_populate()
914 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in __mm_populate()
916 if (nstart < vma->vm_start) in __mm_populate()
917 nstart = vma->vm_start; in __mm_populate()
923 ret = populate_vma_page_range(vma, nstart, nend, &locked); in __mm_populate()
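populate_vma_page_range() and __mm_populate() reuse __get_user_pages() to pre-fault a range: the former asserts that the range lies within a single vma and picks FOLL_WRITE/FOLL_FORCE from vma->vm_flags, the latter walks the range vma by vma and skips VM_IO/VM_PFNMAP mappings. mlock() and mmap(MAP_POPULATE) are the usual userspace triggers; a minimal sketch, assuming RLIMIT_MEMLOCK allows at least one page:

    /* Sketch: mlock() ends up in __mm_populate(), which faults in and locks
     * every page of the requested range up front. */
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        if (mlock(p, len) != 0) {           /* populate and lock the range */
            perror("mlock");
            return 1;
        }
        p[0] = 'x';                         /* no fault: the page is already resident */
        munlock(p, len);
        printf("range populated and locked, then released\n");
        return 0;
    }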
956 struct vm_area_struct *vma; in get_dump_page() local
960 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, in get_dump_page()
963 flush_cache_page(vma, addr, page_to_pfn(page)); in get_dump_page()
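get_dump_page() is the coredump helper: a single-page __get_user_pages() call with FOLL_FORCE | FOLL_DUMP | FOLL_GET, where FOLL_DUMP makes no_page_table() return an error for holes in anonymous or fault-less mappings so the dumper can emit a zero page instead of faulting memory in. It can be exercised indirectly by enabling core dumps and crashing; the sketch below is illustrative, and whether a core file actually appears depends on the RLIMIT_CORE hard limit and the kernel's core_pattern setting.

    /* Sketch: force a core dump; writing the dump walks memory with
     * get_dump_page(). Where (or whether) the core file lands depends on
     * /proc/sys/kernel/core_pattern and the RLIMIT_CORE hard limit. */
    #include <stdlib.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
        setrlimit(RLIMIT_CORE, &rl);        /* allow an unlimited-size core file */
        abort();                            /* SIGABRT -> coredump -> get_dump_page() */
    }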