Lines matching refs: vma (in mm/gup.c)

21 static struct page *no_page_table(struct vm_area_struct *vma,  in no_page_table()  argument
32 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) in no_page_table()
37 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, in follow_pfn_pte() argument
52 set_pte_at(vma->vm_mm, address, pte, entry); in follow_pfn_pte()
53 update_mmu_cache(vma, address, pte); in follow_pfn_pte()
61 static struct page *follow_page_pte(struct vm_area_struct *vma, in follow_page_pte() argument
64 struct mm_struct *mm = vma->vm_mm; in follow_page_pte()
71 return no_page_table(vma, flags); in follow_page_pte()
100 page = vm_normal_page(vma, address, pte); in follow_page_pte()
113 ret = follow_pfn_pte(vma, address, ptep, flags); in follow_page_pte()
132 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_page_pte()
161 return no_page_table(vma, flags); in follow_page_pte()
177 struct page *follow_page_mask(struct vm_area_struct *vma, in follow_page_mask() argument
186 struct mm_struct *mm = vma->vm_mm; in follow_page_mask()
198 return no_page_table(vma, flags); in follow_page_mask()
202 return no_page_table(vma, flags); in follow_page_mask()
203 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
207 return no_page_table(vma, flags); in follow_page_mask()
210 return no_page_table(vma, flags); in follow_page_mask()
214 return no_page_table(vma, flags); in follow_page_mask()
215 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) { in follow_page_mask()
219 return no_page_table(vma, flags); in follow_page_mask()
222 return no_page_table(vma, flags); in follow_page_mask()
225 split_huge_page_pmd(vma, address, pmd); in follow_page_mask()
226 return follow_page_pte(vma, address, pmd, flags); in follow_page_mask()
232 wait_split_huge_page(vma->anon_vma, pmd); in follow_page_mask()
234 page = follow_trans_huge_pmd(vma, address, in follow_page_mask()
243 return follow_page_pte(vma, address, pmd, flags); in follow_page_mask()
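
A simplified sketch of the walk that follow_page_mask() performs (pgd -> pud -> pmd -> pte, using pre-p4d accessor names). Huge-page, splitting and FOLL_* handling are omitted, and the function name is made up for illustration, so treat this as a sketch rather than the real implementation:

        /*
         * Illustrative sketch only: the core pgd -> pud -> pmd -> pte walk
         * behind follow_page_mask(), with huge-page, NUMA and FOLL_* details
         * stripped out.  Accessor names match pre-4.12 kernels (no p4d level).
         */
        #include <linux/mm.h>

        static struct page *follow_page_sketch(struct vm_area_struct *vma,
                                               unsigned long address)
        {
                struct mm_struct *mm = vma->vm_mm;
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *ptep, pte;
                spinlock_t *ptl;
                struct page *page = NULL;

                pgd = pgd_offset(mm, address);
                if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                        return NULL;            /* no_page_table() in the real code */

                pud = pud_offset(pgd, address);
                if (pud_none(*pud) || unlikely(pud_bad(*pud)))
                        return NULL;            /* hugetlb pud handling omitted */

                pmd = pmd_offset(pud, address);
                if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                        return NULL;            /* huge/splitting pmd handling omitted */

                /* The pte level corresponds to follow_page_pte() above. */
                ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
                pte = *ptep;
                if (pte_present(pte))
                        page = vm_normal_page(vma, address, pte);
                pte_unmap_unlock(ptep, ptl);
                return page;
        }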
247 unsigned int gup_flags, struct vm_area_struct **vma, in get_gate_page() argument
273 *vma = get_gate_vma(mm); in get_gate_page()
276 *page = vm_normal_page(*vma, address, *pte); in get_gate_page()
295 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, in faultin_page() argument
298 struct mm_struct *mm = vma->vm_mm; in faultin_page()
307 (stack_guard_page_start(vma, address) || in faultin_page()
308 stack_guard_page_end(vma, address + PAGE_SIZE))) in faultin_page()
321 ret = handle_mm_fault(mm, vma, address, fault_flags); in faultin_page()
354 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) in faultin_page()
359 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) in check_vma_flags() argument
361 vm_flags_t vm_flags = vma->vm_flags; in check_vma_flags()
460 struct vm_area_struct *vma = NULL; in __get_user_pages() local
481 if (!vma || start >= vma->vm_end) { in __get_user_pages()
482 vma = find_extend_vma(mm, start); in __get_user_pages()
483 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
486 gup_flags, &vma, in __get_user_pages()
494 if (!vma || check_vma_flags(vma, gup_flags)) in __get_user_pages()
496 if (is_vm_hugetlb_page(vma)) { in __get_user_pages()
497 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
511 page = follow_page_mask(vma, start, foll_flags, &page_mask); in __get_user_pages()
514 ret = faultin_page(tsk, vma, start, &foll_flags, in __get_user_pages()
540 flush_anon_page(vma, page, start); in __get_user_pages()
546 vmas[i] = vma; in __get_user_pages()
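
Sketch of a typical in-kernel caller of this era's get_user_pages(), the exported wrapper around the __get_user_pages() loop shown above. The eight-argument tsk/mm plus write/force form matches kernels of roughly this vintage; later kernels dropped tsk/mm and switched to FOLL_* flags, so check the signature of the kernel you target. The helper name is hypothetical:

        /*
         * Hedged sketch: pin a user buffer for DMA-style access with the
         * old-style get_user_pages() signature, then release the pages.
         */
        #include <linux/mm.h>
        #include <linux/sched.h>
        #include <linux/errno.h>

        static int pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                                   struct page **pages)
        {
                long pinned;
                long i;

                down_read(&current->mm->mmap_sem);
                pinned = get_user_pages(current, current->mm, uaddr, nr_pages,
                                        1 /* write */, 0 /* force */, pages, NULL);
                up_read(&current->mm->mmap_sem);

                if (pinned < 0)
                        return pinned;
                if (pinned < (long)nr_pages) {
                        /* Partial pin: drop what we got and report failure. */
                        for (i = 0; i < pinned; i++)
                                put_page(pages[i]);
                        return -EFAULT;
                }
                return 0;
        }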
590 struct vm_area_struct *vma; in fixup_user_fault() local
594 vma = find_extend_vma(mm, address); in fixup_user_fault()
595 if (!vma || address < vma->vm_start) in fixup_user_fault()
599 if (!(vm_flags & vma->vm_flags)) in fixup_user_fault()
602 ret = handle_mm_fault(mm, vma, address, fault_flags); in fixup_user_fault()
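
fixup_user_fault() is consumed by callers such as the futex fault path, which needs to fault a user word in for writing after an atomic access failed. A paraphrased sketch follows; the four-argument signature shown (without the later "unlocked" argument) matches older 4.x kernels and changed afterwards, and the function name here is illustrative:

        #include <linux/mm.h>
        #include <linux/sched.h>

        /* Paraphrase of the futex-style consumer of fixup_user_fault(). */
        static int fault_in_user_writeable_sketch(u32 __user *uaddr)
        {
                struct mm_struct *mm = current->mm;
                int ret;

                down_read(&mm->mmap_sem);
                ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
                                       FAULT_FLAG_WRITE);
                up_read(&mm->mmap_sem);

                return ret < 0 ? ret : 0;
        }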
883 long populate_vma_page_range(struct vm_area_struct *vma, in populate_vma_page_range() argument
886 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range()
892 VM_BUG_ON_VMA(start < vma->vm_start, vma); in populate_vma_page_range()
893 VM_BUG_ON_VMA(end > vma->vm_end, vma); in populate_vma_page_range()
897 if (vma->vm_flags & VM_LOCKONFAULT) in populate_vma_page_range()
905 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) in populate_vma_page_range()
912 if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) in populate_vma_page_range()
934 struct vm_area_struct *vma = NULL; in __mm_populate() local
950 vma = find_vma(mm, nstart); in __mm_populate()
951 } else if (nstart >= vma->vm_end) in __mm_populate()
952 vma = vma->vm_next; in __mm_populate()
953 if (!vma || vma->vm_start >= end) in __mm_populate()
959 nend = min(end, vma->vm_end); in __mm_populate()
960 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) in __mm_populate()
962 if (nstart < vma->vm_start) in __mm_populate()
963 nstart = vma->vm_start; in __mm_populate()
969 ret = populate_vma_page_range(vma, nstart, nend, &locked); in __mm_populate()
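
From userspace, these population paths are reached through mlock() and mmap(MAP_POPULATE), both of which end up in __mm_populate() and from there in populate_vma_page_range(). A minimal, runnable illustration:

        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>

        int main(void)
        {
                size_t len = 4 * 4096;

                /* MAP_POPULATE asks the kernel to prefault the mapping. */
                char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
                if (buf == MAP_FAILED) {
                        perror("mmap");
                        return 1;
                }

                /* mlock() also populates (and locks) the range via __mm_populate(). */
                if (mlock(buf, len) != 0) {
                        perror("mlock");
                        return 1;
                }

                memset(buf, 0, len);
                puts("range populated and locked");

                munlock(buf, len);
                munmap(buf, len);
                return 0;
        }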
1002 struct vm_area_struct *vma; in get_dump_page() local
1006 FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, in get_dump_page()
1009 flush_cache_page(vma, addr, page_to_pfn(page)); in get_dump_page()
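
get_dump_page() is consumed by the ELF coredump writer, which either emits the page contents or a hole for every virtual page in the dump. The following is a paraphrased sketch of that loop under a hypothetical function name; cprm, dump_emit() and dump_skip() come from the coredump code, and the exact page-release call differs across kernel versions:

        #include <linux/mm.h>
        #include <linux/highmem.h>
        #include <linux/binfmts.h>
        #include <linux/coredump.h>

        /* Sketch of how a coredump writer consumes get_dump_page(). */
        static int dump_user_range_sketch(struct coredump_params *cprm,
                                          unsigned long start, unsigned long end)
        {
                unsigned long addr;

                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        struct page *page = get_dump_page(addr);
                        int ok;

                        if (page) {
                                void *kaddr = kmap(page);

                                ok = dump_emit(cprm, kaddr, PAGE_SIZE);
                                kunmap(page);
                                put_page(page); /* drop the FOLL_GET reference */
                        } else {
                                /* FOLL_DUMP gave no page: write a hole instead. */
                                ok = dump_skip(cprm, PAGE_SIZE);
                        }
                        if (!ok)
                                return 0;
                }
                return 1;
        }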