Lines matching refs: vma (mm/memory.c)
529 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, in free_pgtables() argument
532 while (vma) { in free_pgtables()
533 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
534 unsigned long addr = vma->vm_start; in free_pgtables()
540 unlink_anon_vmas(vma); in free_pgtables()
541 unlink_file_vma(vma); in free_pgtables()
543 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
544 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
550 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
552 vma = next; in free_pgtables()
553 next = vma->vm_next; in free_pgtables()
554 unlink_anon_vmas(vma); in free_pgtables()
555 unlink_file_vma(vma); in free_pgtables()
557 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
560 vma = next; in free_pgtables()
564 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, in __pte_alloc() argument
600 wait_split_huge_page(vma->anon_vma, pmd); in __pte_alloc()
647 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
650 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
679 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
680 index = linear_page_index(vma, addr); in print_bad_pte()
690 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
695 vma->vm_file, in print_bad_pte()
696 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
697 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
750 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
758 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
759 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
760 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
763 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
769 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
770 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
776 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
777 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
779 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
788 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
801 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page_pmd() argument
811 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page_pmd()
812 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page_pmd()
818 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page_pmd()
819 if (pfn == vma->vm_pgoff + off) in vm_normal_page_pmd()
821 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page_pmd()
848 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, in copy_one_pte() argument
851 unsigned long vm_flags = vma->vm_flags; in copy_one_pte()
913 page = vm_normal_page(vma, addr, pte); in copy_one_pte()
929 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, in copy_pte_range() argument
968 vma, addr, rss); in copy_pte_range()
992 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, in copy_pmd_range() argument
1008 dst_pmd, src_pmd, addr, vma); in copy_pmd_range()
1018 vma, addr, next)) in copy_pmd_range()
1025 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, in copy_pud_range() argument
1040 vma, addr, next)) in copy_pud_range()
1047 struct vm_area_struct *vma) in copy_page_range() argument
1051 unsigned long addr = vma->vm_start; in copy_page_range()
1052 unsigned long end = vma->vm_end; in copy_page_range()
1064 if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && in copy_page_range()
1065 !vma->anon_vma) in copy_page_range()
1068 if (is_vm_hugetlb_page(vma)) in copy_page_range()
1069 return copy_hugetlb_page_range(dst_mm, src_mm, vma); in copy_page_range()
1071 if (unlikely(vma->vm_flags & VM_PFNMAP)) { in copy_page_range()
1076 ret = track_pfn_copy(vma); in copy_page_range()
1087 is_cow = is_cow_mapping(vma->vm_flags); in copy_page_range()
1102 vma, addr, next))) { in copy_page_range()
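copy_page_range() is fork's entry point into this file; the early-out above (no anon_vma, no hugetlb or PFN flags) lets fork skip the page-table walk for mappings with nothing interesting to copy. A hedged sketch of the fork-time caller (the real loop lives in kernel/fork.c:dup_mmap(); demo_dup_page_tables and the elided VMA duplication are illustrative only):

static int demo_dup_page_tables(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt;
	int retval = 0;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		/* dup_mmap() first allocates and links the child's copy of
		 * mpnt; that part is elided here */
		retval = copy_page_range(mm, oldmm, mpnt);
		if (retval)
			break;
	}
	return retval;
}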
1114 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1140 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1164 likely(!(vma->vm_flags & VM_SEQ_READ))) in zap_pte_range()
1170 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1196 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1226 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1242 vma->vm_start, in zap_pmd_range()
1243 vma->vm_end); in zap_pmd_range()
1247 split_huge_page_pmd(vma, addr, pmd); in zap_pmd_range()
1248 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1261 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1270 struct vm_area_struct *vma, pgd_t *pgd, in zap_pud_range() argument
1282 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1289 struct vm_area_struct *vma, in unmap_page_range() argument
1300 tlb_start_vma(tlb, vma); in unmap_page_range()
1301 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1306 next = zap_pud_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1308 tlb_end_vma(tlb, vma); in unmap_page_range()
1313 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1317 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1320 if (start >= vma->vm_end) in unmap_single_vma()
1322 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1323 if (end <= vma->vm_start) in unmap_single_vma()
1326 if (vma->vm_file) in unmap_single_vma()
1327 uprobe_munmap(vma, start, end); in unmap_single_vma()
1329 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1330 untrack_pfn(vma, 0, 0); in unmap_single_vma()
1333 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1345 if (vma->vm_file) { in unmap_single_vma()
1346 i_mmap_lock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1347 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1348 i_mmap_unlock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1351 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1374 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1377 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1380 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) in unmap_vmas()
1381 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
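unmap_vmas() and the free_pgtables() walk at the top of this listing are normally driven as a pair from mm/mmap.c whenever a range is unmapped or an address space is torn down. A sketch of that caller, assuming the unmap_region() shape of this kernel generation (prev is the VMA just before the range being removed):

static void demo_unmap_region(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct vm_area_struct *prev,
			      unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);	/* zap the PTEs */
	free_pgtables(&tlb, vma,		/* then the now-empty page tables */
		      prev ? prev->vm_end : FIRST_USER_ADDRESS,
		      next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);
}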
1394 void zap_page_range(struct vm_area_struct *vma, unsigned long start, in zap_page_range() argument
1397 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1405 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) in zap_page_range()
1406 unmap_single_vma(&tlb, vma, start, end, details); in zap_page_range()
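zap_page_range() is the exported "drop every PTE in this range" primitive; madvise(MADV_DONTNEED) is its best-known user. A hedged sketch of that caller, following the madvise_dontneed() shape of this era (locked, hugetlb and PFN mappings are refused before zapping):

static long demo_madvise_dontneed(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
{
	if (vma->vm_flags & (VM_LOCKED | VM_HUGETLB | VM_PFNMAP))
		return -EINVAL;
	zap_page_range(vma, start, end - start, NULL);
	return 0;
}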
1420 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1423 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1431 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1448 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1451 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1452 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1454 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
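zap_vma_ptes() wraps zap_page_range_single() for drivers that need to revoke user mappings they created themselves; as the check above shows, it only accepts VM_PFNMAP VMAs. A hypothetical helper (the assumption that mmap_sem is already held by the caller is mine, not the listing's):

/* Hypothetical: tear down the PFN mapping the driver installed earlier with
 * remap_pfn_range(); mmap_sem is assumed to be held by the caller. */
static int demo_revoke_user_mapping(struct vm_area_struct *vma)
{
	return zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}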
1481 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1484 struct mm_struct *mm = vma->vm_mm; in insert_page()
1543 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1546 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1550 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
1551 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1552 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
1553 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_page()
1555 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
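vm_insert_page() is the refcounted counterpart of the PFN helpers below: the caller hands over an order-0 page it owns, and the function flips the VMA to VM_MIXEDMAP (as shown above) so vm_normal_page() keeps working. A hypothetical ->mmap handler for a driver exposing a single kernel page (demo_page is assumed to have been allocated at probe time):

static struct page *demo_page;	/* assumed: allocated with alloc_page() at probe */

static int demo_mmap_one_page(struct file *filp, struct vm_area_struct *vma)
{
	if (vma->vm_end - vma->vm_start != PAGE_SIZE || vma->vm_pgoff)
		return -EINVAL;
	return vm_insert_page(vma, vma->vm_start, demo_page);
}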
1559 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
1562 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1578 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
1604 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pfn() argument
1608 pgprot_t pgprot = vma->vm_page_prot; in vm_insert_pfn()
1615 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vm_insert_pfn()
1616 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vm_insert_pfn()
1618 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vm_insert_pfn()
1619 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vm_insert_pfn()
1621 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_pfn()
1623 if (track_pfn_insert(vma, &pgprot, pfn)) in vm_insert_pfn()
1626 ret = insert_pfn(vma, addr, pfn, pgprot); in vm_insert_pfn()
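vm_insert_pfn() installs a raw PFN with no struct page behind it, which is why the BUG_ONs above insist on a pure VM_PFNMAP, non-COW mapping. Drivers usually call it from a ->fault handler; a hypothetical one using the fault API of this kernel generation (vmf->virtual_address; demo_base_pfn is made up):

static unsigned long demo_base_pfn;	/* assumed: taken from the device BAR at probe */

static int demo_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	int err = vm_insert_pfn(vma, addr, demo_base_pfn + vmf->pgoff);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;		/* the PTE is already in place */
}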
1632 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vm_insert_mixed() argument
1635 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); in vm_insert_mixed()
1637 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_mixed()
1651 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_mixed()
1653 return insert_pfn(vma, addr, pfn, vma->vm_page_prot); in vm_insert_mixed()
1734 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1740 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
1761 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range()
1762 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
1764 vma->vm_pgoff = pfn; in remap_pfn_range()
1767 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
1771 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1776 flush_cache_range(vma, addr, end); in remap_pfn_range()
1786 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); in remap_pfn_range()
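remap_pfn_range() maps a whole physically contiguous range at mmap() time and, as the listing shows, forces VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP on the VMA. A hypothetical ->mmap handler for device memory (demo_phys_base and demo_phys_len are made up; pgprot_noncached() is a typical but device-specific choice):

static phys_addr_t demo_phys_base;	/* assumed: the device's MMIO base */
static unsigned long demo_phys_len;	/* assumed: length of that region */

static int demo_iomem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (demo_phys_base >> PAGE_SHIFT) + vma->vm_pgoff;

	if ((vma->vm_pgoff << PAGE_SHIFT) + size > demo_phys_len)
		return -EINVAL;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}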
1805 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1824 if (vma->vm_pgoff > pages) in vm_iomap_memory()
1826 pfn += vma->vm_pgoff; in vm_iomap_memory()
1827 pages -= vma->vm_pgoff; in vm_iomap_memory()
1830 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1835 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
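vm_iomap_memory() is the convenience wrapper for exactly that pattern: the driver passes the physical base and length of the whole region, and the helper derives the PFN, honours vm_pgoff and bounds-checks the VMA itself, as the lines above show. Reusing the made-up demo_phys_base/demo_phys_len from the previous sketch, the handler collapses to:

static int demo_iomem_mmap_simple(struct file *filp, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, demo_phys_base, demo_phys_len);
}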
1964 …oid cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) in cow_user_page() argument
1989 copy_user_highpage(dst, src, va, vma); in cow_user_page()
1998 static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, in do_page_mkwrite() argument
2010 ret = vma->vm_ops->page_mkwrite(vma, &vmf); in do_page_mkwrite()
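do_page_mkwrite() gives the owner of a shared file mapping a chance to prepare before a clean page becomes writable; the contract relied on by wp_page_shared() and do_shared_fault() further down is that the handler returns VM_FAULT_LOCKED with the page locked, or an error/NOPAGE bit. A hypothetical ->page_mkwrite using the (vma, vmf) prototype of this era (the storage-reservation step is elided):

static int demo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		return VM_FAULT_NOPAGE;	/* raced with truncate: retry the fault */
	}
	/* ... reserve backing storage for the coming write here ... */
	set_page_dirty(page);
	return VM_FAULT_LOCKED;
}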
2034 struct vm_area_struct *vma, unsigned long address, in wp_page_reuse() argument
2049 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_reuse()
2051 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
2052 if (ptep_set_access_flags(vma, address, page_table, entry, 1)) in wp_page_reuse()
2053 update_mmu_cache(vma, address, page_table); in wp_page_reuse()
2078 file_update_time(vma->vm_file); in wp_page_reuse()
2100 static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_copy() argument
2112 if (unlikely(anon_vma_prepare(vma))) in wp_page_copy()
2116 new_page = alloc_zeroed_user_highpage_movable(vma, address); in wp_page_copy()
2120 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in wp_page_copy()
2123 cow_user_page(new_page, old_page, address, vma); in wp_page_copy()
2146 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_copy()
2147 entry = mk_pte(new_page, vma->vm_page_prot); in wp_page_copy()
2148 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
2155 ptep_clear_flush_notify(vma, address, page_table); in wp_page_copy()
2156 page_add_new_anon_rmap(new_page, vma, address); in wp_page_copy()
2158 lru_cache_add_active_or_unevictable(new_page, vma); in wp_page_copy()
2165 update_mmu_cache(vma, address, page_table); in wp_page_copy()
2209 if (page_copied && (vma->vm_flags & VM_LOCKED)) { in wp_page_copy()
2230 struct vm_area_struct *vma, unsigned long address, in wp_pfn_shared() argument
2234 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
2237 .pgoff = linear_page_index(vma, address), in wp_pfn_shared()
2244 ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); in wp_pfn_shared()
2257 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, in wp_pfn_shared()
2261 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_shared() argument
2276 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
2280 tmp = do_page_mkwrite(vma, old_page, address); in wp_page_shared()
2303 return wp_page_reuse(mm, vma, address, page_table, ptl, in wp_page_shared()
2325 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_wp_page() argument
2332 old_page = vm_normal_page(vma, address, orig_pte); in do_wp_page()
2341 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2343 return wp_pfn_shared(mm, vma, address, page_table, ptl, in do_wp_page()
2347 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2376 page_move_anon_rmap(old_page, vma, address); in do_wp_page()
2378 return wp_page_reuse(mm, vma, address, page_table, ptl, in do_wp_page()
2382 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2384 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2394 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2398 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
2402 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
2408 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
2411 vma_interval_tree_foreach(vma, root, in unmap_mapping_range_tree()
2414 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
2415 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
2424 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
2425 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2426 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2486 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_swap_page() argument
2509 print_bad_pte(vma, address, orig_pte, NULL); in do_swap_page()
2518 GFP_HIGHUSER_MOVABLE, vma, address); in do_swap_page()
2564 page = ksm_might_need_to_copy(page, vma, address); in do_swap_page()
2600 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
2602 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
2607 flush_icache_page(vma, page); in do_swap_page()
2612 do_page_add_anon_rmap(page, vma, address, exclusive); in do_swap_page()
2615 page_add_new_anon_rmap(page, vma, address); in do_swap_page()
2617 lru_cache_add_active_or_unevictable(page, vma); in do_swap_page()
2621 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) in do_swap_page()
2638 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2645 update_mmu_cache(vma, address, page_table); in do_swap_page()
2669 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) in check_stack_guard_page() argument
2672 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { in check_stack_guard_page()
2673 struct vm_area_struct *prev = vma->vm_prev; in check_stack_guard_page()
2684 return expand_downwards(vma, address - PAGE_SIZE); in check_stack_guard_page()
2686 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { in check_stack_guard_page()
2687 struct vm_area_struct *next = vma->vm_next; in check_stack_guard_page()
2693 return expand_upwards(vma, address + PAGE_SIZE); in check_stack_guard_page()
2703 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_anonymous_page() argument
2715 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
2719 if (check_stack_guard_page(vma, address) < 0) in do_anonymous_page()
2725 vma->vm_page_prot)); in do_anonymous_page()
2730 if (userfaultfd_missing(vma)) { in do_anonymous_page()
2732 return handle_userfault(vma, address, flags, in do_anonymous_page()
2739 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
2741 page = alloc_zeroed_user_highpage_movable(vma, address); in do_anonymous_page()
2755 entry = mk_pte(page, vma->vm_page_prot); in do_anonymous_page()
2756 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
2764 if (userfaultfd_missing(vma)) { in do_anonymous_page()
2768 return handle_userfault(vma, address, flags, in do_anonymous_page()
2773 page_add_new_anon_rmap(page, vma, address); in do_anonymous_page()
2775 lru_cache_add_active_or_unevictable(page, vma); in do_anonymous_page()
2780 update_mmu_cache(vma, address, page_table); in do_anonymous_page()
2799 static int __do_fault(struct vm_area_struct *vma, unsigned long address, in __do_fault() argument
2812 ret = vma->vm_ops->fault(vma, &vmf); in __do_fault()
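__do_fault() simply dispatches to the mapping's ->fault handler; most filesystems point that at the generic page-cache code. A sketch of a typical vm_operations_struct of this era, wiring in the hypothetical demo_page_mkwrite() from the earlier sketch:

static const struct vm_operations_struct demo_file_vm_ops = {
	.fault		= filemap_fault,	/* generic page-cache fault */
	.map_pages	= filemap_map_pages,	/* consumed by do_fault_around() below */
	.page_mkwrite	= demo_page_mkwrite,
};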
2850 void do_set_pte(struct vm_area_struct *vma, unsigned long address, in do_set_pte() argument
2855 flush_icache_page(vma, page); in do_set_pte()
2856 entry = mk_pte(page, vma->vm_page_prot); in do_set_pte()
2858 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_set_pte()
2860 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
2861 page_add_new_anon_rmap(page, vma, address); in do_set_pte()
2863 inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES); in do_set_pte()
2866 set_pte_at(vma->vm_mm, address, pte, entry); in do_set_pte()
2869 update_mmu_cache(vma, address, pte); in do_set_pte()
2936 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, in do_fault_around() argument
2947 start_addr = max(address & mask, vma->vm_start); in do_fault_around()
2958 max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, in do_fault_around()
2966 if (start_addr >= vma->vm_end) in do_fault_around()
2976 vma->vm_ops->map_pages(vma, &vmf); in do_fault_around()
2979 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_read_fault() argument
2993 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { in do_read_fault()
2995 do_fault_around(vma, address, pte, pgoff, flags); in do_read_fault()
3001 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_read_fault()
3012 do_set_pte(vma, address, fault_page, pte, false, false); in do_read_fault()
3019 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_cow_fault() argument
3029 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
3032 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in do_cow_fault()
3041 ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); in do_cow_fault()
3046 copy_user_highpage(new_page, fault_page, address, vma); in do_cow_fault()
3060 i_mmap_unlock_read(vma->vm_file->f_mapping); in do_cow_fault()
3064 do_set_pte(vma, address, new_page, pte, true, true); in do_cow_fault()
3066 lru_cache_add_active_or_unevictable(new_page, vma); in do_cow_fault()
3076 i_mmap_unlock_read(vma->vm_file->f_mapping); in do_cow_fault()
3085 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_shared_fault() argument
3096 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_shared_fault()
3104 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
3106 tmp = do_page_mkwrite(vma, fault_page, address); in do_shared_fault()
3121 do_set_pte(vma, address, fault_page, pte, true, false); in do_shared_fault()
3134 if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { in do_shared_fault()
3142 if (!vma->vm_ops->page_mkwrite) in do_shared_fault()
3143 file_update_time(vma->vm_file); in do_shared_fault()
3154 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_fault() argument
3159 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in do_fault()
3163 if (!vma->vm_ops->fault) in do_fault()
3166 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3168 if (!(vma->vm_flags & VM_SHARED)) in do_fault()
3169 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3171 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
3174 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
3186 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
3189 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_numa_page() argument
3202 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); in do_numa_page()
3221 pte = pte_modify(pte, vma->vm_page_prot); in do_numa_page()
3226 update_mmu_cache(vma, addr, ptep); in do_numa_page()
3228 page = vm_normal_page(vma, addr, pte); in do_numa_page()
3242 if (!(vma->vm_flags & VM_WRITE)) in do_numa_page()
3249 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
3254 target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); in do_numa_page()
3262 migrated = migrate_misplaced_page(page, vma, target_nid); in do_numa_page()
3275 static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in create_huge_pmd() argument
3278 if (vma_is_anonymous(vma)) in create_huge_pmd()
3279 return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); in create_huge_pmd()
3280 if (vma->vm_ops->pmd_fault) in create_huge_pmd()
3281 return vma->vm_ops->pmd_fault(vma, address, pmd, flags); in create_huge_pmd()
3285 static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in wp_huge_pmd() argument
3289 if (vma_is_anonymous(vma)) in wp_huge_pmd()
3290 return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); in wp_huge_pmd()
3291 if (vma->vm_ops->pmd_fault) in wp_huge_pmd()
3292 return vma->vm_ops->pmd_fault(vma, address, pmd, flags); in wp_huge_pmd()
3313 struct vm_area_struct *vma, unsigned long address, in handle_pte_fault() argument
3331 if (vma_is_anonymous(vma)) in handle_pte_fault()
3332 return do_anonymous_page(mm, vma, address, in handle_pte_fault()
3335 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3338 return do_swap_page(mm, vma, address, in handle_pte_fault()
3343 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3351 return do_wp_page(mm, vma, address, in handle_pte_fault()
3356 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3357 update_mmu_cache(vma, address, pte); in handle_pte_fault()
3366 flush_tlb_fix_spurious_fault(vma, address); in handle_pte_fault()
3379 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in __handle_mm_fault() argument
3387 if (unlikely(is_vm_hugetlb_page(vma))) in __handle_mm_fault()
3388 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3397 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3398 int ret = create_huge_pmd(mm, vma, address, pmd, flags); in __handle_mm_fault()
3418 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3422 ret = wp_huge_pmd(mm, vma, address, pmd, in __handle_mm_fault()
3427 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3440 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3463 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3472 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in handle_mm_fault() argument
3492 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
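handle_mm_fault() is the entry point architectures call from their page-fault exception handlers once the VMA has been located under mmap_sem. A condensed, hedged sketch of such a caller (stack expansion, signal delivery and the retry loop are elided; the explicit mm argument matches this kernel generation):

static int demo_arch_fault(unsigned long address, unsigned int flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);
		return VM_FAULT_SIGSEGV;	/* real handlers also try expand_stack() */
	}
	fault = handle_mm_fault(mm, vma, address, flags);
	if (!(fault & VM_FAULT_RETRY))
		up_read(&mm->mmap_sem);	/* on retry, mmap_sem was already dropped */
	/* ... translate VM_FAULT_OOM / VM_FAULT_SIGBUS into signals, retry, etc. ... */
	return fault;
}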
3624 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
3631 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
3634 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
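follow_pfn() lets the holder of a VM_IO/VM_PFNMAP VMA look up the PFN currently mapped at an address, which drivers use to translate user pointers into physical addresses. A hypothetical helper:

static int demo_user_vaddr_to_phys(struct vm_area_struct *vma,
				   unsigned long addr, phys_addr_t *phys)
{
	unsigned long pfn;
	int err = follow_pfn(vma, addr, &pfn);

	if (err)
		return err;	/* -EINVAL for ordinary (non-PFNMAP) mappings */
	*phys = ((phys_addr_t)pfn << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return 0;
}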
3644 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
3652 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
3655 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
3672 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
3680 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
3702 struct vm_area_struct *vma; in __access_remote_vm() local
3713 write, 1, &page, &vma); in __access_remote_vm()
3722 vma = find_vma(mm, addr); in __access_remote_vm()
3723 if (!vma || vma->vm_start > addr) in __access_remote_vm()
3725 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
3726 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
3740 copy_to_user_page(vma, page, addr, in __access_remote_vm()
3744 copy_from_user_page(vma, page, addr, in __access_remote_vm()
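__access_remote_vm() is the engine behind ptrace PEEK/POKE and /proc/<pid>/mem; external callers normally reach it through the exported wrappers access_remote_vm() and access_process_vm(). A hypothetical read through the latter, using the int write-flag signature of this era:

static int demo_peek_remote(struct task_struct *task, unsigned long addr,
			    unsigned long *val)
{
	int copied = access_process_vm(task, addr, val, sizeof(*val), 0 /* read */);

	return copied == sizeof(*val) ? 0 : -EIO;
}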
3802 struct vm_area_struct *vma; in print_vma_addr() local
3812 vma = find_vma(mm, ip); in print_vma_addr()
3813 if (vma && vma->vm_file) { in print_vma_addr()
3814 struct file *f = vma->vm_file; in print_vma_addr()
3823 vma->vm_start, in print_vma_addr()
3824 vma->vm_end - vma->vm_start); in print_vma_addr()
3887 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
3896 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
3905 unsigned long addr, struct vm_area_struct *vma, in copy_user_huge_page() argument
3911 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()
3919 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); in copy_user_huge_page()