Lines matching refs:vma (uses of the identifier vma in mm/huge_memory.c; "argument" and "local" mark whether vma is a function parameter or a local variable at that line)

699 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)  in maybe_pmd_mkwrite()  argument
701 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
715 struct vm_area_struct *vma, in __do_huge_pmd_anonymous_page() argument
758 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
765 ret = handle_userfault(vma, address, flags, in __do_huge_pmd_anonymous_page()
771 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
772 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
773 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
775 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
794 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
800 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
808 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_anonymous_page() argument
816 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in do_huge_pmd_anonymous_page()
818 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
820 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
842 if (userfaultfd_missing(vma)) { in do_huge_pmd_anonymous_page()
844 ret = handle_userfault(vma, address, flags, in do_huge_pmd_anonymous_page()
848 set_huge_zero_page(pgtable, mm, vma, in do_huge_pmd_anonymous_page()
862 gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); in do_huge_pmd_anonymous_page()
863 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
868 return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, in do_huge_pmd_anonymous_page()
872 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
875 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
884 entry = maybe_pmd_mkwrite(entry, vma); in insert_pfn_pmd()
887 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
892 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_pmd() argument
895 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pmd()
901 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_pmd()
902 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
904 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
905 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vmf_insert_pfn_pmd()
907 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd()
909 if (track_pfn_insert(vma, &pgprot, pfn)) in vmf_insert_pfn_pmd()
911 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); in vmf_insert_pfn_pmd()
917 struct vm_area_struct *vma) in copy_huge_pmd() argument
953 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, in copy_huge_pmd()
965 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ in copy_huge_pmd()
989 struct vm_area_struct *vma, in huge_pmd_set_accessed() argument
1004 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) in huge_pmd_set_accessed()
1005 update_mmu_cache_pmd(vma, address, pmd); in huge_pmd_set_accessed()
1043 struct vm_area_struct *vma, in do_huge_pmd_wp_page_fallback() argument
1068 vma, address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
1089 haddr + PAGE_SIZE * i, vma); in do_huge_pmd_wp_page_fallback()
1103 pmdp_huge_clear_flush_notify(vma, haddr, pmd); in do_huge_pmd_wp_page_fallback()
1111 entry = mk_pte(pages[i], vma->vm_page_prot); in do_huge_pmd_wp_page_fallback()
1112 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_huge_pmd_wp_page_fallback()
1115 page_add_new_anon_rmap(pages[i], vma, haddr); in do_huge_pmd_wp_page_fallback()
1117 lru_cache_add_active_or_unevictable(pages[i], vma); in do_huge_pmd_wp_page_fallback()
1151 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_wp_page() argument
1164 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1177 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1178 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) in do_huge_pmd_wp_page()
1179 update_mmu_cache_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1186 if (transparent_hugepage_enabled(vma) && in do_huge_pmd_wp_page()
1188 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); in do_huge_pmd_wp_page()
1189 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_wp_page()
1195 split_huge_page_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1198 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, in do_huge_pmd_wp_page()
1216 split_huge_page_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1227 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1244 entry = mk_huge_pmd(new_page, vma->vm_page_prot); in do_huge_pmd_wp_page()
1245 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1246 pmdp_huge_clear_flush_notify(vma, haddr, pmd); in do_huge_pmd_wp_page()
1247 page_add_new_anon_rmap(new_page, vma, haddr); in do_huge_pmd_wp_page()
1249 lru_cache_add_active_or_unevictable(new_page, vma); in do_huge_pmd_wp_page()
1251 update_mmu_cache_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1272 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1277 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1306 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in follow_trans_huge_pmd()
1308 update_mmu_cache_pmd(vma, addr, pmd); in follow_trans_huge_pmd()
1310 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_trans_huge_pmd()
1328 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_numa_page() argument
1343 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); in do_huge_pmd_numa_page()
1372 if (!(vma->vm_flags & VM_WRITE)) in do_huge_pmd_numa_page()
1380 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1424 migrated = migrate_misplaced_transhuge_page(mm, vma, in do_huge_pmd_numa_page()
1436 pmd = pmd_modify(pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1441 update_mmu_cache_pmd(vma, addr, pmdp); in do_huge_pmd_numa_page()
1456 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1462 if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1) in zap_huge_pmd()
1473 if (vma_is_dax(vma)) { in zap_huge_pmd()
1496 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, in move_huge_pmd() argument
1505 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1526 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl); in move_huge_pmd()
1554 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1557 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1561 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in change_huge_pmd()
1598 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, in __pmd_trans_huge_lock() argument
1601 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1605 wait_split_huge_page(vma->anon_vma, pmd); in __pmd_trans_huge_lock()
1672 struct vm_area_struct *vma, in __split_huge_page_splitting() argument
1675 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting()
1694 pmdp_splitting_flush(vma, address, pmd); in __split_huge_page_splitting()
1824 struct vm_area_struct *vma, in __split_huge_page_map() argument
1827 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map()
1851 entry = mk_pte(page + i, vma->vm_page_prot); in __split_huge_page_map()
1852 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in __split_huge_page_map()
1890 pmdp_invalidate(vma, address, pmd); in __split_huge_page_map()
1913 struct vm_area_struct *vma = avc->vma; in __split_huge_page() local
1914 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1915 BUG_ON(is_vma_temporary_stack(vma)); in __split_huge_page()
1916 mapcount += __split_huge_page_splitting(page, vma, addr); in __split_huge_page()
1938 struct vm_area_struct *vma = avc->vma; in __split_huge_page() local
1939 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1940 BUG_ON(is_vma_temporary_stack(vma)); in __split_huge_page()
1941 mapcount2 += __split_huge_page_map(page, vma, addr); in __split_huge_page()
1995 int hugepage_madvise(struct vm_area_struct *vma, in hugepage_madvise() argument
2006 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
2021 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) in hugepage_madvise()
2127 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
2131 if (!vma->anon_vma) in khugepaged_enter_vma_merge()
2137 if (vma->vm_ops || (vm_flags & VM_NO_THP)) in khugepaged_enter_vma_merge()
2140 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
2141 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
2143 return khugepaged_enter(vma, vm_flags); in khugepaged_enter_vma_merge()
2196 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, in __collapse_huge_page_isolate() argument
2209 if (!userfaultfd_armed(vma) && in __collapse_huge_page_isolate()
2217 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
2272 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
2283 struct vm_area_struct *vma, in __collapse_huge_page_copy() argument
2294 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
2304 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
2309 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
2322 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
2490 static bool hugepage_vma_check(struct vm_area_struct *vma) in hugepage_vma_check() argument
2492 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || in hugepage_vma_check()
2493 (vma->vm_flags & VM_NOHUGEPAGE)) in hugepage_vma_check()
2496 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_check()
2498 if (is_vma_temporary_stack(vma)) in hugepage_vma_check()
2500 return !(vma->vm_flags & VM_NO_THP); in hugepage_vma_check()
2506 struct vm_area_struct *vma, in collapse_huge_page() argument
2545 vma = find_vma(mm, address); in collapse_huge_page()
2546 if (!vma) in collapse_huge_page()
2548 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in collapse_huge_page()
2549 hend = vma->vm_end & HPAGE_PMD_MASK; in collapse_huge_page()
2552 if (!hugepage_vma_check(vma)) in collapse_huge_page()
2558 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
2573 _pmd = pmdp_collapse_flush(vma, address, pmd); in collapse_huge_page()
2578 isolated = __collapse_huge_page_isolate(vma, address, pte); in collapse_huge_page()
2592 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
2600 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
2602 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); in collapse_huge_page()
2607 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
2608 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); in collapse_huge_page()
2619 page_add_new_anon_rmap(new_page, vma, address); in collapse_huge_page()
2621 lru_cache_add_active_or_unevictable(new_page, vma); in collapse_huge_page()
2624 update_mmu_cache_pmd(vma, address, pmd); in collapse_huge_page()
2640 struct vm_area_struct *vma, in khugepaged_scan_pmd() argument
2665 if (!userfaultfd_armed(vma) && in khugepaged_scan_pmd()
2676 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
2701 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
2711 collapse_huge_page(mm, address, hpage, vma, node); in khugepaged_scan_pmd()
2747 struct vm_area_struct *vma; in khugepaged_scan_mm_slot() local
2766 vma = NULL; in khugepaged_scan_mm_slot()
2768 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2771 for (; vma; vma = vma->vm_next) { in khugepaged_scan_mm_slot()
2779 if (!hugepage_vma_check(vma)) { in khugepaged_scan_mm_slot()
2784 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2785 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2803 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2826 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
2931 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
2934 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2939 pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2947 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2959 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, in __split_huge_page_pmd() argument
2964 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_pmd()
2969 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); in __split_huge_page_pmd()
2978 if (vma_is_dax(vma)) { in __split_huge_page_pmd()
2979 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_page_pmd()
2983 __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_page_pmd()
3011 struct vm_area_struct *vma; in split_huge_page_pmd_mm() local
3013 vma = find_vma(mm, address); in split_huge_page_pmd_mm()
3014 BUG_ON(vma == NULL); in split_huge_page_pmd_mm()
3015 split_huge_page_pmd(vma, address, pmd); in split_huge_page_pmd_mm()
3045 void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
3056 (start & HPAGE_PMD_MASK) >= vma->vm_start && in vma_adjust_trans_huge()
3057 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in vma_adjust_trans_huge()
3058 split_huge_page_address(vma->vm_mm, start); in vma_adjust_trans_huge()
3066 (end & HPAGE_PMD_MASK) >= vma->vm_start && in vma_adjust_trans_huge()
3067 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in vma_adjust_trans_huge()
3068 split_huge_page_address(vma->vm_mm, end); in vma_adjust_trans_huge()
3076 struct vm_area_struct *next = vma->vm_next; in vma_adjust_trans_huge()
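
For orientation, the cluster of references at lines 771-775 of __do_huge_pmd_anonymous_page() above shows the pattern that recurs throughout this listing: build a huge PMD from the VMA's page protection, mark it dirty and (only if VM_WRITE allows) writable, hook the page into the anon rmap and the LRU, and install the entry. The sketch below is a simplified paraphrase of that sequence, not the exact kernel code: it assumes it lives next to the static helpers in the same file, and the pgtable deposit, set_pmd_at() install, counter update, and locking are recalled from the surrounding source rather than taken from the matched lines; memcg charging, userfaultfd handling, and error paths are omitted.

/*
 * Simplified sketch of the huge-PMD install sequence referenced at
 * lines 771-775 above (__do_huge_pmd_anonymous_page). Assumes the
 * static helpers mk_huge_pmd() and maybe_pmd_mkwrite() from the same
 * file; the deposit/set_pmd_at/counter/locking steps are recalled
 * from the surrounding kernel code, not from the matched lines.
 */
static void install_anon_huge_pmd(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long haddr, pmd_t *pmd,
				  struct page *page, pgtable_t pgtable)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);	/* serialise against concurrent faults */
	pmd_t entry;

	/* Build the PMD from the VMA's protection bits (line 771). */
	entry = mk_huge_pmd(page, vma->vm_page_prot);
	/* Dirty it; make it writable only if the VMA has VM_WRITE (line 772). */
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	/* Hook the new page into the anon rmap and the LRU (lines 773, 775). */
	page_add_new_anon_rmap(page, vma, haddr);
	lru_cache_add_active_or_unevictable(page, vma);

	/* Deposit the preallocated pte page table, install the huge PMD,
	 * and account the mapping as HPAGE_PMD_NR anonymous pages. */
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);

	spin_unlock(ptl);
}

The same mk_huge_pmd() / maybe_pmd_mkwrite(pmd_mkdirty()) pairing appears again in the write-protect copy path (lines 1244-1245) and in collapse_huge_page() (lines 2607-2608), which is why those lines dominate this reference listing.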