Lines matching references to vma (mm/hugetlb.c):
211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) in subpool_vma() argument
213 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
617 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
619 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
620 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
623 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
626 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
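The index arithmetic visible in vma_hugecache_offset() above (lines 619-620) turns a faulting address into a huge-page-sized page-cache index: the distance into the mapping in huge pages, plus the mapping's file offset converted from base pages to huge pages. A minimal user-space sketch of that arithmetic, assuming a 2 MiB huge page on 4 KiB base pages; the HUGE_PAGE_* constants here stand in for huge_page_shift()/huge_page_order():

#include <stdio.h>

/* Stand-ins for the kernel helpers: a 2 MiB huge page on 4 KiB base pages
 * gives huge_page_shift() == 21 and huge_page_order() == 21 - 12 == 9. */
#define HUGE_PAGE_SHIFT 21
#define HUGE_PAGE_ORDER (HUGE_PAGE_SHIFT - 12)

static unsigned long hugecache_offset(unsigned long vm_start,
                                      unsigned long vm_pgoff,
                                      unsigned long address)
{
        /* Same arithmetic as vma_hugecache_offset(): distance into the
         * mapping in huge pages, plus the file offset of the mapping
         * converted from base pages to huge pages. */
        return ((address - vm_start) >> HUGE_PAGE_SHIFT) +
               (vm_pgoff >> HUGE_PAGE_ORDER);
}

int main(void)
{
        /* Mapping starts at 0x40000000 and is backed from file offset 0
         * (vm_pgoff == 0); a fault three huge pages past vm_start lands
         * at index 3. */
        unsigned long idx = hugecache_offset(0x40000000UL, 0,
                                             0x40000000UL + 3UL * (1UL << 21));
        printf("huge page index = %lu\n", idx);  /* prints 3 */
        return 0;
}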
633 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
637 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize()
640 hstate = hstate_vma(vma); in vma_kernel_pagesize()
653 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) in vma_mmu_pagesize() argument
655 return vma_kernel_pagesize(vma); in vma_mmu_pagesize()
687 static unsigned long get_vma_private_data(struct vm_area_struct *vma) in get_vma_private_data() argument
689 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
692 static void set_vma_private_data(struct vm_area_struct *vma, in set_vma_private_data() argument
695 vma->vm_private_data = (void *)value; in set_vma_private_data()
747 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) in vma_resv_map() argument
749 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in vma_resv_map()
750 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
751 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
757 return (struct resv_map *)(get_vma_private_data(vma) & in vma_resv_map()
762 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) in set_vma_resv_map() argument
764 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_map()
765 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
767 set_vma_private_data(vma, (get_vma_private_data(vma) & in set_vma_resv_map()
771 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) in set_vma_resv_flags() argument
773 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_flags()
774 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
776 set_vma_private_data(vma, get_vma_private_data(vma) | flags); in set_vma_resv_flags()
779 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) in is_vma_resv_set() argument
781 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in is_vma_resv_set()
783 return (get_vma_private_data(vma) & flag) != 0; in is_vma_resv_set()
787 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
789 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in reset_vma_resv_huge_pages()
790 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
791 vma->vm_private_data = (void *)0; in reset_vma_resv_huge_pages()
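For private mappings, the helpers above treat vma->vm_private_data as a tagged pointer: vma_resv_map() masks the low flag bits off to recover the resv_map pointer, set_vma_resv_map() keeps the flags while replacing the pointer, and set_vma_resv_flags()/is_vma_resv_set() manipulate the flag bits alone. A small user-space sketch of that encoding; the RESV_* values are stand-ins for HPAGE_RESV_OWNER/HPAGE_RESV_UNMAPPED and their exact bit positions are an assumption here:

#include <assert.h>

/* Stand-ins for the kernel's HPAGE_RESV_* bits (assumed to occupy the low
 * two bits of vm_private_data). */
#define RESV_OWNER     (1UL << 0)
#define RESV_UNMAPPED  (1UL << 1)
#define RESV_MASK      ((1UL << 2) - 1)

struct resv_map { int dummy; };

static struct resv_map *decode_resv_map(unsigned long priv)
{
        /* vma_resv_map() for a private mapping: mask off the flag bits. */
        return (struct resv_map *)(priv & ~RESV_MASK);
}

static unsigned long encode_resv_map(unsigned long priv, struct resv_map *map)
{
        /* set_vma_resv_map(): keep the flags, replace the pointer. */
        return (priv & RESV_MASK) | (unsigned long)map;
}

int main(void)
{
        static struct resv_map map;     /* aligned, so its low bits are clear */
        unsigned long priv = 0;

        priv = encode_resv_map(priv, &map);
        priv |= RESV_OWNER;             /* set_vma_resv_flags() */

        assert(decode_resv_map(priv) == &map);
        assert(priv & RESV_OWNER);      /* is_vma_resv_set() */
        return 0;
}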
795 static bool vma_has_reserves(struct vm_area_struct *vma, long chg) in vma_has_reserves() argument
797 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
807 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
814 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
832 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in vma_has_reserves()
876 struct vm_area_struct *vma, in dequeue_huge_page_vma() argument
893 if (!vma_has_reserves(vma, chg) && in dequeue_huge_page_vma()
903 zonelist = huge_zonelist(vma, address, in dequeue_huge_page_vma()
913 if (!vma_has_reserves(vma, chg)) in dequeue_huge_page_vma()
1456 struct vm_area_struct *vma, unsigned long addr, int nid) in __hugetlb_alloc_buddy_huge_page() argument
1471 if (!IS_ENABLED(CONFIG_NUMA) || !vma) { in __hugetlb_alloc_buddy_huge_page()
1498 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask); in __hugetlb_alloc_buddy_huge_page()
1521 struct vm_area_struct *vma, unsigned long addr, int nid) in __alloc_buddy_huge_page() argument
1534 if (vma || (addr != -1)) { in __alloc_buddy_huge_page()
1571 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid); in __alloc_buddy_huge_page()
1613 struct vm_area_struct *vma, unsigned long addr) in __alloc_buddy_huge_page_with_mpol() argument
1615 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE); in __alloc_buddy_huge_page_with_mpol()
1785 struct vm_area_struct *vma, unsigned long addr, in __vma_reservation_common() argument
1792 resv = vma_resv_map(vma); in __vma_reservation_common()
1796 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
1812 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
1819 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
1821 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
1825 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
1827 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
1831 struct vm_area_struct *vma, unsigned long addr) in vma_end_reservation() argument
1833 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
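The three wrappers above drive __vma_reservation_common() in a needs/commit/end protocol that alloc_huge_page() below relies on: ask whether the page at this offset is already reserved, then either commit the consumed reservation after a successful allocation or end (abandon) it on failure. A deliberately tiny user-space model of that protocol; a single bool per index stands in for the kernel's resv_map region list, purely to keep the sketch runnable:

#include <stdbool.h>
#include <stdio.h>

#define NR_IDX 8
static bool reserved[NR_IDX];           /* index -> reservation outstanding? */

/* vma_needs_reservation(): 0 if the page is pre-paid, 1 if a new charge
 * would be needed. */
static long needs_resv(unsigned long idx)  { return reserved[idx] ? 0 : 1; }

/* vma_commit_reservation(): record that the reservation was consumed. */
static void commit_resv(unsigned long idx) { reserved[idx] = false; }

/* vma_end_reservation(): nothing was consumed, drop the pending charge. */
static void end_resv(unsigned long idx)    { (void)idx; }

int main(void)
{
        unsigned long idx = 3;

        reserved[idx] = true;           /* set up at reserve/mmap time */

        long chg = needs_resv(idx);     /* 0: this fault is pre-paid */
        printf("needs reservation: %ld\n", chg);

        bool alloc_ok = true;           /* pretend the page allocation ran */
        if (alloc_ok)
                commit_resv(idx);
        else
                end_resv(idx);
        return 0;
}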
1836 struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page() argument
1839 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_huge_page()
1840 struct hstate *h = hstate_vma(vma); in alloc_huge_page()
1853 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
1867 vma_end_reservation(h, vma, addr); in alloc_huge_page()
1893 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
1899 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { in alloc_huge_page()
1912 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
1935 vma_end_reservation(h, vma, addr); in alloc_huge_page()
1944 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, in alloc_huge_page_noerr() argument
1947 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); in alloc_huge_page_noerr()
2961 static void hugetlb_vm_op_open(struct vm_area_struct *vma) in hugetlb_vm_op_open() argument
2963 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open()
2973 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_open()
2977 static void hugetlb_vm_op_close(struct vm_area_struct *vma) in hugetlb_vm_op_close() argument
2979 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close()
2980 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_close()
2981 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close()
2985 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_close()
2988 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
2989 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3011 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in hugetlb_vm_op_fault() argument
3023 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
3030 vma->vm_page_prot))); in make_huge_pte()
3033 vma->vm_page_prot)); in make_huge_pte()
3037 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
3042 static void set_huge_ptep_writable(struct vm_area_struct *vma, in set_huge_ptep_writable() argument
3048 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable()
3049 update_mmu_cache(vma, address, ptep); in set_huge_ptep_writable()
3079 struct vm_area_struct *vma) in copy_hugetlb_page_range() argument
3085 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range()
3091 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
3093 mmun_start = vma->vm_start; in copy_hugetlb_page_range()
3094 mmun_end = vma->vm_end; in copy_hugetlb_page_range()
3098 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
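The cow test in copy_hugetlb_page_range() above (line 3091) marks a mapping for copy-on-write handling at fork time only when it may be written but is not shared. A runnable illustration of that flag test; the VM_* values are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define VM_SHARED   (1UL << 0)
#define VM_MAYWRITE (1UL << 1)

/* True only for private, potentially writable mappings: shared mappings
 * keep writing to the same pages, and read-only private mappings never
 * need a private copy. */
static bool needs_cow(unsigned long vm_flags)
{
        return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
        printf("private writable: %d\n", needs_cow(VM_MAYWRITE));              /* 1 */
        printf("shared writable:  %d\n", needs_cow(VM_SHARED | VM_MAYWRITE));  /* 0 */
        printf("private readonly: %d\n", needs_cow(0));                        /* 0 */
        return 0;
}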
3156 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
3161 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
3167 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range()
3172 WARN_ON(!is_vm_hugetlb_page(vma)); in __unmap_hugepage_range()
3176 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
3217 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); in __unmap_hugepage_range()
3253 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
3257 struct vm_area_struct *vma, unsigned long start, in __unmap_hugepage_range_final() argument
3260 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
3272 vma->vm_flags &= ~VM_MAYSHARE; in __unmap_hugepage_range_final()
3275 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, in unmap_hugepage_range() argument
3281 mm = vma->vm_mm; in unmap_hugepage_range()
3284 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); in unmap_hugepage_range()
3294 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, in unmap_ref_private() argument
3297 struct hstate *h = hstate_vma(vma); in unmap_ref_private()
3307 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
3308 vma->vm_pgoff; in unmap_ref_private()
3309 mapping = file_inode(vma->vm_file)->i_mapping; in unmap_ref_private()
3319 if (iter_vma == vma) in unmap_ref_private()
3350 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_cow() argument
3354 struct hstate *h = hstate_vma(vma); in hugetlb_cow()
3366 page_move_anon_rmap(old_page, vma, address); in hugetlb_cow()
3367 set_huge_ptep_writable(vma, address, ptep); in hugetlb_cow()
3380 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && in hugetlb_cow()
3391 new_page = alloc_huge_page(vma, address, outside_reserve); in hugetlb_cow()
3404 unmap_ref_private(mm, vma, old_page, address); in hugetlb_cow()
3427 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_cow()
3432 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
3451 huge_ptep_clear_flush(vma, address, ptep); in hugetlb_cow()
3454 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
3456 hugepage_add_new_anon_rmap(new_page, vma, address); in hugetlb_cow()
3473 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_page() argument
3478 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
3479 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3489 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_present() argument
3495 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
3496 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3521 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_no_page() argument
3525 struct hstate *h = hstate_vma(vma); in hugetlb_no_page()
3538 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { in hugetlb_no_page()
3554 page = alloc_huge_page(vma, address, 0); in hugetlb_no_page()
3567 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
3577 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_no_page()
3602 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
3603 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_no_page()
3608 vma_end_reservation(h, vma, address); in hugetlb_no_page()
3623 hugepage_add_new_anon_rmap(page, vma, address); in hugetlb_no_page()
3626 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
3627 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
3631 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
3633 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); in hugetlb_no_page()
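The write-permission logic visible in hugetlb_no_page() above follows the same pattern: the freshly installed PTE is writable only for shared writable mappings (lines 3626-3627), while a write fault on a private mapping drops into hugetlb_cow() instead (lines 3602 and 3631-3633). A small sketch of those two decisions, again with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE  (1UL << 0)
#define VM_SHARED (1UL << 1)

/* make_huge_pte() gets writable == true only for shared writable mappings. */
static bool pte_writable(unsigned long vm_flags)
{
        return (vm_flags & VM_WRITE) && (vm_flags & VM_SHARED);
}

/* A write fault on a non-shared mapping goes through the COW path. */
static bool cow_on_write_fault(unsigned long vm_flags, bool write_fault)
{
        return write_fault && !(vm_flags & VM_SHARED);
}

int main(void)
{
        printf("shared+write -> writable pte: %d\n",
               pte_writable(VM_SHARED | VM_WRITE));                  /* 1 */
        printf("private write fault -> cow:   %d\n",
               cow_on_write_fault(VM_WRITE, true));                  /* 1 */
        return 0;
}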
3651 struct vm_area_struct *vma, in hugetlb_fault_mutex_hash() argument
3658 if (vma->vm_flags & VM_SHARED) { in hugetlb_fault_mutex_hash()
3676 struct vm_area_struct *vma, in hugetlb_fault_mutex_hash() argument
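hugetlb_fault_mutex_hash() above reduces a fault to one of a fixed set of mutexes so that concurrent faults on the same huge page serialize: shared mappings key on the (mapping, index) pair, as the VM_SHARED test at line 3658 suggests, while private mappings use a per-process key instead (an assumption not shown in the lines above). A user-space sketch of the bucketing idea, with a trivial mixing function standing in for the kernel's hash and an assumed power-of-two table size:

#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 64    /* assumed power of two so the mask works */

/* Trivial mixing function standing in for the kernel's hash. */
static uint32_t mix(uint64_t a, uint64_t b)
{
        uint64_t x = (a * 0x9e3779b97f4a7c15ULL) ^ b;
        return (uint32_t)(x ^ (x >> 32));
}

/* Shared mappings would pass (mapping, page index); private mappings a
 * per-process key, so unrelated processes never contend on a bucket. */
static unsigned int fault_mutex_bucket(const void *key_ptr, unsigned long key_idx)
{
        return mix((uint64_t)(uintptr_t)key_ptr, key_idx) &
               (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
        int dummy_mapping;

        printf("bucket = %u\n", fault_mutex_bucket(&dummy_mapping, 42));
        return 0;
}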
3684 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_fault() argument
3694 struct hstate *h = hstate_vma(vma); in hugetlb_fault()
3704 migration_entry_wait_huge(vma, mm, ptep); in hugetlb_fault()
3715 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
3716 idx = vma_hugecache_offset(h, vma, address); in hugetlb_fault()
3723 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address); in hugetlb_fault()
3728 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); in hugetlb_fault()
3753 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_fault()
3758 vma_end_reservation(h, vma, address); in hugetlb_fault()
3760 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_fault()
3762 vma, address); in hugetlb_fault()
3787 ret = hugetlb_cow(mm, vma, address, ptep, entry, in hugetlb_fault()
3794 if (huge_ptep_set_access_flags(vma, address, ptep, entry, in hugetlb_fault()
3796 update_mmu_cache(vma, address, ptep); in hugetlb_fault()
3822 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, in follow_hugetlb_page() argument
3830 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page()
3832 while (vaddr < vma->vm_end && remainder) { in follow_hugetlb_page()
3867 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
3891 ret = hugetlb_fault(mm, vma, vaddr, in follow_hugetlb_page()
3909 vmas[i] = vma; in follow_hugetlb_page()
3915 if (vaddr < vma->vm_end && remainder && in follow_hugetlb_page()
3931 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, in hugetlb_change_protection() argument
3934 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
3938 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection()
3942 flush_cache_range(vma, address, end); in hugetlb_change_protection()
3945 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
3979 pte = arch_make_huge_pte(pte, vma, NULL, 0); in hugetlb_change_protection()
3991 flush_tlb_range(vma, start, end); in hugetlb_change_protection()
3993 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4001 struct vm_area_struct *vma, in hugetlb_reserve_pages() argument
4024 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
4036 set_vma_resv_map(vma, resv_map); in hugetlb_reserve_pages()
4037 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); in hugetlb_reserve_pages()
4078 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
4098 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
4100 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_reserve_pages()
4141 struct vm_area_struct *vma, in page_table_shareable() argument
4150 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in page_table_shareable()
4165 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) in vma_shareable() argument
4173 if (vma->vm_flags & VM_MAYSHARE && in vma_shareable()
4174 vma->vm_start <= base && end <= vma->vm_end) in vma_shareable()
4190 struct vm_area_struct *vma = find_vma(mm, addr); in huge_pmd_share() local
4191 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
4192 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
4193 vma->vm_pgoff; in huge_pmd_share()
4200 if (!vma_shareable(vma, addr)) in huge_pmd_share()
4205 if (svma == vma) in huge_pmd_share()
4208 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
4222 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); in huge_pmd_share()
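vma_shareable() above (lines 4173-4174) allows huge PMD sharing only when the PUD_SIZE-aligned window around the faulting address lies entirely inside a VM_MAYSHARE mapping; huge_pmd_share() then walks the mapping's interval tree looking for another VMA that covers the same window. A sketch of the range check, assuming the x86-64 value of PUD_SIZE (1 GiB with 4 KiB base pages):

#include <stdbool.h>
#include <stdio.h>

#define PUD_SIZE (1UL << 30)
#define PUD_MASK (~(PUD_SIZE - 1))

/* Mirrors the vm_start <= base && end <= vm_end test above: the whole
 * PUD-sized, PUD-aligned window around addr must sit inside the VMA. */
static bool pud_window_in_vma(unsigned long vm_start, unsigned long vm_end,
                              unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;
        unsigned long end  = base + PUD_SIZE;

        return vm_start <= base && end <= vm_end;
}

int main(void)
{
        /* A 2 GiB mapping at 1 GiB covers the aligned window around
         * 1.5 GiB, so sharing is possible there... */
        printf("%d\n", pud_window_in_vma(1UL << 30, 3UL << 30, 3UL << 29));  /* 1 */
        /* ...but not around an address in the first GiB. */
        printf("%d\n", pud_window_in_vma(1UL << 30, 3UL << 30, 1UL << 29));  /* 0 */
        return 0;
}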