Lines matching refs:vma (mm/rmap.c)

132 static void anon_vma_chain_link(struct vm_area_struct *vma,  in anon_vma_chain_link()  argument
136 avc->vma = vma; in anon_vma_chain_link()
138 list_add(&avc->same_vma, &vma->anon_vma_chain); in anon_vma_chain_link()
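
The three fragments above (lines 132-138) are the linking helper. Reconstructed as a sketch from these lines plus the surrounding mm/rmap.c of the same era (field names such as rb_root differ between kernel versions), the helper links one anon_vma_chain entry in both directions, onto the vma's own same_vma list and into the anon_vma's interval tree, so the reverse map can go from anon_vma to vma and teardown can go from vma to its anon_vmas:

    static void anon_vma_chain_link(struct vm_area_struct *vma,
                                    struct anon_vma_chain *avc,
                                    struct anon_vma *anon_vma)
    {
            avc->vma = vma;                                 /* line 136 */
            avc->anon_vma = anon_vma;
            list_add(&avc->same_vma, &vma->anon_vma_chain); /* line 138 */
            anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
    }
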
169 int anon_vma_prepare(struct vm_area_struct *vma) in anon_vma_prepare() argument
171 struct anon_vma *anon_vma = vma->anon_vma; in anon_vma_prepare()
176 struct mm_struct *mm = vma->vm_mm; in anon_vma_prepare()
183 anon_vma = find_mergeable_anon_vma(vma); in anon_vma_prepare()
195 if (likely(!vma->anon_vma)) { in anon_vma_prepare()
196 vma->anon_vma = anon_vma; in anon_vma_prepare()
197 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_prepare()
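
Lines 169-197 show the prepare path: reuse the vma's existing anon_vma if it has one, try to share a neighbour's via find_mergeable_anon_vma(), otherwise allocate a fresh one, and only publish it if nobody raced in first. A simplified sketch of that control flow (allocation-failure and error paths, locking, and the cleanup of an unused anon_vma are omitted; this is not the verbatim function):

    int anon_vma_prepare_sketch(struct vm_area_struct *vma)
    {
            struct anon_vma *anon_vma = vma->anon_vma;
            struct anon_vma_chain *avc;

            if (likely(anon_vma))
                    return 0;                               /* already prepared */

            avc = anon_vma_chain_alloc(GFP_KERNEL);
            anon_vma = find_mergeable_anon_vma(vma);        /* try a neighbour's anon_vma */
            if (!anon_vma)
                    anon_vma = anon_vma_alloc();            /* otherwise allocate one */

            /* The real code rechecks under the anon_vma lock and page_table_lock. */
            if (likely(!vma->anon_vma)) {                   /* nobody raced us */
                    vma->anon_vma = anon_vma;
                    anon_vma_chain_link(vma, avc, anon_vma);
            }
            return 0;
    }
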
311 int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) in anon_vma_fork() argument
322 vma->anon_vma = NULL; in anon_vma_fork()
328 error = anon_vma_clone(vma, pvma); in anon_vma_fork()
333 if (vma->anon_vma) in anon_vma_fork()
357 vma->anon_vma = anon_vma; in anon_vma_fork()
359 anon_vma_chain_link(vma, avc, anon_vma); in anon_vma_fork()
368 unlink_anon_vmas(vma); in anon_vma_fork()
372 void unlink_anon_vmas(struct vm_area_struct *vma) in unlink_anon_vmas() argument
381 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
399 if (vma->anon_vma) in unlink_anon_vmas()
400 vma->anon_vma->degree--; in unlink_anon_vmas()
408 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { in unlink_anon_vmas()
572 __vma_address(struct page *page, struct vm_area_struct *vma) in __vma_address() argument
575 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in __vma_address()
579 vma_address(struct page *page, struct vm_area_struct *vma) in vma_address() argument
581 unsigned long address = __vma_address(page, vma); in vma_address()
584 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in vma_address()
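
Lines 572-584 carry the core linear-mapping arithmetic: take the page's offset within the mapped object (pgoff), subtract the vma's starting offset (vm_pgoff), shift left by PAGE_SHIFT and add vm_start; vma_address() then sanity-checks that the result falls inside the vma. A self-contained user-space illustration of the same calculation (the toy_vma struct and the 4 KiB page size are stand-ins for this example, not the kernel's types):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages for the example */

    struct toy_vma {
            unsigned long vm_start; /* first virtual address covered */
            unsigned long vm_end;   /* one past the last covered address */
            unsigned long vm_pgoff; /* page offset of vm_start within the object */
    };

    /* Mirrors the __vma_address() arithmetic from line 575. */
    static unsigned long toy_vma_address(unsigned long pgoff, const struct toy_vma *vma)
    {
            return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    }

    int main(void)
    {
            struct toy_vma vma = {
                    .vm_start = 0x40000000UL,
                    .vm_end   = 0x40010000UL,       /* 16 pages long */
                    .vm_pgoff = 100,                /* vma starts at page 100 of the object */
            };
            /* Page 103 of the object sits 3 pages into the vma. */
            unsigned long addr = toy_vma_address(103, &vma);

            printf("page maps at %#lx (expected 0x40003000)\n", addr);
            return 0;
    }
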
694 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
703 if (!vma->anon_vma || !page__anon_vma || in page_address_in_vma()
704 vma->anon_vma->root != page__anon_vma->root) in page_address_in_vma()
707 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
711 address = __vma_address(page, vma); in page_address_in_vma()
712 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_address_in_vma()
803 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) in page_mapped_in_vma() argument
809 address = __vma_address(page, vma); in page_mapped_in_vma()
810 if (unlikely(address < vma->vm_start || address >= vma->vm_end)) in page_mapped_in_vma()
812 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
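
Lines 803-812 combine the two pieces above: compute the would-be address, reject it if it lies outside the vma, then ask page_check_address() whether a pte really maps the page there. Filling in the remainder from the same-era source (a sketch; locking details may differ by version):

    int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
    {
            unsigned long address;
            pte_t *pte;
            spinlock_t *ptl;

            address = __vma_address(page, vma);
            if (unlikely(address < vma->vm_start || address >= vma->vm_end))
                    return 0;                       /* page cannot map into this vma */
            pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
            if (!pte)
                    return 0;                       /* no pte maps the page here */
            pte_unmap_unlock(pte, ptl);
            return 1;
    }
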
829 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
832 struct mm_struct *mm = vma->vm_mm; in page_referenced_one()
849 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
856 if (pmdp_clear_flush_young_notify(vma, address, pmd)) in page_referenced_one()
870 if (vma->vm_flags & VM_LOCKED) { in page_referenced_one()
876 if (ptep_clear_flush_young_notify(vma, address, pte)) { in page_referenced_one()
884 if (likely(!(vma->vm_flags & VM_SEQ_READ))) in page_referenced_one()
897 pra->vm_flags |= vma->vm_flags; in page_referenced_one()
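
The fragments at lines 829-897 give page_referenced_one()'s decision logic: an mlocked vma ends the walk and reports VM_LOCKED; otherwise the accessed ("young") bit is cleared on the pmd (transparent huge page case) or the pte, and a cleared young bit counts as one reference unless the vma is VM_SEQ_READ; finally the vma's flags are ORed into the caller's result. Condensed into a pte-path fragment (pra stands for the page_referenced_arg structure the walker passes via its arg pointer; locking and the pmd path are omitted):

            if (vma->vm_flags & VM_LOCKED) {
                    pra->vm_flags |= VM_LOCKED;
                    return SWAP_FAIL;               /* stop the walk: page is mlocked */
            }

            if (ptep_clear_flush_young_notify(vma, address, pte)) {
                    /* Recently used, but sequential streams (VM_SEQ_READ) are not
                     * counted so their pages stay cheap to reclaim. */
                    if (likely(!(vma->vm_flags & VM_SEQ_READ)))
                            referenced++;
            }

            if (referenced) {
                    pra->referenced++;
                    pra->vm_flags |= vma->vm_flags; /* line 897 */
            }
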
907 static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) in invalid_page_referenced_vma() argument
912 if (!mm_match_cgroup(vma->vm_mm, memcg)) in invalid_page_referenced_vma()
976 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
979 struct mm_struct *mm = vma->vm_mm; in page_mkclean_one()
992 flush_cache_page(vma, address, pte_pfn(*pte)); in page_mkclean_one()
993 entry = ptep_clear_flush(vma, address, pte); in page_mkclean_one()
1010 static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) in invalid_mkclean_vma() argument
1012 if (vma->vm_flags & VM_SHARED) in invalid_mkclean_vma()
1055 struct vm_area_struct *vma, unsigned long address) in page_move_anon_rmap() argument
1057 struct anon_vma *anon_vma = vma->anon_vma; in page_move_anon_rmap()
1060 VM_BUG_ON_VMA(!anon_vma, vma); in page_move_anon_rmap()
1061 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); in page_move_anon_rmap()
1080 struct vm_area_struct *vma, unsigned long address, int exclusive) in __page_set_anon_rmap() argument
1082 struct anon_vma *anon_vma = vma->anon_vma; in __page_set_anon_rmap()
1099 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
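
Lines 1080-1099 are where a page becomes "anon": __page_set_anon_rmap() stores the anon_vma pointer, tagged with the PAGE_MAPPING_ANON low bit, in page->mapping, and records the page's linear index within the vma. Reconstructed as a sketch from these lines and the same-era source (comments added; details may differ slightly between versions):

    static void __page_set_anon_rmap(struct page *page,
            struct vm_area_struct *vma, unsigned long address, int exclusive)
    {
            struct anon_vma *anon_vma = vma->anon_vma;

            if (PageAnon(page))
                    return;                         /* already set up by an earlier mapping */

            /* A non-exclusive mapping must point at the oldest (root) anon_vma
             * so every process that can map the page can also find it. */
            if (!exclusive)
                    anon_vma = anon_vma->root;

            page->mapping = (struct address_space *)
                            ((void *)anon_vma + PAGE_MAPPING_ANON);
            page->index = linear_page_index(vma, address); /* line 1099 */
    }
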
1109 struct vm_area_struct *vma, unsigned long address) in __page_check_anon_rmap() argument
1124 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1125 BUG_ON(page->index != linear_page_index(vma, address)); in __page_check_anon_rmap()
1141 struct vm_area_struct *vma, unsigned long address) in page_add_anon_rmap() argument
1143 do_page_add_anon_rmap(page, vma, address, 0); in page_add_anon_rmap()
1152 struct vm_area_struct *vma, unsigned long address, int exclusive) in do_page_add_anon_rmap() argument
1174 __page_set_anon_rmap(page, vma, address, exclusive); in do_page_add_anon_rmap()
1176 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1190 struct vm_area_struct *vma, unsigned long address) in page_add_new_anon_rmap() argument
1192 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); in page_add_new_anon_rmap()
1199 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
1297 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1300 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one()
1308 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) in try_to_unmap_one()
1321 if (vma->vm_flags & VM_LOCKED) { in try_to_unmap_one()
1331 if (ptep_clear_flush_young_notify(vma, address, pte)) { in try_to_unmap_one()
1338 flush_cache_page(vma, address, page_to_pfn(page)); in try_to_unmap_one()
1351 pteval = ptep_clear_flush(vma, address, pte); in try_to_unmap_one()
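
The heart of the unmap step shown at lines 1338-1351 is: flush any virtually-indexed cache lines for the page, atomically clear the pte while flushing the TLB, and rescue the dirty bit before the mapping disappears. A fragment-level sketch of that sequence (swap-entry or migration-entry installation, mlock handling and the accounting that follow are left out):

            flush_cache_page(vma, address, page_to_pfn(page));      /* line 1338 */
            pteval = ptep_clear_flush(vma, address, pte);           /* line 1351: clear pte + TLB flush */

            if (pte_dirty(pteval))
                    set_page_dirty(page);           /* carry the dirty bit to the struct page */
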
1434 bool is_vma_temporary_stack(struct vm_area_struct *vma) in is_vma_temporary_stack() argument
1436 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); in is_vma_temporary_stack()
1441 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == in is_vma_temporary_stack()
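
Lines 1434-1441 test for a stack that execve() is still assembling; migration walks skip such vmas via invalid_migration_vma() just below. Reconstructed from the listed lines, the predicate is roughly:

    bool is_vma_temporary_stack(struct vm_area_struct *vma)
    {
            int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

            if (!maybe_stack)
                    return false;                   /* not a stack-like vma at all */

            /* Both VM_STACK_INCOMPLETE_SETUP bits still set: execve() has not
             * finished moving the new stack into its final place yet. */
            if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
                                            VM_STACK_INCOMPLETE_SETUP)
                    return true;

            return false;
    }
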
1448 static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) in invalid_migration_vma() argument
1450 return is_vma_temporary_stack(vma); in invalid_migration_vma()
1592 struct vm_area_struct *vma = avc->vma; in rmap_walk_anon() local
1593 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1597 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_anon()
1600 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_anon()
1627 struct vm_area_struct *vma; in rmap_walk_file() local
1643 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in rmap_walk_file()
1644 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1648 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_file()
1651 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_file()
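
rmap_walk_anon() and rmap_walk_file() (lines 1592-1651) drive the same callback protocol: for every vma that might map the page, optionally skip it via invalid_vma(), then hand (page, vma, address) to rmap_one(). A hedged example of how a caller wires this up in this version of the code, modelled on the way page_referenced() and try_to_unmap() build their control structures (my_rmap_one, my_invalid_vma and walk_mappings_of are hypothetical names):

    static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
                           unsigned long address, void *arg)
    {
            /* Inspect or modify the mapping of @page at @address inside @vma. */
            return SWAP_AGAIN;                      /* anything else ends the walk early */
    }

    static bool my_invalid_vma(struct vm_area_struct *vma, void *arg)
    {
            return is_vma_temporary_stack(vma);     /* e.g. skip temporary exec stacks */
    }

    static void walk_mappings_of(struct page *page)
    {
            struct rmap_walk_control rwc = {
                    .rmap_one = my_rmap_one,
                    .invalid_vma = my_invalid_vma,
                    .arg = NULL,
            };

            rmap_walk(page, &rwc);
    }
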
1680 struct vm_area_struct *vma, unsigned long address, int exclusive) in __hugepage_set_anon_rmap() argument
1682 struct anon_vma *anon_vma = vma->anon_vma; in __hugepage_set_anon_rmap()
1693 page->index = linear_page_index(vma, address); in __hugepage_set_anon_rmap()
1697 struct vm_area_struct *vma, unsigned long address) in hugepage_add_anon_rmap() argument
1699 struct anon_vma *anon_vma = vma->anon_vma; in hugepage_add_anon_rmap()
1707 __hugepage_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1711 struct vm_area_struct *vma, unsigned long address) in hugepage_add_new_anon_rmap() argument
1713 BUG_ON(address < vma->vm_start || address >= vma->vm_end); in hugepage_add_new_anon_rmap()
1715 __hugepage_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()