Lines matching refs: vma — cross-reference hits for the identifier vma in the kernel's KSM code (the functions below are all from mm/ksm.c). Each entry gives the file line number, the matching source line, and the enclosing function; "argument" and "local" note how vma is bound at that line.
363 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) in break_ksm() argument
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
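These three hits are the heart of break_ksm(): look the page up with FOLL_MIGRATION so a migrating page is waited for rather than skipped, then force a write fault so copy-on-write replaces the shared KSM page with a private copy. A minimal sketch of the loop, reconstructed from the fragments above and the same-vintage four-argument handle_mm_fault(); the exact set of VM_FAULT_* bits tested is an assumption:

static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        int ret = 0;

        do {
                cond_resched();
                /* FOLL_MIGRATION: wait out a migration entry, don't fail */
                page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
                if (IS_ERR_OR_NULL(page))
                        break;
                if (PageKsm(page))
                        /* a write fault COWs the shared page away */
                        ret = handle_mm_fault(vma->vm_mm, vma, addr,
                                                        FAULT_FLAG_WRITE);
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
        } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));

        return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}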
414 struct vm_area_struct *vma; in find_mergeable_vma() local
417 vma = find_vma(mm, addr); in find_mergeable_vma()
418 if (!vma || vma->vm_start > addr) in find_mergeable_vma()
420 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
422 return vma; in find_mergeable_vma()
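The hits above cover nearly the whole helper; only the early returns are missing. A sketch, assuming the failure paths return NULL and that a ksm_test_exit() guard precedes the lookup as it does elsewhere in this file:

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
                                                 unsigned long addr)
{
        struct vm_area_struct *vma;

        if (ksm_test_exit(mm))          /* assumed guard, as elsewhere */
                return NULL;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
                return NULL;
        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                return NULL;
        return vma;
}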
429 struct vm_area_struct *vma; in break_cow() local
438 vma = find_mergeable_vma(mm, addr); in break_cow()
439 if (vma) in break_cow()
440 break_ksm(vma, addr); in break_cow()
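break_cow() just resolves the VMA and delegates to break_ksm(). A sketch assuming the usual down_read/up_read of mmap_sem around the lookup, and that the rmap_item's anon_vma hold is dropped first, as in same-vintage mainline:

static void break_cow(struct rmap_item *rmap_item)
{
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;

        /* the stable tree no longer maps this page: drop the anon_vma hold */
        put_anon_vma(rmap_item->anon_vma);

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, addr);
        if (vma)
                break_ksm(vma, addr);
        up_read(&mm->mmap_sem);
}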
462 struct vm_area_struct *vma; in get_mergeable_page() local
466 vma = find_mergeable_vma(mm, addr); in get_mergeable_page()
467 if (!vma) in get_mergeable_page()
470 page = follow_page(vma, addr, FOLL_GET); in get_mergeable_page()
474 flush_anon_page(vma, page, addr); in get_mergeable_page()
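get_mergeable_page() pairs the same VMA lookup with a FOLL_GET page walk, keeping the reference only for anonymous pages. A sketch; the PageAnon() filter and flush_dcache_page() call are assumptions based on same-era mainline ksm.c:

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;
        struct page *page = NULL;

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, addr);
        if (!vma)
                goto out;

        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page)) {
                page = NULL;
                goto out;
        }
        if (PageAnon(page)) {
                flush_anon_page(vma, page, addr);
                flush_dcache_page(page);
        } else {
                put_page(page);         /* only anonymous pages can merge */
                page = NULL;
        }
out:
        up_read(&mm->mmap_sem);
        return page;
}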
682 static int unmerge_ksm_pages(struct vm_area_struct *vma, in unmerge_ksm_pages() argument
689 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
694 err = break_ksm(vma, addr); in unmerge_ksm_pages()
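unmerge_ksm_pages() is a page-by-page loop over [start, end) calling break_ksm(); the fragments show the exit check and the call. A sketch, assuming a signal_pending() bail-out as in mainline:

static int unmerge_ksm_pages(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        unsigned long addr;
        int err = 0;

        for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
                if (ksm_test_exit(vma->vm_mm))
                        break;          /* the mm is being torn down */
                if (signal_pending(current))
                        err = -ERESTARTSYS;
                else
                        err = break_ksm(vma, addr);
        }
        return err;
}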
772 struct vm_area_struct *vma; in unmerge_and_remove_all_rmap_items() local
784 for (vma = mm->mmap; vma; vma = vma->vm_next) { in unmerge_and_remove_all_rmap_items()
787 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in unmerge_and_remove_all_rmap_items()
789 err = unmerge_ksm_pages(vma, in unmerge_and_remove_all_rmap_items()
790 vma->vm_start, vma->vm_end); in unmerge_and_remove_all_rmap_items()
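Within unmerge_and_remove_all_rmap_items(), the listed lines are the inner per-VMA loop: every VM_MERGEABLE area that already has an anon_vma is unmerged in full. A sketch of that loop, with the outer walk over registered mm_slots reduced to a comment:

        /* outer loop (abridged): for each mm_slot registered with ksmd */
        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (ksm_test_exit(mm))
                        break;
                if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
                        continue;       /* can never hold KSM pages */
                err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
                if (err)
                        goto error;     /* unlock and bail out (abridged) */
        }
        up_read(&mm->mmap_sem);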
856 static int write_protect_page(struct vm_area_struct *vma, struct page *page, in write_protect_page() argument
859 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
868 addr = page_address_in_vma(page, vma); in write_protect_page()
886 flush_cache_page(vma, addr, page_to_pfn(page)); in write_protect_page()
896 entry = ptep_clear_flush_notify(vma, addr, ptep); in write_protect_page()
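write_protect_page() freezes a candidate page: the pte is cleared and flushed first (so no writer can slip in through a stale TLB entry), then rewritten clean and write-protected. An abridged sketch; the page_check_address() lookup, and the elision of the mmu_notifier bracketing and the page_count-vs-mapcount race check, follow same-vintage mainline and should be read as assumptions:

        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
                return -EFAULT;

        ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
                return -EFAULT;

        if (pte_write(*ptep) || pte_dirty(*ptep)) {
                flush_cache_page(vma, addr, page_to_pfn(page));
                /* clear and flush before downgrading, closing the race
                 * with concurrent writers such as O_DIRECT */
                entry = ptep_clear_flush_notify(vma, addr, ptep);
                if (pte_dirty(entry))
                        set_page_dirty(page);
                entry = pte_mkclean(pte_wrprotect(entry));
                set_pte_at_notify(mm, addr, ptep, entry);
        }
        *orig_pte = *ptep;
        pte_unmap_unlock(ptep, ptl);
        return 0;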
930 static int replace_page(struct vm_area_struct *vma, struct page *page, in replace_page() argument
933 struct mm_struct *mm = vma->vm_mm; in replace_page()
942 addr = page_address_in_vma(page, vma); in replace_page()
961 page_add_anon_rmap(kpage, vma, addr); in replace_page()
963 flush_cache_page(vma, addr, pte_pfn(*ptep)); in replace_page()
964 ptep_clear_flush_notify(vma, addr, ptep); in replace_page()
965 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in replace_page()
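replace_page() swaps the pte from the private page to the shared KSM page kpage: add the new rmap first, then flush and rewrite the pte, then drop the old mapping. A sketch; the mm_find_pmd() lookup and the pte_same() recheck against orig_pte follow same-era mainline and are assumptions here:

        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
                return -EFAULT;

        pmd = mm_find_pmd(mm, addr);
        if (!pmd)
                return -EFAULT;

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte_same(*ptep, orig_pte)) {       /* lost a race with a fault */
                pte_unmap_unlock(ptep, ptl);
                return -EFAULT;
        }

        get_page(kpage);
        page_add_anon_rmap(kpage, vma, addr);   /* map the shared page in */

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

        page_remove_rmap(page);                 /* ...and the old page out */
        put_page(page);
        pte_unmap_unlock(ptep, ptl);
        return 0;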
1016 static int try_to_merge_one_page(struct vm_area_struct *vma, in try_to_merge_one_page() argument
1046 if (write_protect_page(vma, page, &orig_pte) == 0) { in try_to_merge_one_page()
1057 err = replace_page(vma, page, kpage, orig_pte); in try_to_merge_one_page()
1060 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { in try_to_merge_one_page()
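try_to_merge_one_page() sequences the two primitives: write-protect first, and only if the contents still compare equal afterwards is it safe to call replace_page(). The VM_LOCKED fragment then migrates the mlock from the old page to kpage. An abridged sketch, with trylock and error paths simplified:

        if (!trylock_page(page))
                return -EBUSY;
        /*
         * Write-protect first: only once no writer can intervene is a
         * pages_identical() result trustworthy enough to merge on.
         */
        if (write_protect_page(vma, page, &orig_pte) == 0) {
                if (!kpage) {
                        /* merging with itself: just mark it stable-capable */
                        set_page_stable_node(page, NULL);
                        mark_page_accessed(page);
                        err = 0;
                } else if (pages_identical(page, kpage))
                        err = replace_page(vma, page, kpage, orig_pte);
        }

        if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
                munlock_vma_page(page);         /* old page is unmapped now */
                if (!PageMlocked(kpage)) {
                        unlock_page(page);
                        lock_page(kpage);
                        mlock_vma_page(kpage);  /* mlock the KSM page instead */
                        page = kpage;           /* so we unlock the right one */
                }
        }
        unlock_page(page);
        return err;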
1085 struct vm_area_struct *vma; in try_to_merge_with_ksm_page() local
1089 vma = find_mergeable_vma(mm, rmap_item->address); in try_to_merge_with_ksm_page()
1090 if (!vma) in try_to_merge_with_ksm_page()
1093 err = try_to_merge_one_page(vma, page, kpage); in try_to_merge_with_ksm_page()
1101 rmap_item->anon_vma = vma->anon_vma; in try_to_merge_with_ksm_page()
1102 get_anon_vma(vma->anon_vma); in try_to_merge_with_ksm_page()
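try_to_merge_with_ksm_page() is the mm-level wrapper: take mmap_sem, find the mergeable VMA, merge, then pin vma->anon_vma so rmap_walk_ksm() can still reach this mapping after mmap_sem is dropped. A sketch; remove_rmap_item_from_tree() is the same-era helper and an assumption here:

static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
                                      struct page *page, struct page *kpage)
{
        struct mm_struct *mm = rmap_item->mm;
        struct vm_area_struct *vma;
        int err = -EFAULT;

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, rmap_item->address);
        if (!vma)
                goto out;

        err = try_to_merge_one_page(vma, page, kpage);
        if (err)
                goto out;

        /* unstable-tree linkage is stale once the page has been merged */
        remove_rmap_item_from_tree(rmap_item);

        /* must take the anon_vma reference while still holding mmap_sem */
        rmap_item->anon_vma = vma->anon_vma;
        get_anon_vma(vma->anon_vma);
out:
        up_read(&mm->mmap_sem);
        return err;
}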
1558 struct vm_area_struct *vma; in scan_get_next_rmap_item() local
1621 vma = NULL; in scan_get_next_rmap_item()
1623 vma = find_vma(mm, ksm_scan.address); in scan_get_next_rmap_item()
1625 for (; vma; vma = vma->vm_next) { in scan_get_next_rmap_item()
1626 if (!(vma->vm_flags & VM_MERGEABLE)) in scan_get_next_rmap_item()
1628 if (ksm_scan.address < vma->vm_start) in scan_get_next_rmap_item()
1629 ksm_scan.address = vma->vm_start; in scan_get_next_rmap_item()
1630 if (!vma->anon_vma) in scan_get_next_rmap_item()
1631 ksm_scan.address = vma->vm_end; in scan_get_next_rmap_item()
1633 while (ksm_scan.address < vma->vm_end) { in scan_get_next_rmap_item()
1636 *page = follow_page(vma, ksm_scan.address, FOLL_GET); in scan_get_next_rmap_item()
1644 flush_anon_page(vma, *page, ksm_scan.address); in scan_get_next_rmap_item()
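The scan loop in scan_get_next_rmap_item() walks each mergeable VMA a page at a time, skipping areas with no anon_vma (nothing anonymous is mapped there yet) and handing back only anonymous pages. A heavily abridged sketch of that walk, with the mm_slot bookkeeping and rmap_item allocation reduced to comments:

        /* abridged: mm_slot bookkeeping and rmap_item allocation elided */
        vma = NULL;
        if (!ksm_test_exit(mm))
                vma = find_vma(mm, ksm_scan.address);

        for (; vma; vma = vma->vm_next) {
                if (!(vma->vm_flags & VM_MERGEABLE))
                        continue;
                if (ksm_scan.address < vma->vm_start)
                        ksm_scan.address = vma->vm_start;
                if (!vma->anon_vma)
                        ksm_scan.address = vma->vm_end; /* nothing anon here */

                while (ksm_scan.address < vma->vm_end) {
                        if (ksm_test_exit(mm))
                                break;
                        *page = follow_page(vma, ksm_scan.address, FOLL_GET);
                        if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
                                flush_anon_page(vma, *page, ksm_scan.address);
                                flush_dcache_page(*page);
                                /* return an rmap_item for this page; the
                                 * page reference travels with it */
                                return rmap_item;
                        }
                        if (!IS_ERR_OR_NULL(*page))
                                put_page(*page);
                        ksm_scan.address += PAGE_SIZE;
                        cond_resched();
                }
        }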
1757 int ksm_madvise(struct vm_area_struct *vma, unsigned long start, in ksm_madvise() argument
1760 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
1791 if (vma->anon_vma) { in ksm_madvise()
1792 err = unmerge_ksm_pages(vma, start, end); in ksm_madvise()
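ksm_madvise() implements both advice values; the listed lines are the MADV_UNMERGEABLE side, which only needs to unmerge if the VMA ever acquired an anon_vma. A sketch of the switch, with the MADV_MERGEABLE eligibility checks abridged to a comment:

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        int err;

        switch (advice) {
        case MADV_MERGEABLE:
                /* abridged: reject VM_SHARED, VM_PFNMAP, hugetlb etc. */
                if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
                        err = __ksm_enter(mm);
                        if (err)
                                return err;
                }
                *vm_flags |= VM_MERGEABLE;
                break;

        case MADV_UNMERGEABLE:
                if (!(*vm_flags & VM_MERGEABLE))
                        return 0;       /* just ignore the advice */
                if (vma->anon_vma) {    /* only then can KSM pages exist */
                        err = unmerge_ksm_pages(vma, start, end);
                        if (err)
                                return err;
                }
                *vm_flags &= ~VM_MERGEABLE;
                break;
        }
        return 0;
}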
1882 struct vm_area_struct *vma, unsigned long address) in ksm_might_need_to_copy() argument
1893 } else if (anon_vma->root == vma->anon_vma->root && in ksm_might_need_to_copy()
1894 page->index == linear_page_index(vma, address)) { in ksm_might_need_to_copy()
1900 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in ksm_might_need_to_copy()
1902 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
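ksm_might_need_to_copy() decides at swap-in whether the page can be reused in this VMA or must be duplicated: reuse is safe only if it is still a stable KSM page, has no mapping left, or already belongs at exactly this (anon_vma root, index) slot. A sketch close to same-era mainline; the new-page setup flags are assumptions:

struct page *ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = page_anon_vma(page);
        struct page *new_page;

        if (PageKsm(page)) {
                if (page_stable_node(page))
                        return page;    /* still a live KSM page: reusable */
        } else if (!anon_vma) {
                return page;            /* no mapping left to conflict with */
        } else if (anon_vma->root == vma->anon_vma->root &&
                   page->index == linear_page_index(vma, address)) {
                return page;            /* already belongs at this slot */
        }
        if (!PageUptodate(page))
                return page;            /* let do_swap_page report the error */

        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);
                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
                __set_page_locked(new_page);
        }
        return new_page;
}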
1934 struct vm_area_struct *vma; in rmap_walk_ksm() local
1941 vma = vmac->vma; in rmap_walk_ksm()
1942 if (rmap_item->address < vma->vm_start || in rmap_walk_ksm()
1943 rmap_item->address >= vma->vm_end) in rmap_walk_ksm()
1951 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
1954 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_ksm()
1957 ret = rwc->rmap_one(page, vma, in rmap_walk_ksm()
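rmap_walk_ksm() visits every VMA on each sharer's anon_vma interval tree that covers the rmap_item's address, in two passes (search_new_forks) so mappings forked after ksmd scanned them are not missed. A sketch of the inner loop; the stable-node iteration, locking details, and the SWAP_AGAIN convention follow same-vintage mainline and are assumptions:

        /* abridged: stable-node lookup and the two-pass retry elided */
        anon_vma_lock_read(anon_vma);
        anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                       0, ULONG_MAX) {
                vma = vmac->vma;
                if (rmap_item->address < vma->vm_start ||
                    rmap_item->address >= vma->vm_end)
                        continue;
                /*
                 * First pass: only the vma this rmap_item was scanned in.
                 * Second pass (search_new_forks): covering vmas in other
                 * mms, forked since ksmd went past them.
                 */
                if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
                        continue;
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                        continue;

                ret = rwc->rmap_one(page, vma, rmap_item->address, rwc->arg);
                if (ret != SWAP_AGAIN) {
                        anon_vma_unlock_read(anon_vma);
                        goto out;       /* propagate the walker's verdict */
                }
        }
        anon_vma_unlock_read(anon_vma);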