Lines matching refs:vma in mm/ksm.c
363 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) in break_ksm() argument
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
374 ret = handle_mm_fault(vma->vm_mm, vma, addr, in break_ksm()
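
These first references are KSM's COW-breaking primitive: break_ksm() looks up the page mapped at addr and, if it is a KSM page, fakes a write fault so the process gets its own private copy. (Each line in this listing shows the mm/ksm.c line number, the matching source line, and the enclosing function, with "argument" or "local" marking how vma enters scope.) A condensed sketch of the loop, reconstructed around the lines shown; error paths and fault-flag handling vary by kernel version:

    static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
    {
        struct page *page;
        int ret = 0;

        do {
            cond_resched();
            /* FOLL_MIGRATION waits out migration entries instead of failing */
            page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
            if (IS_ERR_OR_NULL(page))
                break;
            if (PageKsm(page))
                /* fake a write fault to break COW on the shared page */
                ret = handle_mm_fault(vma->vm_mm, vma, addr,
                                      FAULT_FLAG_WRITE);
            else
                ret = VM_FAULT_WRITE;   /* already private: nothing to do */
            put_page(page);
        } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));

        return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
    }
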
414 struct vm_area_struct *vma; in find_mergeable_vma() local
417 vma = find_vma(mm, addr); in find_mergeable_vma()
418 if (!vma || vma->vm_start > addr) in find_mergeable_vma()
420 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in find_mergeable_vma()
422 return vma; in find_mergeable_vma()
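
find_mergeable_vma() bundles the three checks the listing shows: the address must fall inside a VMA, the VMA must be marked VM_MERGEABLE, and it must already have an anon_vma (i.e. something anonymous is mapped there). A sketch matching the referenced lines; the ksm_test_exit() guard is how these helpers stay safe against an exiting mm:

    static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
                                                     unsigned long addr)
    {
        struct vm_area_struct *vma;

        if (ksm_test_exit(mm))      /* mm is on its way out */
            return NULL;
        vma = find_vma(mm, addr);
        if (!vma || vma->vm_start > addr)
            return NULL;            /* addr not covered by any VMA */
        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
            return NULL;
        return vma;
    }
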
429 struct vm_area_struct *vma; in break_cow() local
438 vma = find_mergeable_vma(mm, addr); in break_cow()
439 if (vma) in break_cow()
440 break_ksm(vma, addr); in break_cow()
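
break_cow() is a thin wrapper used when an rmap_item must stop sharing: take the mmap lock for reading, resolve a mergeable VMA, and hand off to break_ksm(). Roughly as below (the real function also drops the rmap_item's anon_vma reference first; lock naming follows the older mmap_sem convention):

    static void break_cow(struct rmap_item *rmap_item)
    {
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, addr);
        if (vma)
            break_ksm(vma, addr);
        up_read(&mm->mmap_sem);
    }
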
462 struct vm_area_struct *vma; in get_mergeable_page() local
466 vma = find_mergeable_vma(mm, addr); in get_mergeable_page()
467 if (!vma) in get_mergeable_page()
470 page = follow_page(vma, addr, FOLL_GET); in get_mergeable_page()
474 flush_anon_page(vma, page, addr); in get_mergeable_page()
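
get_mergeable_page() fetches the page an rmap_item refers to so ksmd can checksum and compare it; only anonymous pages qualify, and flush_anon_page()/flush_dcache_page() keep virtually indexed caches coherent before the contents are read. A condensed sketch under the same pre-4.x assumptions:

    static struct page *get_mergeable_page(struct rmap_item *rmap_item)
    {
        struct mm_struct *mm = rmap_item->mm;
        unsigned long addr = rmap_item->address;
        struct vm_area_struct *vma;
        struct page *page = NULL;

        down_read(&mm->mmap_sem);
        vma = find_mergeable_vma(mm, addr);
        if (!vma)
            goto out;

        page = follow_page(vma, addr, FOLL_GET);
        if (IS_ERR_OR_NULL(page)) {
            page = NULL;
            goto out;
        }
        if (PageAnon(page)) {
            flush_anon_page(vma, page, addr);
            flush_dcache_page(page);
        } else {
            put_page(page);     /* file-backed or special: not mergeable */
            page = NULL;
        }
    out:
        up_read(&mm->mmap_sem);
        return page;
    }
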
681 static int unmerge_ksm_pages(struct vm_area_struct *vma, in unmerge_ksm_pages() argument
688 if (ksm_test_exit(vma->vm_mm)) in unmerge_ksm_pages()
693 err = break_ksm(vma, addr); in unmerge_ksm_pages()
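
unmerge_ksm_pages() just walks a range page by page and break_ksm()s each one, bailing out if the mm is exiting or a signal is pending, since unmerging a large range can take a while. Sketch:

    static int unmerge_ksm_pages(struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end)
    {
        unsigned long addr;
        int err = 0;

        for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
            if (ksm_test_exit(vma->vm_mm))
                break;              /* owner is exiting: nothing to undo */
            if (signal_pending(current))
                err = -ERESTARTSYS; /* let the caller restart the madvise */
            else
                err = break_ksm(vma, addr);
        }
        return err;
    }
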
771 struct vm_area_struct *vma; in unmerge_and_remove_all_rmap_items() local
783 for (vma = mm->mmap; vma; vma = vma->vm_next) { in unmerge_and_remove_all_rmap_items()
786 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) in unmerge_and_remove_all_rmap_items()
788 err = unmerge_ksm_pages(vma, in unmerge_and_remove_all_rmap_items()
789 vma->vm_start, vma->vm_end); in unmerge_and_remove_all_rmap_items()
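
unmerge_and_remove_all_rmap_items() is what runs when KSM is told to unmerge everything (writing 2 to /sys/kernel/mm/ksm/run): for every mm registered with ksmd it walks the VMA list and unmerges each mergeable VMA. The per-mm core looks roughly like this, with the surrounding mm_slot iteration and cleanup elided (note the mm->mmap/vm_next linked-list walk, which predates the maple tree):

    down_read(&mm->mmap_sem);
    for (vma = mm->mmap; vma; vma = vma->vm_next) {
        if (ksm_test_exit(mm))
            break;
        if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
            continue;               /* never merged: nothing to undo */
        err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
        if (err)
            goto error;             /* drops the lock and unwinds */
    }
    up_read(&mm->mmap_sem);
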
855 static int write_protect_page(struct vm_area_struct *vma, struct page *page, in write_protect_page() argument
858 struct mm_struct *mm = vma->vm_mm; in write_protect_page()
867 addr = page_address_in_vma(page, vma); in write_protect_page()
885 flush_cache_page(vma, addr, page_to_pfn(page)); in write_protect_page()
895 entry = ptep_clear_flush_notify(vma, addr, ptep); in write_protect_page()
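
write_protect_page() is the first half of a merge: it makes the candidate page's PTE clean and read-only so the contents cannot change between the comparison and the replacement. The page_mapcount()/page_count() check rejects pages with hidden references (O_DIRECT, get_user_pages) that could still write to the page behind KSM's back. A condensed sketch with the mmu_notifier range calls elided; page_check_address() is the pre-rmap-walk helper of that era:

    static int write_protect_page(struct vm_area_struct *vma, struct page *page,
                                  pte_t *orig_pte)
    {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        pte_t *ptep;
        spinlock_t *ptl;
        int swapped;
        int err = -EFAULT;

        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
            goto out;

        ptep = page_check_address(page, mm, addr, &ptl, 0);
        if (!ptep)
            goto out;

        if (pte_write(*ptep) || pte_dirty(*ptep)) {
            pte_t entry;

            swapped = PageSwapCache(page);
            flush_cache_page(vma, addr, page_to_pfn(page));
            entry = ptep_clear_flush_notify(vma, addr, ptep);
            /* anything else with a reference could modify the page under us */
            if (page_mapcount(page) + 1 + swapped != page_count(page)) {
                set_pte_at(mm, addr, ptep, entry);  /* back out */
                goto out_unlock;
            }
            if (pte_dirty(entry))
                set_page_dirty(page);
            entry = pte_mkclean(pte_wrprotect(entry));
            set_pte_at_notify(mm, addr, ptep, entry);
        }
        *orig_pte = *ptep;
        err = 0;
    out_unlock:
        pte_unmap_unlock(ptep, ptl);
    out:
        return err;
    }
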
929 static int replace_page(struct vm_area_struct *vma, struct page *page, in replace_page() argument
932 struct mm_struct *mm = vma->vm_mm; in replace_page()
941 addr = page_address_in_vma(page, vma); in replace_page()
960 page_add_anon_rmap(kpage, vma, addr); in replace_page()
962 flush_cache_page(vma, addr, pte_pfn(*ptep)); in replace_page()
963 ptep_clear_flush_notify(vma, addr, ptep); in replace_page()
964 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); in replace_page()
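
replace_page() is the second half: the private page's PTE is swapped for one mapping kpage through vma->vm_page_prot, which for a private mapping carries no write bit, so the next write faults and COWs away from the shared KSM page. Condensed, again with mmu_notifier bracketing elided (mm_find_pmd() stands in for the open-coded page-table walk of older versions):

    static int replace_page(struct vm_area_struct *vma, struct page *page,
                            struct page *kpage, pte_t orig_pte)
    {
        struct mm_struct *mm = vma->vm_mm;
        pmd_t *pmd;
        pte_t *ptep;
        spinlock_t *ptl;
        unsigned long addr;
        int err = -EFAULT;

        addr = page_address_in_vma(page, vma);
        if (addr == -EFAULT)
            goto out;

        pmd = mm_find_pmd(mm, addr);
        if (!pmd)
            goto out;

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte_same(*ptep, orig_pte)) {   /* raced with a fault: give up */
            pte_unmap_unlock(ptep, ptl);
            goto out;
        }

        get_page(kpage);
        page_add_anon_rmap(kpage, vma, addr);

        flush_cache_page(vma, addr, pte_pfn(*ptep));
        ptep_clear_flush_notify(vma, addr, ptep);
        /* read-only mapping of the shared page: writes COW off it */
        set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

        page_remove_rmap(page);
        put_page(page);

        pte_unmap_unlock(ptep, ptl);
        err = 0;
    out:
        return err;
    }
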
1015 static int try_to_merge_one_page(struct vm_area_struct *vma, in try_to_merge_one_page() argument
1024 if (!(vma->vm_flags & VM_MERGEABLE)) in try_to_merge_one_page()
1047 if (write_protect_page(vma, page, &orig_pte) == 0) { in try_to_merge_one_page()
1058 err = replace_page(vma, page, kpage, orig_pte); in try_to_merge_one_page()
1061 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { in try_to_merge_one_page()
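
try_to_merge_one_page() ties the halves together: lock the page, write-protect it, compare contents, then replace. When kpage is NULL it only write-protects, turning the page itself into a new stable-tree KSM page in place. The VM_LOCKED tail moves an mlock from the old page to the shared one. Condensed control flow, with THP splitting elided:

    static int try_to_merge_one_page(struct vm_area_struct *vma,
                                     struct page *page, struct page *kpage)
    {
        pte_t orig_pte = __pte(0);
        int err = -EFAULT;

        if (page == kpage)              /* ksm page forked */
            return 0;
        if (!(vma->vm_flags & VM_MERGEABLE))
            goto out;
        if (!PageAnon(page))
            goto out;

        if (!trylock_page(page))        /* lock stabilizes PageSwapCache */
            goto out;
        /*
         * If write-protecting succeeds, no one can dirty the page while
         * we compare it against kpage and splice in the shared mapping.
         */
        if (write_protect_page(vma, page, &orig_pte) == 0) {
            if (!kpage) {
                /* promote this page itself to a stable ksm page */
                set_page_stable_node(page, NULL);
                mark_page_accessed(page);
                err = 0;
            } else if (pages_identical(page, kpage))
                err = replace_page(vma, page, kpage, orig_pte);
        }

        if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
            munlock_vma_page(page);     /* old page leaves this mapping */
            if (!PageMlocked(kpage)) {
                unlock_page(page);
                lock_page(kpage);
                mlock_vma_page(kpage);  /* carry the mlock over */
                page = kpage;           /* for the final unlock */
            }
        }
        unlock_page(page);
    out:
        return err;
    }
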
1086 struct vm_area_struct *vma; in try_to_merge_with_ksm_page() local
1092 vma = find_vma(mm, rmap_item->address); in try_to_merge_with_ksm_page()
1093 if (!vma || vma->vm_start > rmap_item->address) in try_to_merge_with_ksm_page()
1096 err = try_to_merge_one_page(vma, page, kpage); in try_to_merge_with_ksm_page()
1104 rmap_item->anon_vma = vma->anon_vma; in try_to_merge_with_ksm_page()
1105 get_anon_vma(vma->anon_vma); in try_to_merge_with_ksm_page()
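
try_to_merge_with_ksm_page() re-resolves the rmap_item's VMA under the mmap lock (it may have been unmapped since the scan) and, on success, caches the anon_vma in the rmap_item with a reference held, which is what later lets rmap_walk_ksm() find this mm again. Sketch:

    static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
                                          struct page *page, struct page *kpage)
    {
        struct mm_struct *mm = rmap_item->mm;
        struct vm_area_struct *vma;
        int err = -EFAULT;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, rmap_item->address);
        if (!vma || vma->vm_start > rmap_item->address)
            goto out;

        err = try_to_merge_one_page(vma, page, kpage);
        if (err)
            goto out;

        /* remember the anon_vma so reverse mapping can reach this mm */
        rmap_item->anon_vma = vma->anon_vma;
        get_anon_vma(vma->anon_vma);
    out:
        up_read(&mm->mmap_sem);
        return err;
    }
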
1539 struct vm_area_struct *vma; in scan_get_next_rmap_item() local
1602 vma = NULL; in scan_get_next_rmap_item()
1604 vma = find_vma(mm, ksm_scan.address); in scan_get_next_rmap_item()
1606 for (; vma; vma = vma->vm_next) { in scan_get_next_rmap_item()
1607 if (!(vma->vm_flags & VM_MERGEABLE)) in scan_get_next_rmap_item()
1609 if (ksm_scan.address < vma->vm_start) in scan_get_next_rmap_item()
1610 ksm_scan.address = vma->vm_start; in scan_get_next_rmap_item()
1611 if (!vma->anon_vma) in scan_get_next_rmap_item()
1612 ksm_scan.address = vma->vm_end; in scan_get_next_rmap_item()
1614 while (ksm_scan.address < vma->vm_end) { in scan_get_next_rmap_item()
1617 *page = follow_page(vma, ksm_scan.address, FOLL_GET); in scan_get_next_rmap_item()
1625 flush_anon_page(vma, *page, ksm_scan.address); in scan_get_next_rmap_item()
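
scan_get_next_rmap_item() is ksmd's scan cursor: it resumes at ksm_scan.address in the current mm, skips VMAs that are not VM_MERGEABLE, fast-forwards over VMAs with no anon_vma yet (nothing anonymous is mapped, so nothing can merge), and follow_page()s each address looking for anonymous pages. The VMA-walking core, heavily condensed; slot stands for ksm_scan.mm_slot, and the surrounding mm_slot bookkeeping is elided:

    vma = NULL;
    if (!ksm_test_exit(mm))
        vma = find_vma(mm, ksm_scan.address);
    for (; vma; vma = vma->vm_next) {
        if (!(vma->vm_flags & VM_MERGEABLE))
            continue;
        if (ksm_scan.address < vma->vm_start)
            ksm_scan.address = vma->vm_start;
        if (!vma->anon_vma)
            ksm_scan.address = vma->vm_end;     /* skip the whole VMA */

        while (ksm_scan.address < vma->vm_end) {
            if (ksm_test_exit(mm))
                break;
            *page = follow_page(vma, ksm_scan.address, FOLL_GET);
            if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
                flush_anon_page(vma, *page, ksm_scan.address);
                flush_dcache_page(*page);
                rmap_item = get_next_rmap_item(slot,
                                ksm_scan.rmap_list, ksm_scan.address);
                /* on success: advance the cursor and return rmap_item */
            }
            if (!IS_ERR_OR_NULL(*page))
                put_page(*page);
            ksm_scan.address += PAGE_SIZE;
            cond_resched();
        }
    }
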
1738 int ksm_madvise(struct vm_area_struct *vma, unsigned long start, in ksm_madvise() argument
1741 struct mm_struct *mm = vma->vm_mm; in ksm_madvise()
1772 if (vma->anon_vma) { in ksm_madvise()
1773 err = unmerge_ksm_pages(vma, start, end); in ksm_madvise()
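
ksm_madvise() is the madvise(2) entry point. MADV_MERGEABLE quietly refuses special mappings, registers the mm with ksmd on first use, and sets VM_MERGEABLE; MADV_UNMERGEABLE breaks any sharing already established (only possible if the VMA has an anon_vma) before clearing the flag. Condensed; the real list of rejected vm_flags is longer and version-dependent:

    int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                    unsigned long end, int advice, unsigned long *vm_flags)
    {
        struct mm_struct *mm = vma->vm_mm;
        int err;

        switch (advice) {
        case MADV_MERGEABLE:
            if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
                             VM_PFNMAP | VM_IO | VM_HUGETLB))
                return 0;       /* just ignore the advice */
            if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
                err = __ksm_enter(mm);  /* register mm with ksmd */
                if (err)
                    return err;
            }
            *vm_flags |= VM_MERGEABLE;
            break;

        case MADV_UNMERGEABLE:
            if (!(*vm_flags & VM_MERGEABLE))
                return 0;       /* nothing to do */
            if (vma->anon_vma) {    /* pages may actually be merged */
                err = unmerge_ksm_pages(vma, start, end);
                if (err)
                    return err;
            }
            *vm_flags &= ~VM_MERGEABLE;
            break;
        }
        return 0;
    }
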
1863 struct vm_area_struct *vma, unsigned long address) in ksm_might_need_to_copy() argument
1874 } else if (anon_vma->root == vma->anon_vma->root && in ksm_might_need_to_copy()
1875 page->index == linear_page_index(vma, address)) { in ksm_might_need_to_copy()
1881 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in ksm_might_need_to_copy()
1883 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
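
ksm_might_need_to_copy() runs at swap-in: a page coming back from swap may still be a KSM page shared with other processes, and this fault must not simply map it into a possibly unrelated anon_vma. The anon_vma-root and page->index checks detect the one case where reuse is provably safe; otherwise a private copy is made. Sketch (helper names such as __set_page_locked shifted in later kernels):

    struct page *ksm_might_need_to_copy(struct page *page,
                    struct vm_area_struct *vma, unsigned long address)
    {
        struct anon_vma *anon_vma = page_anon_vma(page);
        struct page *new_page;

        if (PageKsm(page)) {
            if (page_stable_node(page) && !(ksm_run & KSM_RUN_UNMERGE))
                return page;    /* still a stable ksm page: reuse it */
        } else if (!anon_vma) {
            return page;        /* not mapped anywhere else: safe */
        } else if (anon_vma->root == vma->anon_vma->root &&
                   page->index == linear_page_index(vma, address)) {
            return page;        /* same anon_vma and offset: our own page */
        }
        if (!PageUptodate(page))
            return page;        /* let do_swap_page() report the error */

        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
            copy_user_highpage(new_page, page, address, vma);
            SetPageDirty(new_page);
            __SetPageUptodate(new_page);
            __set_page_locked(new_page);
        }
        return new_page;
    }
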
1915 struct vm_area_struct *vma; in rmap_walk_ksm() local
1920 vma = vmac->vma; in rmap_walk_ksm()
1921 if (rmap_item->address < vma->vm_start || in rmap_walk_ksm()
1922 rmap_item->address >= vma->vm_end) in rmap_walk_ksm()
1930 if ((rmap_item->mm == vma->vm_mm) == search_new_forks) in rmap_walk_ksm()
1933 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) in rmap_walk_ksm()
1936 ret = rwc->rmap_one(page, vma, in rmap_walk_ksm()
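
rmap_walk_ksm() gives KSM pages their reverse-map walk: the page's stable node carries a list of rmap_items, and each rmap_item's cached anon_vma is walked to find every VMA that might map the page. The two passes controlled by search_new_forks exist because a forked child shares the page through the parent's anon_vma while having a different mm. Condensed inner loop, assuming the rmap_walk_control (rwc) API of that era:

    again:
        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
            struct anon_vma *anon_vma = rmap_item->anon_vma;
            struct anon_vma_chain *vmac;
            struct vm_area_struct *vma;

            anon_vma_lock_read(anon_vma);
            anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                           0, ULONG_MAX) {
                vma = vmac->vma;
                if (rmap_item->address < vma->vm_start ||
                    rmap_item->address >= vma->vm_end)
                    continue;
                /*
                 * First pass: only the vma whose mm created this
                 * rmap_item; second pass: only the forks.
                 */
                if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
                    continue;
                if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                    continue;

                ret = rwc->rmap_one(page, vma, rmap_item->address,
                                    rwc->arg);
                if (ret != SWAP_AGAIN) {
                    anon_vma_unlock_read(anon_vma);
                    goto out;
                }
            }
            anon_vma_unlock_read(anon_vma);
        }
        if (!search_new_forks++)
            goto again;
    out:
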