Lines matching refs: page (mm/ksm.c)
365 struct page *page; in break_ksm() local
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION); in break_ksm()
371 if (IS_ERR_OR_NULL(page)) in break_ksm()
373 if (PageKsm(page)) in break_ksm()
378 put_page(page); in break_ksm()
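
The break_ksm() hits above show how KSM "unmerges" an address: follow_page() looks the page up (FOLL_GET takes a reference, FOLL_MIGRATION waits on a migration entry instead of failing), and if the page is still a KSM page a write fault is forced so copy-on-write hands the process its own private copy. A condensed sketch of that loop, assuming a kernel of roughly this vintage (the exact fault-flag handling differs between versions):

	/* Sketch of break_ksm()'s core loop; not the verbatim kernel code. */
	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
					      FAULT_FLAG_WRITE);	/* force COW */
		else
			ret = VM_FAULT_WRITE;		/* already a private page */
		put_page(page);				/* drop the FOLL_GET reference */
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
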
444 static struct page *page_trans_compound_anon(struct page *page) in page_trans_compound_anon() argument
446 if (PageTransCompound(page)) { in page_trans_compound_anon()
447 struct page *head = compound_head(page); in page_trans_compound_anon()
458 static struct page *get_mergeable_page(struct rmap_item *rmap_item) in get_mergeable_page()
463 struct page *page; in get_mergeable_page() local
470 page = follow_page(vma, addr, FOLL_GET); in get_mergeable_page()
471 if (IS_ERR_OR_NULL(page)) in get_mergeable_page()
473 if (PageAnon(page) || page_trans_compound_anon(page)) { in get_mergeable_page()
474 flush_anon_page(vma, page, addr); in get_mergeable_page()
475 flush_dcache_page(page); in get_mergeable_page()
477 put_page(page); in get_mergeable_page()
478 out: page = NULL; in get_mergeable_page()
481 return page; in get_mergeable_page()
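
get_mergeable_page() resolves an rmap_item back to the page currently mapped at its address and accepts only anonymous pages (or an anonymous transparent huge page, via page_trans_compound_anon()); anything else is released and NULL is returned. The filter is visible almost verbatim in the lines above:

	/* Sketch: only anonymous pages are candidates for merging. */
	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_anon_page(vma, page, addr);	/* keep caches coherent ... */
		flush_dcache_page(page);		/* ... before contents are compared */
	} else {
		put_page(page);
out:
		page = NULL;
	}
	return page;
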
536 static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it) in get_ksm_page()
538 struct page *page; in get_ksm_page() local
546 page = pfn_to_page(kpfn); in get_ksm_page()
554 if (READ_ONCE(page->mapping) != expected_mapping) in get_ksm_page()
566 while (!get_page_unless_zero(page)) { in get_ksm_page()
575 if (!PageSwapCache(page)) in get_ksm_page()
580 if (READ_ONCE(page->mapping) != expected_mapping) { in get_ksm_page()
581 put_page(page); in get_ksm_page()
586 lock_page(page); in get_ksm_page()
587 if (READ_ONCE(page->mapping) != expected_mapping) { in get_ksm_page()
588 unlock_page(page); in get_ksm_page()
589 put_page(page); in get_ksm_page()
593 return page; in get_ksm_page()
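
get_ksm_page() turns a stable_node back into a struct page, but the page may have been freed, reused or migrated since the node was recorded, so page->mapping is re-checked (with READ_ONCE) after every step that can race, and the reference is taken with get_page_unless_zero() so a page whose count has already dropped to zero is never revived. A simplified outline, assuming this era's encoding of the expected mapping (error paths and stale-node cleanup abbreviated):

	/* Sketch of get_ksm_page()'s validation dance. */
	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	page = pfn_to_page(READ_ONCE(stable_node->kpfn));
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;			/* page was freed or reused */
	if (!get_page_unless_zero(page))
		goto stale;			/* refcount already zero */
	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);			/* raced with removal */
		goto stale;
	}
	if (lock_it) {
		lock_page(page);
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

The while loop at line 566 additionally tolerates a page momentarily held only by the swap cache (hence the PageSwapCache() test at 575), and the stale path removes the dead stable_node from the tree.
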
617 struct page *page; in remove_rmap_item_from_tree() local
620 page = get_ksm_page(stable_node, true); in remove_rmap_item_from_tree()
621 if (!page) in remove_rmap_item_from_tree()
625 unlock_page(page); in remove_rmap_item_from_tree()
626 put_page(page); in remove_rmap_item_from_tree()
704 struct page *page; in remove_stable_node() local
707 page = get_ksm_page(stable_node, true); in remove_stable_node()
708 if (!page) { in remove_stable_node()
715 if (WARN_ON_ONCE(page_mapped(page))) { in remove_stable_node()
730 set_page_stable_node(page, NULL); in remove_stable_node()
735 unlock_page(page); in remove_stable_node()
736 put_page(page); in remove_stable_node()
828 static u32 calc_checksum(struct page *page) in calc_checksum() argument
831 void *addr = kmap_atomic(page); in calc_checksum()
837 static int memcmp_pages(struct page *page1, struct page *page2) in memcmp_pages()
850 static inline int pages_identical(struct page *page1, struct page *page2) in pages_identical()
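
The comparison primitives are deliberately simple: calc_checksum() hashes a page's contents through a temporary kernel mapping, and memcmp_pages() does a full byte compare of two pages the same way; pages_identical() is just its boolean form. Roughly (the hash function and seed are as in kernels of this vintage):

	/* Sketch of the content-comparison helpers built on kmap_atomic(). */
	static u32 calc_checksum(struct page *page)
	{
		u32 checksum;
		void *addr = kmap_atomic(page);
		checksum = jhash2(addr, PAGE_SIZE / 4, 17);
		kunmap_atomic(addr);
		return checksum;
	}

	static int memcmp_pages(struct page *page1, struct page *page2)
	{
		char *addr1 = kmap_atomic(page1);
		char *addr2 = kmap_atomic(page2);
		int ret = memcmp(addr1, addr2, PAGE_SIZE);

		kunmap_atomic(addr2);
		kunmap_atomic(addr1);
		return ret;
	}

The checksum is never used as a lookup key; it only tells cmp_and_merge_page() whether a page's contents have stopped changing between scans.
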
855 static int write_protect_page(struct vm_area_struct *vma, struct page *page, in write_protect_page() argument
867 addr = page_address_in_vma(page, vma); in write_protect_page()
871 BUG_ON(PageTransCompound(page)); in write_protect_page()
877 ptep = page_check_address(page, mm, addr, &ptl, 0); in write_protect_page()
884 swapped = PageSwapCache(page); in write_protect_page()
885 flush_cache_page(vma, addr, page_to_pfn(page)); in write_protect_page()
900 if (page_mapcount(page) + 1 + swapped != page_count(page)) { in write_protect_page()
905 set_page_dirty(page); in write_protect_page()
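
write_protect_page() is where KSM makes a candidate page read-only before sharing it. The lines above carry the two safety nets: page_check_address() finds and locks the PTE, and the page_mapcount(page) + 1 + swapped != page_count(page) test at line 900 aborts if someone else holds an unexpected reference (for instance an in-flight O_DIRECT / get_user_pages() user), because such a user could keep writing after the write bit is cleared. The middle of the function looks roughly like this (the clear-and-flush is ptep_clear_flush_notify() in some versions):

	/* Sketch: clear the pte first, then check for extra references. */
	swapped = PageSwapCache(page);
	flush_cache_page(vma, addr, page_to_pfn(page));
	entry = ptep_clear_flush(vma, addr, ptep);	/* no new writers from here on */
	if (page_mapcount(page) + 1 + swapped != page_count(page)) {
		set_pte_at(mm, addr, ptep, entry);	/* raced: restore and fail */
		goto out_unlock;
	}
	if (pte_dirty(entry))
		set_page_dirty(page);			/* don't lose the dirty bit */
	entry = pte_mkclean(pte_wrprotect(entry));
	set_pte_at_notify(mm, addr, ptep, entry);
	*orig_pte = *ptep;
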
929 static int replace_page(struct vm_area_struct *vma, struct page *page, in replace_page() argument
930 struct page *kpage, pte_t orig_pte) in replace_page()
941 addr = page_address_in_vma(page, vma); in replace_page()
966 page_remove_rmap(page); in replace_page()
967 if (!page_mapped(page)) in replace_page()
968 try_to_free_swap(page); in replace_page()
969 put_page(page); in replace_page()
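
With the contents verified and the PTE write-protected, replace_page() retargets the process's PTE from its private page to the shared KSM page and tears down the old page's reverse mapping, which is what the page_remove_rmap()/try_to_free_swap()/put_page() sequence at lines 966-969 finishes off. Condensed:

	/* Sketch of replace_page(): swing the pte from page to kpage. */
	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);		/* kpage gains a mapper */

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep,
			  mk_pte(kpage, vma->vm_page_prot));	/* COW-protected mapping */

	page_remove_rmap(page);				/* old page loses its mapper */
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);					/* drop the mapping's reference */
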
979 static int page_trans_compound_anon_split(struct page *page) in page_trans_compound_anon_split() argument
982 struct page *transhuge_head = page_trans_compound_anon(page); in page_trans_compound_anon_split()
1016 struct page *page, struct page *kpage) in try_to_merge_one_page() argument
1021 if (page == kpage) /* ksm page forked */ in try_to_merge_one_page()
1026 if (PageTransCompound(page) && page_trans_compound_anon_split(page)) in try_to_merge_one_page()
1028 BUG_ON(PageTransCompound(page)); in try_to_merge_one_page()
1029 if (!PageAnon(page)) in try_to_merge_one_page()
1039 if (!trylock_page(page)) in try_to_merge_one_page()
1047 if (write_protect_page(vma, page, &orig_pte) == 0) { in try_to_merge_one_page()
1054 set_page_stable_node(page, NULL); in try_to_merge_one_page()
1055 mark_page_accessed(page); in try_to_merge_one_page()
1057 } else if (pages_identical(page, kpage)) in try_to_merge_one_page()
1058 err = replace_page(vma, page, kpage, orig_pte); in try_to_merge_one_page()
1062 munlock_vma_page(page); in try_to_merge_one_page()
1064 unlock_page(page); in try_to_merge_one_page()
1067 page = kpage; /* for final unlock */ in try_to_merge_one_page()
1071 unlock_page(page); in try_to_merge_one_page()
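
try_to_merge_one_page() combines the pieces for one (vma, page, kpage) pair: the page is locked with trylock_page() (ksmd never waits on a page lock), write-protected, and then either promoted to a KSM page in place when kpage is NULL (the set_page_stable_node(page, NULL) at line 1054) or replaced by the existing kpage when the contents are still byte-identical. In outline:

	/* Sketch of the decision in try_to_merge_one_page(). */
	if (!trylock_page(page))
		goto out;			/* busy; try again next scan */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/* First of a pair: this page itself becomes the KSM page. */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			err = 0;
		} else if (pages_identical(page, kpage)) {
			/* Contents still match: redirect the pte to the shared kpage. */
			err = replace_page(vma, page, kpage, orig_pte);
		}
	}
	unlock_page(page);

The munlock_vma_page() call at line 1062 covers VM_LOCKED vmas: once the mapping has moved over to kpage, the old page must shed its mlocked state (and kpage, locked at line 1067, inherits it).
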
1083 struct page *page, struct page *kpage) in try_to_merge_with_ksm_page() argument
1096 err = try_to_merge_one_page(vma, page, kpage); in try_to_merge_with_ksm_page()
1121 static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, in try_to_merge_two_pages()
1122 struct page *page, in try_to_merge_two_pages() argument
1124 struct page *tree_page) in try_to_merge_two_pages()
1128 err = try_to_merge_with_ksm_page(rmap_item, page, NULL); in try_to_merge_two_pages()
1131 tree_page, page); in try_to_merge_two_pages()
1139 return err ? NULL : page; in try_to_merge_two_pages()
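
try_to_merge_two_pages() explains the NULL kpage above: merging two ordinary anonymous pages is done in two steps, first turning the scanned page itself into a KSM page (try_to_merge_with_ksm_page() with kpage == NULL), then merging the unstable-tree page into it; if the second step fails, break_cow() undoes the first so no KSM page is left with a single mapper. The second rmap_item parameter (tree_rmap_item, not matched by this listing) names the tree page's owner:

	/* Sketch of the two-step merge. */
	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
						 tree_page, page);
		if (err)
			break_cow(rmap_item);	/* undo: restore a private copy */
	}
	return err ? NULL : page;
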
1151 static struct page *stable_tree_search(struct page *page) in stable_tree_search() argument
1160 page_node = page_stable_node(page); in stable_tree_search()
1163 get_page(page); in stable_tree_search()
1164 return page; in stable_tree_search()
1167 nid = get_kpfn_nid(page_to_pfn(page)); in stable_tree_search()
1174 struct page *tree_page; in stable_tree_search()
1183 ret = memcmp_pages(page, tree_page); in stable_tree_search()
1226 get_page(page); in stable_tree_search()
1227 return page; in stable_tree_search()
1234 get_page(page); in stable_tree_search()
1237 page = NULL; in stable_tree_search()
1241 return page; in stable_tree_search()
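
stable_tree_search() walks the stable tree, a per-NUMA-node red-black tree whose nodes are already-merged KSM pages ordered purely by memcmp_pages(), so lookup is a binary search by page contents; every visited node is pinned with get_ksm_page() before its contents are touched. The heart of the descent, simplified (the page_node handling at lines 1160-1164 and 1226-1240, for a page that is itself already a stable page, is left out):

	/* Sketch of the rbtree descent in stable_tree_search(). */
	new = &root->rb_node;			/* root of this NUMA node's stable tree */
	while (*new) {
		struct stable_node *stable_node;
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page)
			return NULL;		/* stale node; the real code prunes it and restarts */

		ret = memcmp_pages(page, tree_page);
		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			return tree_page;	/* identical KSM page; reference already held */
		}
	}
	return NULL;
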
1251 static struct stable_node *stable_tree_insert(struct page *kpage) in stable_tree_insert()
1266 struct page *tree_page; in stable_tree_insert()
1323 struct page *page, in unstable_tree_search_insert() argument
1324 struct page **tree_pagep) in unstable_tree_search_insert()
1331 nid = get_kpfn_nid(page_to_pfn(page)); in unstable_tree_search_insert()
1337 struct page *tree_page; in unstable_tree_search_insert()
1349 if (page == tree_page) { in unstable_tree_search_insert()
1354 ret = memcmp_pages(page, tree_page); in unstable_tree_search_insert()
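
unstable_tree_search_insert() runs the same kind of content-ordered descent over the unstable tree, whose pages are not write-protected and can change under the scanner; that is why the tree is thrown away and rebuilt every scan cycle, and why line 1349 bails out when the candidate and the tree page are the same physical page (a COW-shared page inherited across fork() must not be merged with itself). Sketch:

	/* Sketch of the unstable-tree descent; a miss inserts the new rmap_item. */
	while (*new) {
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;
		if (page == tree_page) {		/* same page via fork: skip */
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);
		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			*tree_pagep = tree_page;	/* possible match: caller tries to merge */
			return tree_rmap_item;
		}
	}
	/* No match: link this rmap_item into the unstable tree for this cycle. */
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);
	return NULL;
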
1415 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) in cmp_and_merge_page() argument
1418 struct page *tree_page = NULL; in cmp_and_merge_page()
1420 struct page *kpage; in cmp_and_merge_page()
1424 stable_node = page_stable_node(page); in cmp_and_merge_page()
1439 kpage = stable_tree_search(page); in cmp_and_merge_page()
1440 if (kpage == page && rmap_item->head == stable_node) { in cmp_and_merge_page()
1448 err = try_to_merge_with_ksm_page(rmap_item, page, kpage); in cmp_and_merge_page()
1468 checksum = calc_checksum(page); in cmp_and_merge_page()
1475 unstable_tree_search_insert(rmap_item, page, &tree_page); in cmp_and_merge_page()
1477 kpage = try_to_merge_two_pages(rmap_item, page, in cmp_and_merge_page()
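
cmp_and_merge_page() is the per-page policy wrapped around those searches: try the stable tree first (line 1439) and merge into an existing KSM page if one matches; otherwise only move on to the unstable tree once the page's checksum has stayed the same across two scans (line 1468), so frequently written pages are never merged; a hit in the unstable tree then produces a brand-new stable page via try_to_merge_two_pages() and stable_tree_insert(). A compressed sketch of that flow (rmap_item bookkeeping and failure cleanup omitted):

	/* Sketch of the policy in cmp_and_merge_page(). */
	kpage = stable_tree_search(page);
	if (kpage) {
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err)
			stable_tree_append(rmap_item, page_stable_node(kpage));
		put_page(kpage);
		return;
	}

	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;	/* contents still volatile: wait */
		return;
	}

	tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
					       tree_rmap_item, tree_page);
		put_page(tree_page);
		if (kpage) {
			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node);
				stable_tree_append(rmap_item, stable_node);
			}
			unlock_page(kpage);
		}
	}
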
1535 static struct rmap_item *scan_get_next_rmap_item(struct page **page) in scan_get_next_rmap_item() argument
1569 struct page *page; in scan_get_next_rmap_item() local
1574 page = get_ksm_page(stable_node, false); in scan_get_next_rmap_item()
1575 if (page) in scan_get_next_rmap_item()
1576 put_page(page); in scan_get_next_rmap_item()
1617 *page = follow_page(vma, ksm_scan.address, FOLL_GET); in scan_get_next_rmap_item()
1618 if (IS_ERR_OR_NULL(*page)) { in scan_get_next_rmap_item()
1623 if (PageAnon(*page) || in scan_get_next_rmap_item()
1624 page_trans_compound_anon(*page)) { in scan_get_next_rmap_item()
1625 flush_anon_page(vma, *page, ksm_scan.address); in scan_get_next_rmap_item()
1626 flush_dcache_page(*page); in scan_get_next_rmap_item()
1634 put_page(*page); in scan_get_next_rmap_item()
1638 put_page(*page); in scan_get_next_rmap_item()
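
scan_get_next_rmap_item() is the scanner's cursor. At the start of each full pass it also probes every stable_node (the get_ksm_page()/put_page() pair at lines 1574-1576) so stale entries get pruned, then it walks every VM_MERGEABLE vma of every registered mm and hands back one anonymous page at a time, with the reference taken by follow_page(FOLL_GET) still held for the caller. The per-address step looks roughly like this:

	/* Sketch of the inner step of scan_get_next_rmap_item(). */
	*page = follow_page(vma, ksm_scan.address, FOLL_GET);
	if (IS_ERR_OR_NULL(*page)) {
		ksm_scan.address += PAGE_SIZE;		/* nothing usable mapped here */
		cond_resched();
		continue;
	}
	if (PageAnon(*page) || page_trans_compound_anon(*page)) {
		flush_anon_page(vma, *page, ksm_scan.address);
		flush_dcache_page(*page);
		rmap_item = get_next_rmap_item(slot, ksm_scan.rmap_list,
					       ksm_scan.address);
		if (rmap_item) {
			ksm_scan.rmap_list = &rmap_item->rmap_list;
			ksm_scan.address += PAGE_SIZE;
		} else
			put_page(*page);		/* no rmap_item: skip this page */
		up_read(&mm->mmap_sem);
		return rmap_item;			/* caller inherits the page reference */
	}
	put_page(*page);				/* not anonymous, not mergeable */
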
1696 struct page *uninitialized_var(page); in ksm_do_scan()
1700 rmap_item = scan_get_next_rmap_item(&page); in ksm_do_scan()
1703 cmp_and_merge_page(page, rmap_item); in ksm_do_scan()
1704 put_page(page); in ksm_do_scan()
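
ksm_do_scan() is the batch loop that ksmd runs: fetch the next candidate page (with its reference), try to merge it, drop the reference, for up to pages_to_scan pages per wakeup. Essentially:

	/* Sketch of ksm_do_scan(); scan_npages comes from the pages_to_scan knob. */
	while (scan_npages-- && likely(!freezing(current))) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;				/* end of this scan pass */
		cmp_and_merge_page(page, rmap_item);
		put_page(page);
	}
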
1862 struct page *ksm_might_need_to_copy(struct page *page, in ksm_might_need_to_copy() argument
1865 struct anon_vma *anon_vma = page_anon_vma(page); in ksm_might_need_to_copy()
1866 struct page *new_page; in ksm_might_need_to_copy()
1868 if (PageKsm(page)) { in ksm_might_need_to_copy()
1869 if (page_stable_node(page) && in ksm_might_need_to_copy()
1871 return page; /* no need to copy it */ in ksm_might_need_to_copy()
1873 return page; /* no need to copy it */ in ksm_might_need_to_copy()
1875 page->index == linear_page_index(vma, address)) { in ksm_might_need_to_copy()
1876 return page; /* still no need to copy it */ in ksm_might_need_to_copy()
1878 if (!PageUptodate(page)) in ksm_might_need_to_copy()
1879 return page; /* let do_swap_page report the error */ in ksm_might_need_to_copy()
1883 copy_user_highpage(new_page, page, address, vma); in ksm_might_need_to_copy()
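
ksm_might_need_to_copy() is called from the swap-in path: a page coming back from swap may once have been a KSM page shared with other address spaces, in which case it must not simply be mapped back writable, so a private copy is allocated. The lines above enumerate the cases where the original can be reused; otherwise the tail of the function does roughly this:

	/* Sketch of the copy path at the end of ksm_might_need_to_copy(). */
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		__set_page_locked(new_page);	/* renamed in later kernels */
	}
	return new_page;
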
1893 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_ksm() argument
1900 VM_BUG_ON_PAGE(!PageKsm(page), page); in rmap_walk_ksm()
1906 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_ksm()
1908 stable_node = page_stable_node(page); in rmap_walk_ksm()
1936 ret = rwc->rmap_one(page, vma, in rmap_walk_ksm()
1942 if (rwc->done && rwc->done(page)) { in rmap_walk_ksm()
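
rmap_walk_ksm() lets the generic rmap machinery visit every PTE mapping a KSM page: the page's stable_node carries an hlist of rmap_items, each pointing at an anon_vma, and rwc->rmap_one() is applied at the recorded address in every vma of every such anon_vma, stopping early once rwc->done() is satisfied. Skeleton of the nested walk for a kernel of this vintage, with anon_vma locking and the second forks pass elided:

	/* Sketch of rmap_walk_ksm()'s nested walk. */
	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct anon_vma_chain *vmac;
		struct vm_area_struct *vma;

		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			vma = vmac->vma;
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;	/* vma doesn't cover this address */

			ret = rwc->rmap_one(page, vma, rmap_item->address,
					    rwc->arg);
			if (ret != SWAP_AGAIN)
				return ret;
			if (rwc->done && rwc->done(page))
				return ret;
		}
	}

The real function makes a second pass (search_new_forks) to reach vmas forked from the original mm after ksmd recorded the rmap_item, and ksm_migrate_page() below is its counterpart for page migration, updating the stable_node's kpfn to point at the new page.
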
1956 void ksm_migrate_page(struct page *newpage, struct page *oldpage) in ksm_migrate_page()