Lines Matching refs:page (mm/rmap.c)
458 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
464 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
467 if (!page_mapped(page)) in page_get_anon_vma()
483 if (!page_mapped(page)) { in page_get_anon_vma()
501 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
508 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
511 if (!page_mapped(page)) in page_lock_anon_vma_read()
522 if (!page_mapped(page)) { in page_lock_anon_vma_read()
535 if (!page_mapped(page)) { in page_lock_anon_vma_read()
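Both helpers above resolve page->mapping to its anon_vma for an anonymous page: page_get_anon_vma() only pins it with a reference, while page_lock_anon_vma_read() also takes the anon_vma rwsem for reading, as the rmap walks below require. A minimal usage sketch (example_locked_walk is a hypothetical caller, not from rmap.c; rmap_walk_anon_lock() further down is the in-tree equivalent):

    #include <linux/rmap.h>

    static void example_locked_walk(struct page *page)
    {
        struct anon_vma *anon_vma;

        anon_vma = page_lock_anon_vma_read(page);
        if (!anon_vma)
            return;    /* not a mapped anonymous page */
        /* ... walk anon_vma's interval tree under the read lock ... */
        page_unlock_anon_vma_read(anon_vma);
    }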
572 __vma_address(struct page *page, struct vm_area_struct *vma) in __vma_address() argument
574 pgoff_t pgoff = page_to_pgoff(page); in __vma_address()
579 vma_address(struct page *page, struct vm_area_struct *vma) in vma_address() argument
581 unsigned long address = __vma_address(page, vma); in vma_address()
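vma_address() and its double-underscore helper turn a page's file offset into a user virtual address within one VMA. The listing elides the body; a sketch of the arithmetic in this era (a reconstruction, named sketch_vma_address here to avoid clashing with the real symbol):

    static inline unsigned long
    sketch_vma_address(struct page *page, struct vm_area_struct *vma)
    {
        pgoff_t pgoff = page_to_pgoff(page);

        /* offset within the mapping, scaled to bytes, from vm_start */
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    }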
644 struct page *page, bool writable) in set_tlb_ubc_flush_pending() argument
680 struct page *page, bool writable) in set_tlb_ubc_flush_pending() argument
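set_tlb_ubc_flush_pending() appears twice (644 and 680) because it is compiled in two variants. Assuming the usual layout of this era, the split is on CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH, with a no-op fallback; roughly:

    #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
    static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
                struct page *page, bool writable)
    {
        /* queue mm's CPUs for one batched TLB flush after the walk */
    }
    #else
    static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
                struct page *page, bool writable)
    {
        /* no-op: this architecture does not batch unmap flushes */
    }
    #endif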
694 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
697 if (PageAnon(page)) { in page_address_in_vma()
698 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
706 } else if (page->mapping) { in page_address_in_vma()
707 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
711 address = __vma_address(page, vma); in page_address_in_vma()
755 pte_t *__page_check_address(struct page *page, struct mm_struct *mm, in __page_check_address() argument
762 if (unlikely(PageHuge(page))) { in __page_check_address()
768 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); in __page_check_address()
786 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { in __page_check_address()
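__page_check_address() (and the page_check_address() wrapper in rmap.h) returns the pte mapping the page in the given mm with the pte lock held, or NULL. The caller pattern, which page_referenced_one() and page_mkclean_one() below both follow, is a check-use-unlock sequence (sketch; example_with_pte is a made-up name):

    static void example_with_pte(struct page *page, struct mm_struct *mm,
                     unsigned long address)
    {
        spinlock_t *ptl;
        pte_t *pte;

        pte = page_check_address(page, mm, address, &ptl, 0);
        if (!pte)
            return;    /* not (or no longer) mapped at address */
        /* ... examine or update *pte under the pte lock ... */
        pte_unmap_unlock(pte, ptl);
    }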
803 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) in page_mapped_in_vma() argument
809 address = __vma_address(page, vma); in page_mapped_in_vma()
812 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
829 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
837 if (unlikely(PageTransHuge(page))) { in page_referenced_one()
844 pmd = page_check_address_pmd(page, mm, address, in page_referenced_one()
866 pte = page_check_address(page, mm, address, &ptl, 0); in page_referenced_one()
891 clear_page_idle(page); in page_referenced_one()
892 if (test_and_clear_page_young(page)) in page_referenced_one()
928 int page_referenced(struct page *page, in page_referenced() argument
936 .mapcount = page_mapcount(page), in page_referenced()
946 if (!page_mapped(page)) in page_referenced()
949 if (!page_rmapping(page)) in page_referenced()
952 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
953 we_locked = trylock_page(page); in page_referenced()
967 ret = rmap_walk(page, &rwc); in page_referenced()
971 unlock_page(page); in page_referenced()
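In this era page_referenced() takes (page, is_locked, memcg, *vm_flags) and returns a count of mappings found recently referenced, while reporting the union of the covering VMAs' flags through vm_flags. A reclaim-side sketch, modeled loosely on page_check_references() in mm/vmscan.c (example_was_referenced is a made-up name; a NULL memcg counts references from all cgroups):

    static bool example_was_referenced(struct page *page,
                       struct mem_cgroup *memcg)
    {
        unsigned long vm_flags;
        int referenced;

        /* is_locked=1: the caller is assumed to hold the page lock */
        referenced = page_referenced(page, 1, memcg, &vm_flags);
        if (vm_flags & VM_LOCKED)
            return true;    /* mlocked: vmscan keeps it regardless */
        return referenced != 0;
    }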
976 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
985 pte = page_check_address(page, mm, address, &ptl, 1); in page_mkclean_one()
1018 int page_mkclean(struct page *page) in page_mkclean() argument
1028 BUG_ON(!PageLocked(page)); in page_mkclean()
1030 if (!page_mapped(page)) in page_mkclean()
1033 mapping = page_mapping(page); in page_mkclean()
1037 rmap_walk(page, &rwc); in page_mkclean()
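page_mkclean() write-protects and cleans every pte mapping a file page so writeback can detect later re-dirtying. Its classic pairing, as clear_page_dirty_for_io() in mm/page-writeback.c does in this era (fragment):

    /* if any pte was dirty or writable, move that dirt back to the
     * page before I/O starts, so nothing is lost if I/O fails */
    if (page_mkclean(page))
        set_page_dirty(page);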
1054 void page_move_anon_rmap(struct page *page, in page_move_anon_rmap() argument
1059 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1061 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); in page_move_anon_rmap()
1069 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1079 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1086 if (PageAnon(page)) in __page_set_anon_rmap()
1098 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1099 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
1108 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1124 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1125 BUG_ON(page->index != linear_page_index(vma, address)); in __page_check_anon_rmap()
1140 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1143 do_page_add_anon_rmap(page, vma, address, 0); in page_add_anon_rmap()
1151 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1154 int first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1162 if (PageTransHuge(page)) in do_page_add_anon_rmap()
1163 __inc_zone_page_state(page, in do_page_add_anon_rmap()
1165 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, in do_page_add_anon_rmap()
1166 hpage_nr_pages(page)); in do_page_add_anon_rmap()
1168 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1171 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1174 __page_set_anon_rmap(page, vma, address, exclusive); in do_page_add_anon_rmap()
1176 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1189 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1193 SetPageSwapBacked(page); in page_add_new_anon_rmap()
1194 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ in page_add_new_anon_rmap()
1195 if (PageTransHuge(page)) in page_add_new_anon_rmap()
1196 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); in page_add_new_anon_rmap()
1197 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, in page_add_new_anon_rmap()
1198 hpage_nr_pages(page)); in page_add_new_anon_rmap()
1199 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
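page_add_new_anon_rmap() is the cheap variant for a freshly allocated page that cannot yet be mapped anywhere else: it sets _mapcount to 0 directly rather than incrementing, and always passes exclusive=1 to __page_set_anon_rmap(). A fault-path fragment, modeled on do_anonymous_page() in mm/memory.c of this era (memcg comes from the earlier mem_cgroup_try_charge() call in that function):

    page_add_new_anon_rmap(page, vma, address);
    mem_cgroup_commit_charge(page, memcg, false);
    lru_cache_add_active_or_unevictable(page, vma);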
1208 void page_add_file_rmap(struct page *page) in page_add_file_rmap() argument
1212 memcg = mem_cgroup_begin_page_stat(page); in page_add_file_rmap()
1213 if (atomic_inc_and_test(&page->_mapcount)) { in page_add_file_rmap()
1214 __inc_zone_page_state(page, NR_FILE_MAPPED); in page_add_file_rmap()
1220 static void page_remove_file_rmap(struct page *page) in page_remove_file_rmap() argument
1224 memcg = mem_cgroup_begin_page_stat(page); in page_remove_file_rmap()
1227 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1231 if (unlikely(PageHuge(page))) in page_remove_file_rmap()
1239 __dec_zone_page_state(page, NR_FILE_MAPPED); in page_remove_file_rmap()
1242 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1243 clear_page_mlock(page); in page_remove_file_rmap()
1254 void page_remove_rmap(struct page *page) in page_remove_rmap() argument
1256 if (!PageAnon(page)) { in page_remove_rmap()
1257 page_remove_file_rmap(page); in page_remove_rmap()
1262 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1266 if (unlikely(PageHuge(page))) in page_remove_rmap()
1274 if (PageTransHuge(page)) in page_remove_rmap()
1275 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); in page_remove_rmap()
1277 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, in page_remove_rmap()
1278 -hpage_nr_pages(page)); in page_remove_rmap()
1280 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1281 clear_page_mlock(page); in page_remove_rmap()
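page_remove_rmap() and the add side both lean on one convention: page->_mapcount starts at -1, so atomic_inc_and_test() fires on the first mapping (result 0) and atomic_add_negative(-1, ...) fires on the last unmap (result -1), and only those two transitions touch the NR_* statistics. A standalone illustration of the idiom:

    atomic_t mapcount = ATOMIC_INIT(-1);    /* "no mappings" state */

    int first = atomic_inc_and_test(&mapcount);     /* true: now 0, first map */
    int last  = atomic_add_negative(-1, &mapcount); /* true: now -1, last unmap */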
1297 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1311 pte = page_check_address(page, mm, address, &ptl, 0); in try_to_unmap_one()
1323 mlock_vma_page(page); in try_to_unmap_one()
1338 flush_cache_page(vma, address, page_to_pfn(page)); in try_to_unmap_one()
1349 set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval)); in try_to_unmap_one()
1356 set_page_dirty(page); in try_to_unmap_one()
1361 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1362 if (PageHuge(page)) { in try_to_unmap_one()
1363 hugetlb_count_sub(1 << compound_order(page), mm); in try_to_unmap_one()
1365 if (PageAnon(page)) in try_to_unmap_one()
1371 swp_entry_to_pte(make_hwpoison_entry(page))); in try_to_unmap_one()
1378 if (PageAnon(page)) in try_to_unmap_one()
1390 entry = make_migration_entry(page, pte_write(pteval)); in try_to_unmap_one()
1395 } else if (PageAnon(page)) { in try_to_unmap_one()
1396 swp_entry_t entry = { .val = page_private(page) }; in try_to_unmap_one()
1402 VM_BUG_ON_PAGE(!PageSwapCache(page), page); in try_to_unmap_one()
1423 page_remove_rmap(page); in try_to_unmap_one()
1424 page_cache_release(page); in try_to_unmap_one()
1453 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1455 return !page_mapped(page); in page_not_mapped()
1472 int try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1482 VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page); in try_to_unmap()
1492 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1495 ret = rmap_walk(page, &rwc); in try_to_unmap()
1497 if (ret != SWAP_MLOCK && !page_mapped(page)) in try_to_unmap()
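try_to_unmap() reports one of the SWAP_* codes from rmap.h, and SWAP_SUCCESS is only returned once page_mapped() is false (the check at 1497). A caller sketch, modeled on how shrink_page_list() in mm/vmscan.c dispatches on the result (example_unmap is a made-up name):

    static bool example_unmap(struct page *page, enum ttu_flags flags)
    {
        switch (try_to_unmap(page, flags)) {
        case SWAP_SUCCESS:
            return true;    /* every pte gone; page may be freeable */
        case SWAP_AGAIN:    /* still mapped somewhere, retry later */
        case SWAP_FAIL:     /* permanent failure */
        case SWAP_MLOCK:    /* mlocked; belongs on the unevictable list */
        default:
            return false;
        }
    }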
1517 int try_to_munlock(struct page *page) in try_to_munlock() argument
1528 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1530 ret = rmap_walk(page, &rwc); in try_to_munlock()
1543 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1549 return rwc->anon_lock(page); in rmap_walk_anon_lock()
1557 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1579 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_anon() argument
1586 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1590 pgoff = page_to_pgoff(page); in rmap_walk_anon()
1593 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1600 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_anon()
1603 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1623 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_file() argument
1625 struct address_space *mapping = page->mapping; in rmap_walk_file()
1636 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
1641 pgoff = page_to_pgoff(page); in rmap_walk_file()
1644 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1651 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_file()
1654 if (rwc->done && rwc->done(page)) in rmap_walk_file()
1663 int rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
1665 if (unlikely(PageKsm(page))) in rmap_walk()
1666 return rmap_walk_ksm(page, rwc); in rmap_walk()
1667 else if (PageAnon(page)) in rmap_walk()
1668 return rmap_walk_anon(page, rwc); in rmap_walk()
1670 return rmap_walk_file(page, rwc); in rmap_walk()
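rmap_walk() is the single dispatcher: KSM, anonymous and file pages each get their own interval-tree walk, but all of them drive the caller's hooks through a struct rmap_walk_control. As a concrete instance, the control block page_referenced() builds around lines 936-967 above looks like this (pra is that function's local struct page_referenced_arg):

    struct rmap_walk_control rwc = {
        .rmap_one = page_referenced_one,      /* called per mapping VMA */
        .arg = (void *)&pra,                  /* accumulates results */
        .anon_lock = page_lock_anon_vma_read, /* custom anon_vma locking */
    };

    ret = rmap_walk(page, &rwc);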
1679 static void __hugepage_set_anon_rmap(struct page *page, in __hugepage_set_anon_rmap() argument
1686 if (PageAnon(page)) in __hugepage_set_anon_rmap()
1692 page->mapping = (struct address_space *) anon_vma; in __hugepage_set_anon_rmap()
1693 page->index = linear_page_index(vma, address); in __hugepage_set_anon_rmap()
1696 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
1702 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
1705 first = atomic_inc_and_test(&page->_mapcount); in hugepage_add_anon_rmap()
1707 __hugepage_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1710 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
1714 atomic_set(&page->_mapcount, 0); in hugepage_add_new_anon_rmap()
1715 __hugepage_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
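The hugepage_* helpers mirror the ordinary anon-rmap calls but skip the NR_ANON_PAGES accounting, since hugetlb pages are not counted there (page_remove_rmap() likewise bails out early for PageHuge at 1266). A COW fragment modeled on hugetlb_cow() in mm/hugetlb.c of this era (make_huge_pte() is local to that file):

    set_huge_pte_at(mm, address, ptep,
            make_huge_pte(vma, new_page, 1));
    page_remove_rmap(old_page);
    /* new_page is exclusive to this fault, hence the "new" variant */
    hugepage_add_new_anon_rmap(new_page, vma, address);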