Lines Matching refs:page (mm/rmap.c)

453 struct anon_vma *page_get_anon_vma(struct page *page)  in page_get_anon_vma()  argument
459 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
462 if (!page_mapped(page)) in page_get_anon_vma()
478 if (!page_mapped(page)) { in page_get_anon_vma()
496 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
503 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
506 if (!page_mapped(page)) in page_lock_anon_vma_read()
517 if (!page_mapped(page)) { in page_lock_anon_vma_read()
530 if (!page_mapped(page)) { in page_lock_anon_vma_read()
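
The two helpers above resolve page->mapping to an anon_vma under RCU: page_get_anon_vma() returns it with an elevated refcount, while page_lock_anon_vma_read() additionally takes the anon_vma read lock for interval-tree walks. A minimal caller sketch (the example_* names are illustrative, not from this file):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical: pin a page's anon_vma without locking it. */
static void example_ref_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_get_anon_vma(page);

	if (!anon_vma)
		return;			/* not anon, or no longer mapped */
	/* ... anon_vma stays alive via its refcount ... */
	put_anon_vma(anon_vma);
}

/* Hypothetical: walk under the anon_vma read lock instead. */
static void example_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = page_lock_anon_vma_read(page);

	if (!anon_vma)
		return;
	/* ... interval-tree lookups are safe while read-locked ... */
	page_unlock_anon_vma_read(anon_vma);
}
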
567 __vma_address(struct page *page, struct vm_area_struct *vma) in __vma_address() argument
569 pgoff_t pgoff = page_to_pgoff(page); in __vma_address()
574 vma_address(struct page *page, struct vm_area_struct *vma) in vma_address() argument
576 unsigned long address = __vma_address(page, vma); in vma_address()
588 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
591 if (PageAnon(page)) { in page_address_in_vma()
592 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
600 } else if (page->mapping) { in page_address_in_vma()
601 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
605 address = __vma_address(page, vma); in page_address_in_vma()
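
__vma_address() translates the page's pgoff into a user virtual address inside a vma; page_address_in_vma() wraps it with the ownership checks visible above (anon_vma root match for anon pages, f_mapping match for file pages) and returns -EFAULT when the page does not belong to the vma. A hedged usage sketch (example_* is hypothetical):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical: where is @page mapped in @vma, if anywhere? */
static unsigned long example_addr_in_vma(struct page *page,
					 struct vm_area_struct *vma)
{
	unsigned long address = page_address_in_vma(page, vma);

	if (address == -EFAULT)		/* page not owned by this vma */
		return 0;
	return address;
}
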
649 pte_t *__page_check_address(struct page *page, struct mm_struct *mm, in __page_check_address() argument
656 if (unlikely(PageHuge(page))) { in __page_check_address()
662 ptl = huge_pte_lockptr(page_hstate(page), mm, pte); in __page_check_address()
680 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { in __page_check_address()
697 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) in page_mapped_in_vma() argument
703 address = __vma_address(page, vma); in page_mapped_in_vma()
706 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); in page_mapped_in_vma()
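
page_check_address() (the inline wrapper around __page_check_address() above) returns the pte that maps the page at a given address with the pte lock held, or NULL; page_mapped_in_vma() is its simplest client. The canonical pattern, as a hypothetical sketch:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical: examine the pte for @page at @address, then unlock. */
static bool example_pte_check(struct page *page, struct mm_struct *mm,
			      unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = page_check_address(page, mm, address, &ptl, 0 /* !sync */);
	if (!pte)
		return false;		/* page not mapped here */
	/* ... read or modify *pte while ptl is held ... */
	pte_unmap_unlock(pte, ptl);
	return true;
}
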
723 static int page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
731 if (unlikely(PageTransHuge(page))) { in page_referenced_one()
738 pmd = page_check_address_pmd(page, mm, address, in page_referenced_one()
760 pte = page_check_address(page, mm, address, &ptl, 0); in page_referenced_one()
817 int page_referenced(struct page *page, in page_referenced() argument
825 .mapcount = page_mapcount(page), in page_referenced()
835 if (!page_mapped(page)) in page_referenced()
838 if (!page_rmapping(page)) in page_referenced()
841 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
842 we_locked = trylock_page(page); in page_referenced()
856 ret = rmap_walk(page, &rwc); in page_referenced()
860 unlock_page(page); in page_referenced()
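
page_referenced() counts how many ptes (or pmds, for THP) referenced the page since the last scan, walking every mapping through rmap_walk() with page_referenced_one() as the callback; as shown above, it trylocks the page itself for the anon/KSM cases. A hypothetical reclaim-style caller:

#include <linux/rmap.h>

/* Hypothetical: how many mappings recently referenced @page? */
static int example_check_references(struct page *page)
{
	unsigned long vm_flags;
	int referenced;

	/* !is_locked: page_referenced() trylocks if it must;
	 * NULL memcg: consider mappings from any cgroup */
	referenced = page_referenced(page, 0, NULL, &vm_flags);
	/* vm_flags is the OR of the mapping vmas' flags (VM_LOCKED etc.) */
	return referenced;
}
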
865 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
874 pte = page_check_address(page, mm, address, &ptl, 1); in page_mkclean_one()
907 int page_mkclean(struct page *page) in page_mkclean() argument
917 BUG_ON(!PageLocked(page)); in page_mkclean()
919 if (!page_mapped(page)) in page_mkclean()
922 mapping = page_mapping(page); in page_mkclean()
926 rmap_walk(page, &rwc); in page_mkclean()
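
page_mkclean() write-protects and cleans every pte mapping a locked pagecache page (page_mkclean_one() above does the per-vma work), returning nonzero if any pte was dirty or writable. Callers use it to transfer pte dirt into the page before writeback, roughly as clear_page_dirty_for_io() does; a condensed, hypothetical sketch:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* Hypothetical: move pte dirty bits into the struct page. */
static void example_mkclean(struct page *page)
{
	lock_page(page);		/* page_mkclean() requires PageLocked */
	if (page_mkclean(page))
		set_page_dirty(page);	/* a pte was dirty or writable */
	unlock_page(page);
}
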
943 void page_move_anon_rmap(struct page *page, in page_move_anon_rmap() argument
948 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
950 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); in page_move_anon_rmap()
953 page->mapping = (struct address_space *) anon_vma; in page_move_anon_rmap()
963 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
970 if (PageAnon(page)) in __page_set_anon_rmap()
982 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
983 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
992 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1008 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1009 BUG_ON(page->index != linear_page_index(vma, address)); in __page_check_anon_rmap()
1024 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1027 do_page_add_anon_rmap(page, vma, address, 0); in page_add_anon_rmap()
1035 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1038 int first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1046 if (PageTransHuge(page)) in do_page_add_anon_rmap()
1047 __inc_zone_page_state(page, in do_page_add_anon_rmap()
1049 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, in do_page_add_anon_rmap()
1050 hpage_nr_pages(page)); in do_page_add_anon_rmap()
1052 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1055 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1058 __page_set_anon_rmap(page, vma, address, exclusive); in do_page_add_anon_rmap()
1060 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1073 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1077 SetPageSwapBacked(page); in page_add_new_anon_rmap()
1078 atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ in page_add_new_anon_rmap()
1079 if (PageTransHuge(page)) in page_add_new_anon_rmap()
1080 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); in page_add_new_anon_rmap()
1081 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, in page_add_new_anon_rmap()
1082 hpage_nr_pages(page)); in page_add_new_anon_rmap()
1083 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
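
page_add_new_anon_rmap() is reserved for pages mapped for the very first time: it can set _mapcount straight to 0 and call __page_set_anon_rmap() with exclusive=1, skipping the checks do_page_add_anon_rmap() needs for already-anon pages. A condensed, hypothetical fault-path tail (mirroring the shape of do_anonymous_page(), with counter and memcg steps omitted):

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

/* Hypothetical: install a freshly allocated anon page at @address. */
static void example_map_new_anon(struct vm_area_struct *vma,
				 unsigned long address, struct page *page,
				 pte_t *pte)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	if (vma->vm_flags & VM_WRITE)
		entry = pte_mkwrite(pte_mkdirty(entry));
	page_add_new_anon_rmap(page, vma, address);	/* _mapcount -> 0 */
	lru_cache_add_active_or_unevictable(page, vma);
	set_pte_at(vma->vm_mm, address, pte, entry);
}
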
1092 void page_add_file_rmap(struct page *page) in page_add_file_rmap() argument
1096 memcg = mem_cgroup_begin_page_stat(page); in page_add_file_rmap()
1097 if (atomic_inc_and_test(&page->_mapcount)) { in page_add_file_rmap()
1098 __inc_zone_page_state(page, NR_FILE_MAPPED); in page_add_file_rmap()
1104 static void page_remove_file_rmap(struct page *page) in page_remove_file_rmap() argument
1108 memcg = mem_cgroup_begin_page_stat(page); in page_remove_file_rmap()
1111 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1115 if (unlikely(PageHuge(page))) in page_remove_file_rmap()
1123 __dec_zone_page_state(page, NR_FILE_MAPPED); in page_remove_file_rmap()
1126 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1127 clear_page_mlock(page); in page_remove_file_rmap()
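
The file-side pair above only has to manage _mapcount and the NR_FILE_MAPPED counter, both under the memcg page-stat lock; there is no mapping/index to stamp, since the pagecache owns those fields. A hypothetical fault-path use of the add side (roughly what do_set_pte() does for file pages):

#include <linux/mm.h>
#include <linux/rmap.h>

/* Hypothetical: map a pagecache page into a pte during a fault. */
static void example_map_file_page(struct vm_area_struct *vma,
				  unsigned long address, struct page *page,
				  pte_t *pte)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	page_add_file_rmap(page);	/* bumps NR_FILE_MAPPED on 0 -> 1 */
	set_pte_at(vma->vm_mm, address, pte, entry);
	update_mmu_cache(vma, address, pte);
}
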
1138 void page_remove_rmap(struct page *page) in page_remove_rmap() argument
1140 if (!PageAnon(page)) { in page_remove_rmap()
1141 page_remove_file_rmap(page); in page_remove_rmap()
1146 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1150 if (unlikely(PageHuge(page))) in page_remove_rmap()
1158 if (PageTransHuge(page)) in page_remove_rmap()
1159 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); in page_remove_rmap()
1161 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, in page_remove_rmap()
1162 -hpage_nr_pages(page)); in page_remove_rmap()
1164 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1165 clear_page_mlock(page); in page_remove_rmap()
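
page_remove_rmap() is the single teardown entry point: file pages branch to page_remove_file_rmap() above, anon pages drop _mapcount and the NR_ANON_* counters here, and both clear stray mlock state on the last unmap. It pairs with a reference drop in the unmap path, roughly as zap_pte_range() does; a hedged sketch:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* Hypothetical: tear down one pte that maps @page. */
static void example_zap_one(struct vm_area_struct *vma,
			    unsigned long address, struct page *page,
			    pte_t *pte)
{
	pte_t pteval = ptep_clear_flush(vma, address, pte);

	if (pte_dirty(pteval))
		set_page_dirty(page);
	page_remove_rmap(page);		/* rmap accounting goes first */
	page_cache_release(page);	/* then drop the pte's reference */
}
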
1181 static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1191 pte = page_check_address(page, mm, address, &ptl, 0); in try_to_unmap_one()
1215 flush_cache_page(vma, address, page_to_pfn(page)); in try_to_unmap_one()
1220 set_page_dirty(page); in try_to_unmap_one()
1225 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1226 if (!PageHuge(page)) { in try_to_unmap_one()
1227 if (PageAnon(page)) in try_to_unmap_one()
1233 swp_entry_to_pte(make_hwpoison_entry(page))); in try_to_unmap_one()
1240 if (PageAnon(page)) in try_to_unmap_one()
1244 } else if (PageAnon(page)) { in try_to_unmap_one()
1245 swp_entry_t entry = { .val = page_private(page) }; in try_to_unmap_one()
1248 if (PageSwapCache(page)) { in try_to_unmap_one()
1273 entry = make_migration_entry(page, pte_write(pteval)); in try_to_unmap_one()
1283 entry = make_migration_entry(page, pte_write(pteval)); in try_to_unmap_one()
1288 page_remove_rmap(page); in try_to_unmap_one()
1289 page_cache_release(page); in try_to_unmap_one()
1312 mlock_vma_page(page); in try_to_unmap_one()
1339 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1341 return !page_mapped(page); in page_not_mapped()
1358 int try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1368 VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page); in try_to_unmap()
1378 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1381 ret = rmap_walk(page, &rwc); in try_to_unmap()
1383 if (ret != SWAP_MLOCK && !page_mapped(page)) in try_to_unmap()
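
try_to_unmap() runs try_to_unmap_one() against every vma through rmap_walk() and, per the check above, only reports SWAP_SUCCESS once no mapping remains; SWAP_MLOCK flags a VM_LOCKED vma. A hypothetical reclaim-style caller (names illustrative):

#include <linux/rmap.h>

/* Hypothetical: try to fully unmap a locked page for reclaim. */
static bool example_unmap_for_reclaim(struct page *page)
{
	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_SUCCESS:
		return true;		/* no pte maps the page any more */
	case SWAP_MLOCK:		/* an mlocked vma still maps it */
	case SWAP_AGAIN:		/* transient failure; caller retries */
	case SWAP_FAIL:
	default:
		return false;
	}
}
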
1403 int try_to_munlock(struct page *page) in try_to_munlock() argument
1414 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1416 ret = rmap_walk(page, &rwc); in try_to_munlock()
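
try_to_munlock() reuses the try_to_unmap_one() machinery with TTU_MUNLOCK to scan every vma mapping the page: SWAP_MLOCK back from the walk means some VM_LOCKED vma still maps it, so the page must stay unevictable. A hedged sketch of the decision its real caller (munlock_vma_page()) makes:

#include <linux/rmap.h>

/* Hypothetical: after munlock, may @page become evictable again? */
static bool example_page_evictable_again(struct page *page)
{
	/* caller holds the page lock and has isolated it from the LRU */
	return try_to_munlock(page) != SWAP_MLOCK;
}
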
1429 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1435 return rwc->anon_lock(page); in rmap_walk_anon_lock()
1443 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1465 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_anon() argument
1472 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1476 pgoff = page_to_pgoff(page); in rmap_walk_anon()
1479 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1484 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_anon()
1487 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1507 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_file() argument
1509 struct address_space *mapping = page->mapping; in rmap_walk_file()
1520 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
1525 pgoff = page_to_pgoff(page); in rmap_walk_file()
1528 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1533 ret = rwc->rmap_one(page, vma, address, rwc->arg); in rmap_walk_file()
1536 if (rwc->done && rwc->done(page)) in rmap_walk_file()
1545 int rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
1547 if (unlikely(PageKsm(page))) in rmap_walk()
1548 return rmap_walk_ksm(page, rwc); in rmap_walk()
1549 else if (PageAnon(page)) in rmap_walk()
1550 return rmap_walk_anon(page, rwc); in rmap_walk()
1552 return rmap_walk_file(page, rwc); in rmap_walk()
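
rmap_walk() dispatches on page type: KSM pages go through rmap_walk_ksm(), anon pages through the anon_vma interval tree, and file pages through the i_mmap tree (which requires the page to be locked, per the VM_BUG_ON above). Every client in this file (page_referenced(), page_mkclean(), try_to_unmap(), try_to_munlock()) is just a struct rmap_walk_control plus callbacks. A minimal hypothetical client:

#include <linux/rmap.h>

/* Hypothetical callback: return SWAP_AGAIN to keep walking. */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	int *nr_maps = arg;

	(*nr_maps)++;
	return SWAP_AGAIN;
}

/* Hypothetical: count the vmas currently mapping @page. */
static int example_count_mappings(struct page *page)
{
	int nr_maps = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = example_rmap_one,
		.arg = &nr_maps,
	};

	rmap_walk(page, &rwc);
	return nr_maps;
}
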
1561 static void __hugepage_set_anon_rmap(struct page *page, in __hugepage_set_anon_rmap() argument
1568 if (PageAnon(page)) in __hugepage_set_anon_rmap()
1574 page->mapping = (struct address_space *) anon_vma; in __hugepage_set_anon_rmap()
1575 page->index = linear_page_index(vma, address); in __hugepage_set_anon_rmap()
1578 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
1584 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
1587 first = atomic_inc_and_test(&page->_mapcount); in hugepage_add_anon_rmap()
1589 __hugepage_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1592 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
1596 atomic_set(&page->_mapcount, 0); in hugepage_add_new_anon_rmap()
1597 __hugepage_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()
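
The hugetlb variants mirror the small-page anon helpers: hugepage_add_anon_rmap() for mapping an already-anon huge page again, hugepage_add_new_anon_rmap() for a freshly allocated one, both ending in __hugepage_set_anon_rmap() to stamp page->mapping and page->index. A hedged sketch of the new-page case (roughly the tail of a hugetlb fault, with pte construction omitted):

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>

/* Hypothetical: attach a new anon huge page and install its pte. */
static void example_new_huge_anon(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long haddr, pte_t *ptep,
				  struct page *page, pte_t entry)
{
	hugepage_add_new_anon_rmap(page, vma, haddr);	/* _mapcount -> 0 */
	set_huge_pte_at(mm, haddr, ptep, entry);
}
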