Lines matching references to the identifier page in mm/swapfile.c (cross-reference listing: each line gives the source line number, the matching code, and the enclosing function; declarations are additionally tagged local or argument). The helpers in use, such as page_cache_release() and the three-argument memcg charge calls, place the source at a pre-4.6 kernel.

99 struct page *page; in __try_to_reclaim_swap() local
102 page = find_get_page(swap_address_space(entry), entry.val); in __try_to_reclaim_swap()
103 if (!page) in __try_to_reclaim_swap()
112 if (trylock_page(page)) { in __try_to_reclaim_swap()
113 ret = try_to_free_swap(page); in __try_to_reclaim_swap()
114 unlock_page(page); in __try_to_reclaim_swap()
116 page_cache_release(page); in __try_to_reclaim_swap()
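
These first matches all fall in __try_to_reclaim_swap(), which opportunistically frees the swap-cache page behind a slot being reclaimed. A condensed reconstruction from the fragments above; the swp_entry_t setup and the return plumbing are filled in from context and may differ slightly from the exact source:

static int __try_to_reclaim_swap(struct swap_info_struct *si,
                                 unsigned long offset)
{
        swp_entry_t entry = swp_entry(si->type, offset);
        struct page *page;
        int ret = 0;

        page = find_get_page(swap_address_space(entry), entry.val);
        if (!page)
                return 0;
        /*
         * Called with swap locks held, so trylock_page() avoids
         * deadlocking against reclaim; the normal path is
         * try_to_free_swap() under an explicit lock_page().
         */
        if (trylock_page(page)) {
                ret = try_to_free_swap(page);
                unlock_page(page);
        }
        page_cache_release(page);       /* drop find_get_page()'s reference */
        return ret;
}
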
862 int page_swapcount(struct page *page) in page_swapcount() argument
868 entry.val = page_private(page); in page_swapcount()
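
page_swapcount() recovers the swp_entry_t stashed in page->private and reads that slot's reference count out of the swap map. A sketch; the swap_info_get()/swap_count() plumbing is assumed from the surrounding source:

int page_swapcount(struct page *page)
{
        int count = 0;
        struct swap_info_struct *p;
        swp_entry_t entry;

        entry.val = page_private(page); /* swap entry lives in page->private */
        p = swap_info_get(entry);
        if (p) {
                count = swap_count(p->swap_map[swp_offset(entry)]);
                spin_unlock(&p->lock);
        }
        return count;
}
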
883 int reuse_swap_page(struct page *page) in reuse_swap_page() argument
887 VM_BUG_ON_PAGE(!PageLocked(page), page); in reuse_swap_page()
888 if (unlikely(PageKsm(page))) in reuse_swap_page()
890 count = page_mapcount(page); in reuse_swap_page()
891 if (count <= 1 && PageSwapCache(page)) { in reuse_swap_page()
892 count += page_swapcount(page); in reuse_swap_page()
893 if (count == 1 && !PageWriteback(page)) { in reuse_swap_page()
894 delete_from_swap_cache(page); in reuse_swap_page()
895 SetPageDirty(page); in reuse_swap_page()
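
reuse_swap_page() decides whether a locked anonymous page has exactly one user, so a write fault can reuse it in place instead of copying. Only the two returns are filled in here:

int reuse_swap_page(struct page *page)
{
        int count;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (unlikely(PageKsm(page)))
                return 0;       /* KSM pages are shared by construction */
        count = page_mapcount(page);
        if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
                if (count == 1 && !PageWriteback(page)) {
                        /* sole owner: detach from swap cache, keep it dirty */
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
        }
        return count <= 1;
}
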
905 int try_to_free_swap(struct page *page) in try_to_free_swap() argument
907 VM_BUG_ON_PAGE(!PageLocked(page), page); in try_to_free_swap()
909 if (!PageSwapCache(page)) in try_to_free_swap()
911 if (PageWriteback(page)) in try_to_free_swap()
913 if (page_swapcount(page)) in try_to_free_swap()
934 delete_from_swap_cache(page); in try_to_free_swap()
935 SetPageDirty(page); in try_to_free_swap()
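
try_to_free_swap() is the gatekeeper both reclaim paths rely on: it refuses while the page is under writeback or its swap slot is still referenced, then detaches the page from the swap cache. A sketch; the real function also bails out while hibernation is writing its image:

int try_to_free_swap(struct page *page)
{
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (!PageSwapCache(page))
                return 0;
        if (PageWriteback(page))
                return 0;
        if (page_swapcount(page))
                return 0;       /* the slot is still referenced by some pte */

        /* (hibernation-in-progress check elided) */
        delete_from_swap_cache(page);
        SetPageDirty(page);     /* the only copy is now in memory */
        return 1;
}
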
946 struct page *page = NULL; in free_swap_and_cache() local
954 page = find_get_page(swap_address_space(entry), in free_swap_and_cache()
956 if (page && !trylock_page(page)) { in free_swap_and_cache()
957 page_cache_release(page); in free_swap_and_cache()
958 page = NULL; in free_swap_and_cache()
963 if (page) { in free_swap_and_cache()
968 if (PageSwapCache(page) && !PageWriteback(page) && in free_swap_and_cache()
969 (!page_mapped(page) || vm_swap_full())) { in free_swap_and_cache()
970 delete_from_swap_cache(page); in free_swap_and_cache()
971 SetPageDirty(page); in free_swap_and_cache()
973 unlock_page(page); in free_swap_and_cache()
974 page_cache_release(page); in free_swap_and_cache()
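
free_swap_and_cache() drops one reference on a swap entry and, when only the cache reference remains, tries to reclaim the cached page as well. Reassembled from the matches; the swap_info_get()/swap_entry_free() plumbing is inferred from context:

int free_swap_and_cache(swp_entry_t entry)
{
        struct swap_info_struct *p;
        struct page *page = NULL;

        if (non_swap_entry(entry))
                return 1;

        p = swap_info_get(entry);
        if (p) {
                if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
                        page = find_get_page(swap_address_space(entry),
                                             entry.val);
                        if (page && !trylock_page(page)) {
                                page_cache_release(page);
                                page = NULL;    /* contended: leave it alone */
                        }
                }
                spin_unlock(&p->lock);
        }
        if (page) {
                /*
                 * Not mapped elsewhere, or swap space is getting full:
                 * drop the swap-cache copy right away.
                 */
                if (PageSwapCache(page) && !PageWriteback(page) &&
                    (!page_mapped(page) || vm_swap_full())) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
                }
                unlock_page(page);
                page_cache_release(page);
        }
        return p != NULL;
}
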
1093 unsigned long addr, swp_entry_t entry, struct page *page) in unuse_pte() argument
1095 struct page *swapcache; in unuse_pte()
1101 swapcache = page; in unuse_pte()
1102 page = ksm_might_need_to_copy(page, vma, addr); in unuse_pte()
1103 if (unlikely(!page)) in unuse_pte()
1106 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) { in unuse_pte()
1113 mem_cgroup_cancel_charge(page, memcg); in unuse_pte()
1120 get_page(page); in unuse_pte()
1122 pte_mkold(mk_pte(page, vma->vm_page_prot))); in unuse_pte()
1123 if (page == swapcache) { in unuse_pte()
1124 page_add_anon_rmap(page, vma, addr); in unuse_pte()
1125 mem_cgroup_commit_charge(page, memcg, true); in unuse_pte()
1127 page_add_new_anon_rmap(page, vma, addr); in unuse_pte()
1128 mem_cgroup_commit_charge(page, memcg, false); in unuse_pte()
1129 lru_cache_add_active_or_unevictable(page, vma); in unuse_pte()
1136 activate_page(page); in unuse_pte()
1140 if (page != swapcache) { in unuse_pte()
1141 unlock_page(page); in unuse_pte()
1142 put_page(page); in unuse_pte()
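
unuse_pte() re-installs a page at one pte that currently holds the swap entry, handling the KSM case where the swap-cache page must first be copied; note the two memcg commit flavors depending on whether the original swap-cache page or a fresh copy gets mapped. A reconstruction; the locals, counter updates and error labels are filled in from context:

static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, swp_entry_t entry, struct page *page)
{
        struct page *swapcache;
        struct mem_cgroup *memcg;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 1;

        swapcache = page;
        page = ksm_might_need_to_copy(page, vma, addr);
        if (unlikely(!page))
                return -ENOMEM;

        if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg)) {
                ret = -ENOMEM;
                goto out_nolock;
        }

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
                /* raced: someone else already resolved this entry */
                mem_cgroup_cancel_charge(page, memcg);
                ret = 0;
                goto out;
        }

        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
        get_page(page);
        set_pte_at(vma->vm_mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        if (page == swapcache) {
                page_add_anon_rmap(page, vma, addr);
                mem_cgroup_commit_charge(page, memcg, true);
        } else {        /* ksm_might_need_to_copy() made a private copy */
                page_add_new_anon_rmap(page, vma, addr);
                mem_cgroup_commit_charge(page, memcg, false);
                lru_cache_add_active_or_unevictable(page, vma);
        }
        swap_free(entry);
        activate_page(page);    /* don't let it be swapped right back out */
out:
        pte_unmap_unlock(pte, ptl);
out_nolock:
        if (page != swapcache) {
                unlock_page(page);
                put_page(page);
        }
        return ret;
}
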
1149 swp_entry_t entry, struct page *page) in unuse_pte_range() argument
1172 ret = unuse_pte(vma, pmd, addr, entry, page); in unuse_pte_range()
1185 swp_entry_t entry, struct page *page) in unuse_pmd_range() argument
1196 ret = unuse_pte_range(vma, pmd, addr, next, entry, page); in unuse_pmd_range()
1205 swp_entry_t entry, struct page *page) in unuse_pud_range() argument
1216 ret = unuse_pmd_range(vma, pud, addr, next, entry, page); in unuse_pud_range()
1224 swp_entry_t entry, struct page *page) in unuse_vma() argument
1230 if (page_anon_vma(page)) { in unuse_vma()
1231 addr = page_address_in_vma(page, vma); in unuse_vma()
1246 ret = unuse_pud_range(vma, pgd, addr, next, entry, page); in unuse_vma()
1254 swp_entry_t entry, struct page *page) in unuse_mm() argument
1264 activate_page(page); in unuse_mm()
1265 unlock_page(page); in unuse_mm()
1267 lock_page(page); in unuse_mm()
1270 if (vma->anon_vma && (ret = unuse_vma(vma, entry, page))) in unuse_mm()
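
unuse_pte_range(), unuse_pmd_range(), unuse_pud_range() and unuse_vma() form the usual four-level page-table walk, each level iterating its range and delegating downward until unuse_pte() does the work; unuse_mm() then loops that walk over every VMA that has an anon_vma, dropping and retaking the page lock around mmap_sem as the matched lines in unuse_mm() show. One representative level, sketched:

static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                swp_entry_t entry, struct page *page)
{
        pmd_t *pmd;
        unsigned long next;
        int ret;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
                        continue;       /* nothing mapped at this pmd */
                ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
                if (ret)
                        return ret;     /* found and unused, or error */
        } while (pmd++, addr = next, addr != end);
        return 0;
}
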
1341 struct page *page; in try_to_unuse() local
1381 page = read_swap_cache_async(entry, in try_to_unuse()
1383 if (!page) { in try_to_unuse()
1421 wait_on_page_locked(page); in try_to_unuse()
1422 wait_on_page_writeback(page); in try_to_unuse()
1423 lock_page(page); in try_to_unuse()
1424 wait_on_page_writeback(page); in try_to_unuse()
1431 retval = shmem_unuse(entry, page); in try_to_unuse()
1438 retval = unuse_mm(start_mm, entry, page); in try_to_unuse()
1467 retval = unuse_mm(mm, entry, page); in try_to_unuse()
1483 unlock_page(page); in try_to_unuse()
1484 page_cache_release(page); in try_to_unuse()
1508 PageDirty(page) && PageSwapCache(page)) { in try_to_unuse()
1513 swap_writepage(page, &wbc); in try_to_unuse()
1514 lock_page(page); in try_to_unuse()
1515 wait_on_page_writeback(page); in try_to_unuse()
1525 if (PageSwapCache(page) && in try_to_unuse()
1526 likely(page_private(page) == entry.val)) in try_to_unuse()
1527 delete_from_swap_cache(page); in try_to_unuse()
1534 SetPageDirty(page); in try_to_unuse()
1535 unlock_page(page); in try_to_unuse()
1536 page_cache_release(page); in try_to_unuse()
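
try_to_unuse() itself runs to a couple of hundred lines; the matches trace what it does per swap entry. An annotated outline, heavily condensed, with the retry loops, start_mm bookkeeping and error handling omitted:

/* per-entry body of try_to_unuse(), heavily condensed */
entry = swp_entry(type, i);
page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, NULL, 0);
if (!page)
        goto retry;     /* entry freed meanwhile, or allocation failed */

/* serialize against any in-flight swap I/O before touching the page */
wait_on_page_locked(page);
wait_on_page_writeback(page);
lock_page(page);
wait_on_page_writeback(page);

/* strip the entry from every user: a shmem mapping or process mms */
if (swap_count(swcount) == SWAP_MAP_SHMEM)
        retval = shmem_unuse(entry, page);
else
        retval = unuse_mm(start_mm, entry, page);  /* then each other mm */

/* a dirty swap-cache page must be written out before its slot is reused */
if (PageDirty(page) && PageSwapCache(page)) {
        struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE };

        swap_writepage(page, &wbc);     /* unlocks the page */
        lock_page(page);
        wait_on_page_writeback(page);
}

/* finally detach from the swap cache, if the entry is still ours */
if (PageSwapCache(page) && likely(page_private(page) == entry.val))
        delete_from_swap_cache(page);

SetPageDirty(page);
unlock_page(page);
page_cache_release(page);
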
1610 sector_t map_swap_page(struct page *page, struct block_device **bdev) in map_swap_page() argument
1613 entry.val = page_private(page); in map_swap_page()
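
map_swap_page() is a thin wrapper that pulls the swap entry out of page->private and resolves it to a disk sector via the file's internal helper:

sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
        swp_entry_t entry;

        entry.val = page_private(page);
        return map_swap_entry(entry, bdev);
}
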
2361 struct page *page = NULL; in SYSCALL_DEFINE2() local
2416 page = read_mapping_page(mapping, 0, swap_file); in SYSCALL_DEFINE2()
2417 if (IS_ERR(page)) { in SYSCALL_DEFINE2()
2418 error = PTR_ERR(page); in SYSCALL_DEFINE2()
2421 swap_header = kmap(page); in SYSCALL_DEFINE2()
2553 if (page && !IS_ERR(page)) { in SYSCALL_DEFINE2()
2554 kunmap(page); in SYSCALL_DEFINE2()
2555 page_cache_release(page); in SYSCALL_DEFINE2()
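
Inside sys_swapon() (the SYSCALL_DEFINE2 matches) the page references cover reading the swap header: page 0 of the swap file is read through the page cache, kept kmapped while the header is parsed, and released on the common exit path whether or not swapon succeeded. The pattern, with everything in between elided:

        page = read_mapping_page(mapping, 0, swap_file);
        if (IS_ERR(page)) {
                error = PTR_ERR(page);
                goto bad_swap;
        }
        swap_header = kmap(page);       /* header parsed while mapped */

        /* much later, on the shared exit path: */
        if (page && !IS_ERR(page)) {
                kunmap(page);
                page_cache_release(page);
        }
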
2701 struct swap_info_struct *page_swap_info(struct page *page) in page_swap_info() argument
2703 swp_entry_t swap = { .val = page_private(page) }; in page_swap_info()
2704 BUG_ON(!PageSwapCache(page)); in page_swap_info()
2711 struct address_space *__page_file_mapping(struct page *page) in __page_file_mapping() argument
2713 VM_BUG_ON_PAGE(!PageSwapCache(page), page); in __page_file_mapping()
2714 return page_swap_info(page)->swap_file->f_mapping; in __page_file_mapping()
2718 pgoff_t __page_file_index(struct page *page) in __page_file_index() argument
2720 swp_entry_t swap = { .val = page_private(page) }; in __page_file_index()
2721 VM_BUG_ON_PAGE(!PageSwapCache(page), page); in __page_file_index()
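
page_swap_info(), __page_file_mapping() and __page_file_index() are small helpers that treat a swap-cache page's private field as its swp_entry_t; completing the fragments, with the swap_info[] lookup and the swp_offset() return inferred from context:

struct swap_info_struct *page_swap_info(struct page *page)
{
        swp_entry_t swap = { .val = page_private(page) };

        BUG_ON(!PageSwapCache(page));
        return swap_info[swp_type(swap)];
}

/* mapping of the backing swap file, for swap-over-file setups */
struct address_space *__page_file_mapping(struct page *page)
{
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        return page_swap_info(page)->swap_file->f_mapping;
}

/* offset of the page within the swap file */
pgoff_t __page_file_index(struct page *page)
{
        swp_entry_t swap = { .val = page_private(page) };

        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        return swp_offset(swap);
}
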
2744 struct page *head; in add_swap_count_continuation()
2745 struct page *page; in add_swap_count_continuation() local
2746 struct page *list_page; in add_swap_count_continuation()
2754 page = alloc_page(gfp_mask | __GFP_HIGHMEM); in add_swap_count_continuation()
2778 if (!page) { in add_swap_count_continuation()
2824 list_add_tail(&page->lru, &head->lru); in add_swap_count_continuation()
2825 page = NULL; /* now it's attached, don't free it */ in add_swap_count_continuation()
2829 if (page) in add_swap_count_continuation()
2830 __free_page(page); in add_swap_count_continuation()
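
add_swap_count_continuation() preallocates a page outside the swap lock, then, when a swap count has genuinely overflowed SWAP_MAP_MAX, chains the page onto the lru list of the head page that backs that piece of the swap_map. Heavily condensed; the first-time initialization of head->lru and the scan of existing continuations for room are reduced to a comment:

int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
        struct swap_info_struct *si;
        struct page *head;
        struct page *page;
        unsigned long offset;
        unsigned char count;

        /* allocate outside the lock; we may turn out not to need it */
        page = alloc_page(gfp_mask | __GFP_HIGHMEM);

        si = swap_info_get(entry);
        if (!si)
                goto outer;     /* raced: the entry was freed meanwhile */

        offset = swp_offset(entry);
        count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
        if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX)
                goto out;       /* no overflow after all */

        if (!page) {
                spin_unlock(&si->lock);
                return -ENOMEM;
        }

        /*
         * head is the page frame backing this swap_map page; the first
         * continuation initializes head->lru, and existing continuations
         * are first scanned for free room (both elided here).
         */
        head = vmalloc_to_page(si->swap_map + offset);

        list_add_tail(&page->lru, &head->lru);
        page = NULL;    /* now it's attached, don't free it */
out:
        spin_unlock(&si->lock);
outer:
        if (page)
                __free_page(page);
        return 0;
}
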
2845 struct page *head; in swap_count_continued()
2846 struct page *page; in swap_count_continued() local
2856 page = list_entry(head->lru.next, struct page, lru); in swap_count_continued()
2857 map = kmap_atomic(page) + offset; in swap_count_continued()
2868 page = list_entry(page->lru.next, struct page, lru); in swap_count_continued()
2869 BUG_ON(page == head); in swap_count_continued()
2870 map = kmap_atomic(page) + offset; in swap_count_continued()
2874 page = list_entry(page->lru.next, struct page, lru); in swap_count_continued()
2875 if (page == head) in swap_count_continued()
2877 map = kmap_atomic(page) + offset; in swap_count_continued()
2882 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
2883 while (page != head) { in swap_count_continued()
2884 map = kmap_atomic(page) + offset; in swap_count_continued()
2887 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
2898 page = list_entry(page->lru.next, struct page, lru); in swap_count_continued()
2899 BUG_ON(page == head); in swap_count_continued()
2900 map = kmap_atomic(page) + offset; in swap_count_continued()
2907 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
2908 while (page != head) { in swap_count_continued()
2909 map = kmap_atomic(page) + offset; in swap_count_continued()
2913 page = list_entry(page->lru.prev, struct page, lru); in swap_count_continued()
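
swap_count_continued() walks the continuation chain with kmap_atomic(): incrementing works like adding 1 to 999, scanning forward (lru.next) past saturated continuation bytes, bumping the first byte with room, then walking back (lru.prev) to roll the saturated lower digits over; decrementing is the mirror image along lru.prev. The increment half, condensed (the new-digit initialization and the decrement path are omitted; count selects between the two in the real function):

static bool swap_count_continued(struct swap_info_struct *si,
                                 pgoff_t offset, unsigned char count)
{
        struct page *head;
        struct page *page;
        unsigned char *map;

        head = vmalloc_to_page(si->swap_map + offset);
        if (page_private(head) != SWP_CONTINUED)
                return false;   /* no chain yet: caller must add one */

        offset &= ~PAGE_MASK;
        page = list_entry(head->lru.next, struct page, lru);
        map = kmap_atomic(page) + offset;

        /* like adding 1 to 999: scan forward past saturated digits */
        while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
                kunmap_atomic(map);
                page = list_entry(page->lru.next, struct page, lru);
                BUG_ON(page == head);   /* chain was guaranteed to have room */
                map = kmap_atomic(page) + offset;
        }

        *map += 1;              /* bump the first digit with room */
        kunmap_atomic(map);

        /* roll the saturated lower digits over (keeping their marker) */
        page = list_entry(page->lru.prev, struct page, lru);
        while (page != head) {
                map = kmap_atomic(page) + offset;
                *map = COUNT_CONTINUED;
                kunmap_atomic(map);
                page = list_entry(page->lru.prev, struct page, lru);
        }
        return true;            /* incremented */
}
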
2928 struct page *head; in free_swap_count_continuations()
2933 struct page *page; in free_swap_count_continuations() local
2934 page = list_entry(this, struct page, lru); in free_swap_count_continuations()
2936 __free_page(page); in free_swap_count_continuations()
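
Finally, free_swap_count_continuations() runs at swapoff time, visiting the head page behind each swap_map page and freeing any continuation pages still chained to it; reconstructed, with the outer loop bounds inferred from context:

static void free_swap_count_continuations(struct swap_info_struct *si)
{
        unsigned long offset;

        for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
                struct page *head;

                head = vmalloc_to_page(si->swap_map + offset);
                if (page_private(head)) {       /* a continuation chain exists */
                        struct list_head *this, *next;

                        list_for_each_safe(this, next, &head->lru) {
                                struct page *page;

                                page = list_entry(this, struct page, lru);
                                list_del(this);
                                __free_page(page);
                        }
                }
        }
}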