Lines matching refs: page, in mm/vmscan.c. The number opening each hit is its line number in that file; the trailing "argument"/"local" tags are the cross-referencer's classification of the reference.

109 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
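Worth pausing at 109: reclaim always takes pages from the tail of an LRU list, the least-recently-used end, since _head->prev is the list tail. The macro is just the generic helper from <linux/list.h> spelled out; as a sketch:

    /* Equivalent form: head->prev is the tail, so this yields the
     * oldest page on the list. */
    #define lru_to_page(_head) list_last_entry(_head, struct page, lru)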
115 struct page *prev; \
129 struct page *prev; \
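The two bare "struct page *prev;" declarations at 115 and 129 are fragments of the prefetch_prev_lru_page() and prefetchw_prev_lru_page() macros, which warm the cache line of the next page to be scanned. Reconstructed roughly from the vmscan.c of this era (the prefetchw variant at 129 shown; the one at 115 differs only in using prefetch()):

    #ifdef ARCH_HAS_PREFETCHW
    #define prefetchw_prev_lru_page(_page, _base, _field)           \
        do {                                                        \
            if ((_page)->lru.prev != _base) {                       \
                struct page *prev;                                  \
                                                                    \
                prev = lru_to_page(&(_page->lru));                  \
                prefetchw(&prev->_field); /* prefetch for write */  \
            }                                                       \
        } while (0)
    #else
    #define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
    #endif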
476 static inline int is_page_cache_freeable(struct page *page) in is_page_cache_freeable() argument
483 return page_count(page) - page_has_private(page) == 2; in is_page_cache_freeable()
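The "== 2" at 483 encodes an invariant, not a magic number: a freeable page-cache page is referenced only by the isolating caller and by the page-cache radix tree, with buffer heads at page->private adding one more reference that page_has_private() subtracts out. As a ledger:

    /* Expected references on a freeable page:
     *   1   the isolating caller (e.g. shrink_page_list())
     * + 1   the page-cache radix tree
     * (+1   buffer heads, already cancelled via page_has_private())
     */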
510 struct page *page, int error) in handle_write_error() argument
512 lock_page(page); in handle_write_error()
513 if (page_mapping(page) == mapping) in handle_write_error()
515 unlock_page(page); in handle_write_error()
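The elided line 514 records the failure on the mapping. In the source of this era the whole helper reads roughly as below; the page lock makes the mapping check stable against truncation:

    static void handle_write_error(struct address_space *mapping,
                                   struct page *page, int error)
    {
        lock_page(page);
        /* The page may have been truncated away from this mapping
         * in the meantime; record the error only if it still belongs. */
        if (page_mapping(page) == mapping)
            mapping_set_error(mapping, error);
        unlock_page(page);
    }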
534 static pageout_t pageout(struct page *page, struct address_space *mapping, in pageout() argument
553 if (!is_page_cache_freeable(page)) in pageout()
560 if (page_has_private(page)) { in pageout()
561 if (try_to_free_buffers(page)) { in pageout()
562 ClearPageDirty(page); in pageout()
574 if (clear_page_dirty_for_io(page)) { in pageout()
584 SetPageReclaim(page); in pageout()
585 res = mapping->a_ops->writepage(page, &wbc); in pageout()
587 handle_write_error(mapping, page, res); in pageout()
589 ClearPageReclaim(page); in pageout()
593 if (!PageWriteback(page)) { in pageout()
595 ClearPageReclaim(page); in pageout()
597 trace_mm_vmscan_writepage(page, trace_reclaim_flags(page)); in pageout()
598 inc_zone_page_state(page, NR_VMSCAN_WRITE); in pageout()
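pageout() reports a pageout_t verdict (PAGE_KEEP, PAGE_ACTIVATE, PAGE_SUCCESS or PAGE_CLEAN). Between clear_page_dirty_for_io() at 574 and SetPageReclaim() at 584 it builds the writeback_control handed to ->writepage() at 585; roughly:

    struct writeback_control wbc = {
        .sync_mode = WB_SYNC_NONE,        /* opportunistic, don't wait */
        .nr_to_write = SWAP_CLUSTER_MAX,
        .range_start = 0,
        .range_end = LLONG_MAX,
        .for_reclaim = 1,                 /* mark this as reclaim I/O */
    };

Setting PageReclaim before the write lets the end-of-I/O path rotate the page to the tail of the inactive list so it is found again quickly once clean, hence the ClearPageReclaim() fixups on the error path (589) and when the write completed synchronously (593-595).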
609 static int __remove_mapping(struct address_space *mapping, struct page *page, in __remove_mapping() argument
615 BUG_ON(!PageLocked(page)); in __remove_mapping()
616 BUG_ON(mapping != page_mapping(page)); in __remove_mapping()
618 memcg = mem_cgroup_begin_page_stat(page); in __remove_mapping()
645 if (!page_freeze_refs(page, 2)) in __remove_mapping()
648 if (unlikely(PageDirty(page))) { in __remove_mapping()
649 page_unfreeze_refs(page, 2); in __remove_mapping()
653 if (PageSwapCache(page)) { in __remove_mapping()
654 swp_entry_t swap = { .val = page_private(page) }; in __remove_mapping()
655 mem_cgroup_swapout(page, swap); in __remove_mapping()
656 __delete_from_swap_cache(page); in __remove_mapping()
661 void (*freepage)(struct page *); in __remove_mapping()
675 if (reclaimed && page_is_file_cache(page) && in __remove_mapping()
677 shadow = workingset_eviction(mapping, page); in __remove_mapping()
678 __delete_from_page_cache(page, shadow, memcg); in __remove_mapping()
683 freepage(page); in __remove_mapping()
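The heart of __remove_mapping() is the refcount freeze at 645: page_freeze_refs(page, 2) is an atomic compare-and-exchange of the page count from 2 to 0, so it succeeds only while nobody beyond the page cache and the isolating caller holds a reference; a page found dirty after the freeze (648) gets its references handed back. In outline:

    if (!page_freeze_refs(page, 2))      /* cmpxchg of _count: 2 -> 0 */
        goto cannot_free;                /* someone else grabbed a ref */
    if (unlikely(PageDirty(page))) {
        page_unfreeze_refs(page, 2);     /* give the references back */
        goto cannot_free;                /* must reach disk first */
    }

For file pages, the a_ops->freepage pointer is snapshotted (661) before __delete_from_page_cache() runs (678), since the mapping must not be dereferenced once the page is gone; workingset_eviction() (677) leaves a shadow entry in the radix tree for later refault detection.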
700 int remove_mapping(struct address_space *mapping, struct page *page) in remove_mapping() argument
702 if (__remove_mapping(mapping, page, false)) { in remove_mapping()
708 page_unfreeze_refs(page, 1); in remove_mapping()
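The unfreeze to 1 at 708, rather than back to 2, is deliberate: after a successful __remove_mapping() only the caller's reference remains. The wrapper is roughly:

    int remove_mapping(struct address_space *mapping, struct page *page)
    {
        if (__remove_mapping(mapping, page, false)) {
            /* The freeze left _count at 0; restoring it to 1 means the
             * caller's eventual put_page() actually frees the page. */
            page_unfreeze_refs(page, 1);
            return 1;
        }
        return 0;
    }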
723 void putback_lru_page(struct page *page) in putback_lru_page() argument
726 int was_unevictable = PageUnevictable(page); in putback_lru_page()
728 VM_BUG_ON_PAGE(PageLRU(page), page); in putback_lru_page()
731 ClearPageUnevictable(page); in putback_lru_page()
733 if (page_evictable(page)) { in putback_lru_page()
741 lru_cache_add(page); in putback_lru_page()
748 add_page_to_unevictable_list(page); in putback_lru_page()
767 if (is_unevictable && page_evictable(page)) { in putback_lru_page()
768 if (!isolate_lru_page(page)) { in putback_lru_page()
769 put_page(page); in putback_lru_page()
783 put_page(page); /* drop ref from isolate */ in putback_lru_page()
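The second evictability test at 767 closes a race: a page judged unevictable at 733 can become evictable again (a racing munlock, say) before it is actually parked, and would otherwise be stranded on the unevictable list. The recovery path re-isolates it and retries; a sketch, assuming the "redo" label the full function places before the first classification:

    if (is_unevictable && page_evictable(page)) {
        /* Raced: the page turned evictable after we culled it. */
        if (!isolate_lru_page(page)) {
            put_page(page);   /* drop isolate_lru_page()'s reference */
            goto redo;        /* classify the page again */
        }
    }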
793 static enum page_references page_check_references(struct page *page, in page_check_references() argument
799 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, in page_check_references()
801 referenced_page = TestClearPageReferenced(page); in page_check_references()
811 if (PageSwapBacked(page)) in page_check_references()
827 SetPageReferenced(page); in page_check_references()
842 if (referenced_page && !PageSwapBacked(page)) in page_check_references()
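The verdict these lines compute is one of four values of an enum local to vmscan.c:

    enum page_references {
        PAGEREF_RECLAIM,        /* reclaim; write back if dirty */
        PAGEREF_RECLAIM_CLEAN,  /* reclaim only if no I/O is needed */
        PAGEREF_KEEP,           /* leave on the inactive list */
        PAGEREF_ACTIVATE,       /* promote to the active list */
    };

Referenced swap-backed pages (811) are activated outright, since reconstructing them means swap I/O; a file page carrying only the software PG_referenced bit with no referencing PTEs (842) is still reclaimed, but preferentially when that costs no writeback, hence PAGEREF_RECLAIM_CLEAN.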
849 static void page_check_dirty_writeback(struct page *page, in page_check_dirty_writeback() argument
858 if (!page_is_file_cache(page)) { in page_check_dirty_writeback()
865 *dirty = PageDirty(page); in page_check_dirty_writeback()
866 *writeback = PageWriteback(page); in page_check_dirty_writeback()
869 if (!page_has_private(page)) in page_check_dirty_writeback()
872 mapping = page_mapping(page); in page_check_dirty_writeback()
874 mapping->a_ops->is_dirty_writeback(page, dirty, writeback); in page_check_dirty_writeback()
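Anonymous pages take the early exit at 858: they are written by reclaim itself rather than by the flusher threads, so the stall heuristics fed by *dirty and *writeback must not trigger on them. Filesystems that track dirty and writeback state privately (pages with buffers, unstable NFS pages and the like) can override the page flags through the optional is_dirty_writeback address-space op at 874. The early exit is simply:

    if (!page_is_file_cache(page)) {
        /* Anon pages never stall reclaim on dirty/writeback state. */
        *dirty = false;
        *writeback = false;
        return;
    }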
905 struct page *page; in shrink_page_list() local
912 page = lru_to_page(page_list); in shrink_page_list()
913 list_del(&page->lru); in shrink_page_list()
915 if (!trylock_page(page)) in shrink_page_list()
918 VM_BUG_ON_PAGE(PageActive(page), page); in shrink_page_list()
919 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in shrink_page_list()
923 if (unlikely(!page_evictable(page))) in shrink_page_list()
926 if (!sc->may_unmap && page_mapped(page)) in shrink_page_list()
930 if (page_mapped(page) || PageSwapCache(page)) in shrink_page_list()
934 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
942 page_check_dirty_writeback(page, &dirty, &writeback); in shrink_page_list()
955 mapping = page_mapping(page); in shrink_page_list()
958 (writeback && PageReclaim(page))) in shrink_page_list()
994 if (PageWriteback(page)) { in shrink_page_list()
997 PageReclaim(page) && in shrink_page_list()
1004 !PageReclaim(page) || !may_enter_fs) { in shrink_page_list()
1016 SetPageReclaim(page); in shrink_page_list()
1022 unlock_page(page); in shrink_page_list()
1023 wait_on_page_writeback(page); in shrink_page_list()
1025 list_add_tail(&page->lru, page_list); in shrink_page_list()
1031 references = page_check_references(page, sc); in shrink_page_list()
1047 if (PageAnon(page) && !PageSwapCache(page)) { in shrink_page_list()
1050 if (!add_to_swap(page, page_list)) in shrink_page_list()
1055 mapping = page_mapping(page); in shrink_page_list()
1062 if (page_mapped(page) && mapping) { in shrink_page_list()
1063 switch (try_to_unmap(page, in shrink_page_list()
1076 if (PageDirty(page)) { in shrink_page_list()
1082 if (page_is_file_cache(page) && in shrink_page_list()
1091 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE); in shrink_page_list()
1092 SetPageReclaim(page); in shrink_page_list()
1110 switch (pageout(page, mapping, sc)) { in shrink_page_list()
1116 if (PageWriteback(page)) in shrink_page_list()
1118 if (PageDirty(page)) in shrink_page_list()
1125 if (!trylock_page(page)) in shrink_page_list()
1127 if (PageDirty(page) || PageWriteback(page)) in shrink_page_list()
1129 mapping = page_mapping(page); in shrink_page_list()
1156 if (page_has_private(page)) { in shrink_page_list()
1157 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1159 if (!mapping && page_count(page) == 1) { in shrink_page_list()
1160 unlock_page(page); in shrink_page_list()
1161 if (put_page_testzero(page)) in shrink_page_list()
1177 if (!mapping || !__remove_mapping(mapping, page, true)) in shrink_page_list()
1187 __clear_page_locked(page); in shrink_page_list()
1195 list_add(&page->lru, &free_pages); in shrink_page_list()
1199 if (PageSwapCache(page)) in shrink_page_list()
1200 try_to_free_swap(page); in shrink_page_list()
1201 unlock_page(page); in shrink_page_list()
1202 list_add(&page->lru, &ret_pages); in shrink_page_list()
1207 if (PageSwapCache(page) && vm_swap_full()) in shrink_page_list()
1208 try_to_free_swap(page); in shrink_page_list()
1209 VM_BUG_ON_PAGE(PageActive(page), page); in shrink_page_list()
1210 SetPageActive(page); in shrink_page_list()
1213 unlock_page(page); in shrink_page_list()
1215 list_add(&page->lru, &ret_pages); in shrink_page_list()
1216 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); in shrink_page_list()
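shrink_page_list() is the workhorse these hits trace; read top to bottom they give its per-page pipeline. A sketch of the flow, keyed to the line numbers above:

    /*
     * for each page on page_list:
     *   trylock it (915); bail if unevictable (923) or if it is
     *     mapped and sc->may_unmap is off (926)
     *   writeback in flight (994)? depending on who is reclaiming
     *     and on __GFP_IO/__GFP_FS: stall, tag PageReclaim (1016),
     *     or wait_on_page_writeback() (1023)
     *   ask page_check_references() for a verdict (1031)
     *   anon but not yet in swap cache (1047)? add_to_swap() (1050)
     *   still mapped (1062)? try_to_unmap() (1063)
     *   dirty (1076)? defer file pages to kswapd (1091-1092) or call
     *     pageout() (1110), then re-take the page lock (1125)
     *   strip buffer heads (1156-1161), then __remove_mapping() (1177)
     *   freed pages collect on free_pages (1195); kept or activated
     *     pages return via ret_pages (1202, 1215)
     */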
1243 struct page *page, *next; in reclaim_clean_pages_from_list() local
1246 list_for_each_entry_safe(page, next, page_list, lru) { in reclaim_clean_pages_from_list()
1247 if (page_is_file_cache(page) && !PageDirty(page) && in reclaim_clean_pages_from_list()
1248 !isolated_balloon_page(page)) { in reclaim_clean_pages_from_list()
1249 ClearPageActive(page); in reclaim_clean_pages_from_list()
1250 list_move(&page->lru, &clean_pages); in reclaim_clean_pages_from_list()
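This is the narrow variant used by contiguous-range allocation (alloc_contig_range()/CMA): only clean file-cache pages are collected (1247), because those can be dropped without issuing any I/O, and isolated balloon pages are excluded (1248) since they are migrated rather than reclaimed.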
1272 int __isolate_lru_page(struct page *page, isolate_mode_t mode) in __isolate_lru_page() argument
1277 if (!PageLRU(page)) in __isolate_lru_page()
1281 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) in __isolate_lru_page()
1299 if (PageWriteback(page)) in __isolate_lru_page()
1302 if (PageDirty(page)) { in __isolate_lru_page()
1314 mapping = page_mapping(page); in __isolate_lru_page()
1320 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) in __isolate_lru_page()
1323 if (likely(get_page_unless_zero(page))) { in __isolate_lru_page()
1329 ClearPageLRU(page); in __isolate_lru_page()
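The return convention is errno-style: -EINVAL for pages not on an LRU at all or unevictable under the given mode (1277, 1281), -EBUSY when writeback, dirtiness or mappedness forbids isolation (1299, 1302, 1320), and 0 on success. The success path at 1323-1329 is the canonical speculative-reference pattern:

    if (likely(get_page_unless_zero(page))) {
        /* With a reference held, clearing PageLRU hides the page
         * from every other LRU walker: it now belongs to us. */
        ClearPageLRU(page);
        ret = 0;
    }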
1367 struct page *page; in isolate_lru_pages() local
1370 page = lru_to_page(src); in isolate_lru_pages()
1371 prefetchw_prev_lru_page(page, src, flags); in isolate_lru_pages()
1373 VM_BUG_ON_PAGE(!PageLRU(page), page); in isolate_lru_pages()
1375 switch (__isolate_lru_page(page, mode)) { in isolate_lru_pages()
1377 nr_pages = hpage_nr_pages(page); in isolate_lru_pages()
1379 list_move(&page->lru, dst); in isolate_lru_pages()
1385 list_move(&page->lru, src); in isolate_lru_pages()
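The scan takes pages from the tail of src (1370), prefetching one entry ahead (1371), and dispatches on the __isolate_lru_page() verdict; roughly:

    switch (__isolate_lru_page(page, mode)) {
    case 0:
        nr_pages = hpage_nr_pages(page);  /* a THP counts as many */
        nr_taken += nr_pages;
        list_move(&page->lru, dst);
        break;
    case -EBUSY:
        /* Cannot isolate right now: rotate back onto the source list. */
        list_move(&page->lru, src);
        continue;
    default:
        BUG();
    }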
1424 int isolate_lru_page(struct page *page) in isolate_lru_page() argument
1428 VM_BUG_ON_PAGE(!page_count(page), page); in isolate_lru_page()
1430 if (PageLRU(page)) { in isolate_lru_page()
1431 struct zone *zone = page_zone(page); in isolate_lru_page()
1435 lruvec = mem_cgroup_page_lruvec(page, zone); in isolate_lru_page()
1436 if (PageLRU(page)) { in isolate_lru_page()
1437 int lru = page_lru(page); in isolate_lru_page()
1438 get_page(page); in isolate_lru_page()
1439 ClearPageLRU(page); in isolate_lru_page()
1440 del_page_from_lru_list(page, lruvec, lru); in isolate_lru_page()
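The elided lines here are the locking. PageLRU is tested once optimistically (1430) and again under zone->lru_lock (1436), because only the lock makes the answer stable. Filled in, the function reads roughly:

    int isolate_lru_page(struct page *page)
    {
        int ret = -EBUSY;

        VM_BUG_ON_PAGE(!page_count(page), page);

        if (PageLRU(page)) {
            struct zone *zone = page_zone(page);
            struct lruvec *lruvec;

            spin_lock_irq(&zone->lru_lock);
            lruvec = mem_cgroup_page_lruvec(page, zone);
            if (PageLRU(page)) {      /* recheck, now stable */
                int lru = page_lru(page);
                get_page(page);
                ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, lru);
                ret = 0;
            }
            spin_unlock_irq(&zone->lru_lock);
        }
        return ret;
    }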
1496 struct page *page = lru_to_page(page_list); in putback_inactive_pages() local
1499 VM_BUG_ON_PAGE(PageLRU(page), page); in putback_inactive_pages()
1500 list_del(&page->lru); in putback_inactive_pages()
1501 if (unlikely(!page_evictable(page))) { in putback_inactive_pages()
1503 putback_lru_page(page); in putback_inactive_pages()
1508 lruvec = mem_cgroup_page_lruvec(page, zone); in putback_inactive_pages()
1510 SetPageLRU(page); in putback_inactive_pages()
1511 lru = page_lru(page); in putback_inactive_pages()
1512 add_page_to_lru_list(page, lruvec, lru); in putback_inactive_pages()
1516 int numpages = hpage_nr_pages(page); in putback_inactive_pages()
1519 if (put_page_testzero(page)) { in putback_inactive_pages()
1520 __ClearPageLRU(page); in putback_inactive_pages()
1521 __ClearPageActive(page); in putback_inactive_pages()
1522 del_page_from_lru_list(page, lruvec, lru); in putback_inactive_pages()
1524 if (unlikely(PageCompound(page))) { in putback_inactive_pages()
1526 mem_cgroup_uncharge(page); in putback_inactive_pages()
1527 (*get_compound_page_dtor(page))(page); in putback_inactive_pages()
1530 list_add(&page->lru, &pages_to_free); in putback_inactive_pages()
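When a page's last reference drops while it is being put back (1519), it is freed right here, and the elided lines around 1524-1527 drop and retake zone->lru_lock: a compound page's destructor must not run under the LRU lock. Roughly:

    if (unlikely(PageCompound(page))) {
        spin_unlock_irq(&zone->lru_lock);
        mem_cgroup_uncharge(page);
        (*get_compound_page_dtor(page))(page);
        spin_lock_irq(&zone->lru_lock);
    } else
        list_add(&page->lru, &pages_to_free);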
1727 struct page *page; in move_active_pages_to_lru() local
1731 page = lru_to_page(list); in move_active_pages_to_lru()
1732 lruvec = mem_cgroup_page_lruvec(page, zone); in move_active_pages_to_lru()
1734 VM_BUG_ON_PAGE(PageLRU(page), page); in move_active_pages_to_lru()
1735 SetPageLRU(page); in move_active_pages_to_lru()
1737 nr_pages = hpage_nr_pages(page); in move_active_pages_to_lru()
1739 list_move(&page->lru, &lruvec->lists[lru]); in move_active_pages_to_lru()
1742 if (put_page_testzero(page)) { in move_active_pages_to_lru()
1743 __ClearPageLRU(page); in move_active_pages_to_lru()
1744 __ClearPageActive(page); in move_active_pages_to_lru()
1745 del_page_from_lru_list(page, lruvec, lru); in move_active_pages_to_lru()
1747 if (unlikely(PageCompound(page))) { in move_active_pages_to_lru()
1749 mem_cgroup_uncharge(page); in move_active_pages_to_lru()
1750 (*get_compound_page_dtor(page))(page); in move_active_pages_to_lru()
1753 list_add(&page->lru, pages_to_free); in move_active_pages_to_lru()
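The tail of this helper (1742-1753) is the same last-reference free path, compound-page unlock dance included, as in putback_inactive_pages() above; the difference is only that surviving pages land on &lruvec->lists[lru] (1739) for whichever list the caller passed in, which is how both the re-activation and the deactivation flows of shrink_active_list() are served by one function.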
1772 struct page *page; in shrink_active_list() local
1802 page = lru_to_page(&l_hold); in shrink_active_list()
1803 list_del(&page->lru); in shrink_active_list()
1805 if (unlikely(!page_evictable(page))) { in shrink_active_list()
1806 putback_lru_page(page); in shrink_active_list()
1811 if (page_has_private(page) && trylock_page(page)) { in shrink_active_list()
1812 if (page_has_private(page)) in shrink_active_list()
1813 try_to_release_page(page, 0); in shrink_active_list()
1814 unlock_page(page); in shrink_active_list()
1818 if (page_referenced(page, 0, sc->target_mem_cgroup, in shrink_active_list()
1820 nr_rotated += hpage_nr_pages(page); in shrink_active_list()
1830 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) { in shrink_active_list()
1831 list_add(&page->lru, &l_active); in shrink_active_list()
1836 ClearPageActive(page); /* we are de-activating */ in shrink_active_list()
1837 list_add(&page->lru, &l_inactive); in shrink_active_list()
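The deactivation policy sits in 1818-1837: page_referenced() is called not to veto reclaim but to clear the accessed bits and count rotations (1820), and the only pages spared demotion are referenced, executable file pages (VM_EXEC at 1830), on the theory that evicting running program text costs far more than evicting data pages. Everything else has PageActive cleared (1836) and moves to l_inactive (1837) for a second chance on the inactive list.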
3832 int page_evictable(struct page *page) in page_evictable() argument
3834 return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); in page_evictable()
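Both tests at 3834 are cheap flag checks: mapping_unevictable() covers address spaces pinned as a whole (SHM_LOCK'd shmem and ramfs set this on the mapping), while PageMlocked() covers pages living in mlocked VMAs.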
3847 void check_move_unevictable_pages(struct page **pages, int nr_pages) in check_move_unevictable_pages()
3856 struct page *page = pages[i]; in check_move_unevictable_pages() local
3860 pagezone = page_zone(page); in check_move_unevictable_pages()
3867 lruvec = mem_cgroup_page_lruvec(page, zone); in check_move_unevictable_pages()
3869 if (!PageLRU(page) || !PageUnevictable(page)) in check_move_unevictable_pages()
3872 if (page_evictable(page)) { in check_move_unevictable_pages()
3873 enum lru_list lru = page_lru_base_type(page); in check_move_unevictable_pages()
3875 VM_BUG_ON_PAGE(PageActive(page), page); in check_move_unevictable_pages()
3876 ClearPageUnevictable(page); in check_move_unevictable_pages()
3877 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); in check_move_unevictable_pages()
3878 add_page_to_lru_list(page, lruvec, lru); in check_move_unevictable_pages()
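This is the rescue path for pages parked as unevictable whose condition has since cleared; the classic caller is shmem after SHM_UNLOCK. It batches zone->lru_lock across the page array, re-locking only when the zone changes, so the elided lines around 3860 are roughly:

    pagezone = page_zone(page);
    if (pagezone != zone) {
        if (zone)
            spin_unlock_irq(&zone->lru_lock);
        zone = pagezone;
        spin_lock_irq(&zone->lru_lock);
    }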