Lines matching refs:page in mm/migrate.c (a v4.x tree, circa v4.3/v4.4, judging by set_page_memcg() and page_freeze_refs()). The left-hand number is the mm/migrate.c source line; "in func()" names the enclosing function. Only lines that reference "page" are shown, so control flow between matches (else branches, continue statements, the newpage half of each copy) is elided.
85 struct page *page; in putback_movable_pages() local
86 struct page *page2; in putback_movable_pages()
88 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
89 if (unlikely(PageHuge(page))) { in putback_movable_pages()
90 putback_active_hugepage(page); in putback_movable_pages()
93 list_del(&page->lru); in putback_movable_pages()
94 dec_zone_page_state(page, NR_ISOLATED_ANON + in putback_movable_pages()
95 page_is_file_cache(page)); in putback_movable_pages()
96 if (unlikely(isolated_balloon_page(page))) in putback_movable_pages()
97 balloon_page_putback(page); in putback_movable_pages()
99 putback_lru_page(page); in putback_movable_pages()
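The matches above are nearly the whole of putback_movable_pages(). A sketch with the filtered-out control flow (the continue after the hugetlb case and the else before putback_lru_page) restored; reconstructed from the matched lines, not a verbatim quote:

/* Put previously isolated pages back onto the appropriate lists. */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			/* hugetlb pages return to the active hugepage list */
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		if (unlikely(isolated_balloon_page(page)))
			/* balloon pages go back to the balloon, not the LRU */
			balloon_page_putback(page);
		else
			putback_lru_page(page);
	}
}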
106 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, in remove_migration_pte()
189 static void remove_migration_ptes(struct page *old, struct page *new) in remove_migration_ptes()
209 struct page *page; in __migration_entry_wait() local
220 page = migration_entry_to_page(entry); in __migration_entry_wait()
229 if (!get_page_unless_zero(page)) in __migration_entry_wait()
232 wait_on_page_locked(page); in __migration_entry_wait()
233 put_page(page); in __migration_entry_wait()
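These __migration_entry_wait() matches show how a faulting task waits out a migration: convert the migration swap entry back to its struct page, take a speculative reference, and sleep on the page lock, which the migrating task holds until migration completes. A condensed sketch (the is_swap_pte()/is_migration_entry() bail-outs and their pte_unmap_unlock() calls are elided):

	entry = pte_to_swp_entry(pte);
	page = migration_entry_to_page(entry);

	/*
	 * Use get_page_unless_zero(): once migration has frozen the
	 * refcount to zero there is nothing left to wait for, and a
	 * plain get_page() on a dying page would be unsafe. If it
	 * fails, the fault simply happens again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);	/* migration holds PG_locked */
	put_page(page);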
313 struct page *newpage, struct page *page, in migrate_page_move_mapping() argument
324 if (page_count(page) != expected_count) in migrate_page_move_mapping()
328 set_page_memcg(newpage, page_memcg(page)); in migrate_page_move_mapping()
329 newpage->index = page->index; in migrate_page_move_mapping()
330 newpage->mapping = page->mapping; in migrate_page_move_mapping()
331 if (PageSwapBacked(page)) in migrate_page_move_mapping()
337 oldzone = page_zone(page); in migrate_page_move_mapping()
343 page_index(page)); in migrate_page_move_mapping()
345 expected_count += 1 + page_has_private(page); in migrate_page_move_mapping()
346 if (page_count(page) != expected_count || in migrate_page_move_mapping()
347 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { in migrate_page_move_mapping()
352 if (!page_freeze_refs(page, expected_count)) { in migrate_page_move_mapping()
366 page_unfreeze_refs(page, expected_count); in migrate_page_move_mapping()
375 set_page_memcg(newpage, page_memcg(page)); in migrate_page_move_mapping()
376 newpage->index = page->index; in migrate_page_move_mapping()
377 newpage->mapping = page->mapping; in migrate_page_move_mapping()
378 if (PageSwapBacked(page)) in migrate_page_move_mapping()
382 if (PageSwapCache(page)) { in migrate_page_move_mapping()
384 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
388 dirty = PageDirty(page); in migrate_page_move_mapping()
390 ClearPageDirty(page); in migrate_page_move_mapping()
401 page_unfreeze_refs(page, expected_count - 1); in migrate_page_move_mapping()
419 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
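The crux of migrate_page_move_mapping() is the refcount freeze. With mapping->tree_lock held, it checks that page_count() equals exactly what the radix-tree slot, private data and the caller account for, then atomically swaps the count to zero so lockless lookups (find_get_page() and friends) cannot take a new reference while the slot is repointed at newpage. The helpers were, approximately, in this era's include/linux/pagemap.h (later renamed page_ref_freeze()/page_ref_unfreeze()):

static inline int page_freeze_refs(struct page *page, int count)
{
	/* succeeds only if the count went from 'count' to 0 atomically */
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);
	atomic_set(&page->_count, count);
}

On success the count is thawed at expected_count - 1 (line 401): the reference that belonged to the old page's radix-tree slot now belongs to newpage.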
438 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
446 page_index(page)); in migrate_huge_page_move_mapping()
448 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
449 if (page_count(page) != expected_count || in migrate_huge_page_move_mapping()
450 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { in migrate_huge_page_move_mapping()
455 if (!page_freeze_refs(page, expected_count)) { in migrate_huge_page_move_mapping()
460 set_page_memcg(newpage, page_memcg(page)); in migrate_huge_page_move_mapping()
461 newpage->index = page->index; in migrate_huge_page_move_mapping()
462 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
467 page_unfreeze_refs(page, expected_count - 1); in migrate_huge_page_move_mapping()
478 static void __copy_gigantic_page(struct page *dst, struct page *src, in __copy_gigantic_page()
482 struct page *dst_base = dst; in __copy_gigantic_page()
483 struct page *src_base = src; in __copy_gigantic_page()
495 static void copy_huge_page(struct page *dst, struct page *src) in copy_huge_page()
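copy_huge_page() copies one base page at a time so the loop stays preemptible; gigantic pages additionally need __copy_gigantic_page(), because their tail pages may not be contiguous in a sparse mem_map. Reconstructed from the matched lines (not a verbatim quote):

static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();		/* copying may take a while; yield */
		copy_highpage(dst, src);

		i++;
		/* step via mem_map_next(): tail pages may cross sections */
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}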
524 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
528 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
529 copy_huge_page(newpage, page); in migrate_page_copy()
531 copy_highpage(newpage, page); in migrate_page_copy()
533 if (PageError(page)) in migrate_page_copy()
535 if (PageReferenced(page)) in migrate_page_copy()
537 if (PageUptodate(page)) in migrate_page_copy()
539 if (TestClearPageActive(page)) { in migrate_page_copy()
540 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_copy()
542 } else if (TestClearPageUnevictable(page)) in migrate_page_copy()
544 if (PageChecked(page)) in migrate_page_copy()
546 if (PageMappedToDisk(page)) in migrate_page_copy()
550 if (PageDirty(page)) in migrate_page_copy()
553 if (page_is_young(page)) in migrate_page_copy()
555 if (page_is_idle(page)) in migrate_page_copy()
562 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_copy()
565 ksm_migrate_page(newpage, page); in migrate_page_copy()
570 if (PageSwapCache(page)) in migrate_page_copy()
571 ClearPageSwapCache(page); in migrate_page_copy()
572 ClearPagePrivate(page); in migrate_page_copy()
573 set_page_private(page, 0); in migrate_page_copy()
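Most of migrate_page_copy() is a flag transfer: each PageX(page) test matched above is paired, on the next (unmatched) source line, with a SetPageX(newpage). The pattern, with the newpage halves restored (sketch):

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

The tail of the function (lines 570-573) then scrubs the old page: its SwapCache and Private bits are cleared and page->private zeroed, since both now describe newpage.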
594 struct page *newpage, struct page *page, in migrate_page() argument
599 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page()
601 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); in migrate_page()
606 migrate_page_copy(newpage, page); in migrate_page()
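migrate_page() is the default migratepage implementation for mappings with no private data, and also serves anonymous pages (mapping == NULL). The matched lines are essentially the whole function; restored (reconstruction, not a verbatim quote):

int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}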
618 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
623 if (!page_has_buffers(page)) in buffer_migrate_page()
624 return migrate_page(mapping, newpage, page, mode); in buffer_migrate_page()
626 head = page_buffers(page); in buffer_migrate_page()
628 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); in buffer_migrate_page()
641 ClearPagePrivate(page); in buffer_migrate_page()
642 set_page_private(newpage, page_private(page)); in buffer_migrate_page()
643 set_page_private(page, 0); in buffer_migrate_page()
644 put_page(page); in buffer_migrate_page()
656 migrate_page_copy(newpage, page); in buffer_migrate_page()
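Between the matched lines, buffer_migrate_page() walks the page's circular buffer_head list twice: once to repoint every buffer at newpage, and once at the end to unlock the buffers (they were locked earlier, either by buffer_migrate_lock_buffers() or, for MIGRATE_ASYNC, inside migrate_page_move_mapping()). A sketch of the walks the refs:page filter omits:

	bh = head;
	do {		/* repoint each buffer at the new page */
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);	/* matched line 656 */

	bh = head;
	do {		/* drop the lock and ref taken when locking */
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;
	} while (bh != head);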
674 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
689 if (!clear_page_dirty_for_io(page)) in writeout()
701 remove_migration_ptes(page, page); in writeout()
703 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
707 lock_page(page); in writeout()
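writeout() is the last resort for a dirty page whose mapping lacks a migratepage method: give up on migrating this pass, write the page back, and return -EAGAIN so a later pass can retry against a clean page. Note the remove_migration_ptes(page, page) at line 701: the migration entries installed by try_to_unmap() are torn back down first, since the page is staying where it is. Reconstructed (field values per this kernel era; not a verbatim quote):

static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,		/* just this one page */
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1		/* reclaim-driven writeback */
	};
	int rc;

	if (!mapping->a_ops->writepage)
		return -EINVAL;		/* no write method */

	if (!clear_page_dirty_for_io(page))
		return -EAGAIN;		/* someone else already wrote it */

	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		lock_page(page);	/* ->writepage unlocked the page */

	return (rc < 0) ? -EIO : -EAGAIN;
}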
716 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
718 if (PageDirty(page)) { in fallback_migrate_page()
722 return writeout(mapping, page); in fallback_migrate_page()
729 if (page_has_private(page) && in fallback_migrate_page()
730 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
733 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
747 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
753 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
756 mapping = page_mapping(page); in move_to_new_page()
758 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
766 rc = mapping->a_ops->migratepage(mapping, newpage, page, mode); in move_to_new_page()
768 rc = fallback_migrate_page(mapping, newpage, page, mode); in move_to_new_page()
775 set_page_memcg(page, NULL); in move_to_new_page()
776 if (!PageAnon(page)) in move_to_new_page()
777 page->mapping = NULL; in move_to_new_page()
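move_to_new_page() dispatches on the mapping: no mapping means a bare anonymous page and plain migrate_page(); a mapping with a migratepage method (most filesystems, and swap space for anon pages in swapcache) gets the callback; everything else goes through fallback_migrate_page(). On success the old page is disconnected from its memcg and, unless anonymous, from its mapping. Reconstruction from the matched lines:

static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	if (rc == MIGRATEPAGE_SUCCESS) {
		set_page_memcg(page, NULL);
		if (!PageAnon(page))
			page->mapping = NULL;
	}
	return rc;
}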
782 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
789 if (!trylock_page(page)) { in __unmap_and_move()
809 lock_page(page); in __unmap_and_move()
812 if (PageWriteback(page)) { in __unmap_and_move()
825 wait_on_page_writeback(page); in __unmap_and_move()
842 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
843 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
856 if (unlikely(isolated_balloon_page(page))) { in __unmap_and_move()
864 rc = balloon_page_migrate(newpage, page, mode); in __unmap_and_move()
880 if (!page->mapping) { in __unmap_and_move()
881 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
882 if (page_has_private(page)) { in __unmap_and_move()
883 try_to_free_buffers(page); in __unmap_and_move()
886 } else if (page_mapped(page)) { in __unmap_and_move()
888 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
889 page); in __unmap_and_move()
890 try_to_unmap(page, in __unmap_and_move()
895 if (!page_mapped(page)) in __unmap_and_move()
896 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
899 remove_migration_ptes(page, in __unmap_and_move()
900 rc == MIGRATEPAGE_SUCCESS ? newpage : page); in __unmap_and_move()
908 unlock_page(page); in __unmap_and_move()
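__unmap_and_move() strings the pieces together. A condensed map of its control flow, following the matched lines; the exact mode/force tests and error paths are approximate:

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;
		lock_page(page);
	}
	if (PageWriteback(page)) {
		if (mode != MIGRATE_SYNC || !force)
			goto out_unlock;	/* only full sync waits */
		wait_on_page_writeback(page);
	}
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);	/* pin across unmap */

	if (unlikely(isolated_balloon_page(page))) {
		rc = balloon_page_migrate(newpage, page, mode);
		goto out_unlock;
	}

	if (!page->mapping) {		/* truncation freed the mapping */
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page))
			try_to_free_buffers(page);
	} else if (page_mapped(page)) {
		/* replace each pte mapping the page with a migration entry */
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	/* repoint the ptes at whichever page is live now */
	remove_migration_ptes(page,
		rc == MIGRATEPAGE_SUCCESS ? newpage : page);
out_unlock:
	unlock_page(page);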
929 unsigned long private, struct page *page, in unmap_and_move() argument
935 struct page *newpage; in unmap_and_move()
937 newpage = get_new_page(page, private, &result); in unmap_and_move()
941 if (page_count(page) == 1) { in unmap_and_move()
946 if (unlikely(PageTransHuge(page))) in unmap_and_move()
947 if (unlikely(split_huge_page(page))) in unmap_and_move()
950 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
962 list_del(&page->lru); in unmap_and_move()
963 dec_zone_page_state(page, NR_ISOLATED_ANON + in unmap_and_move()
964 page_is_file_cache(page)); in unmap_and_move()
973 put_page(page); in unmap_and_move()
974 if (!test_set_page_hwpoison(page)) in unmap_and_move()
977 putback_lru_page(page); in unmap_and_move()
1022 struct page *hpage, int force, in unmap_and_move_huge_page()
1028 struct page *new_hpage; in unmap_and_move_huge_page()
1136 struct page *page; in migrate_pages() local
1137 struct page *page2; in migrate_pages()
1147 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1150 if (PageHuge(page)) in migrate_pages()
1152 put_new_page, private, page, in migrate_pages()
1156 private, page, pass > 2, mode, in migrate_pages()
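migrate_pages() drives the whole list: up to 10 passes, retrying pages that failed with -EAGAIN, and from the third pass on (the pass > 2 arguments above) it sets force so unmap_and_move() may block on the page lock. The loop shape, sketched with the result bookkeeping elided:

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();
			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason);
			/* -EAGAIN bumps retry; other results are counted */
		}
	}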
1201 struct page *page; member
1206 static struct page *new_page_node(struct page *p, unsigned long private, in new_page_node()
1211 while (pm->node != MAX_NUMNODES && pm->page != p) in new_page_node()
1248 struct page *page; in do_move_page_to_node_array() local
1256 page = follow_page(vma, pp->addr, in do_move_page_to_node_array()
1259 err = PTR_ERR(page); in do_move_page_to_node_array()
1260 if (IS_ERR(page)) in do_move_page_to_node_array()
1264 if (!page) in do_move_page_to_node_array()
1267 pp->page = page; in do_move_page_to_node_array()
1268 err = page_to_nid(page); in do_move_page_to_node_array()
1277 if (page_mapcount(page) > 1 && in do_move_page_to_node_array()
1281 if (PageHuge(page)) { in do_move_page_to_node_array()
1282 if (PageHead(page)) in do_move_page_to_node_array()
1283 isolate_huge_page(page, &pagelist); in do_move_page_to_node_array()
1287 err = isolate_lru_page(page); in do_move_page_to_node_array()
1289 list_add_tail(&page->lru, &pagelist); in do_move_page_to_node_array()
1290 inc_zone_page_state(page, NR_ISOLATED_ANON + in do_move_page_to_node_array()
1291 page_is_file_cache(page)); in do_move_page_to_node_array()
1299 put_page(page); in do_move_page_to_node_array()
1416 struct page *page; in do_pages_stat_array() local
1424 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1426 err = PTR_ERR(page); in do_pages_stat_array()
1427 if (IS_ERR(page)) in do_pages_stat_array()
1430 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
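do_move_page_to_node_array() and do_pages_stat_array() back the move_pages(2) syscall: given a nodes array it isolates each page and migrates it to the requested node; given nodes == NULL it only reports each page's current node in status. A minimal userspace exercise of both modes (assumes libnuma's <numaif.h> wrapper is available; build with -lnuma):

#include <numaif.h>		/* move_pages(); link with -lnuma */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	void *buf = aligned_alloc(pagesize, pagesize);
	void *pages[1] = { buf };
	int nodes[1] = { 0 };		/* desired node per page */
	int status[1];

	memset(buf, 0, pagesize);	/* fault the page in first */

	/* move mode: ends up in do_move_page_to_node_array() */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
		perror("move_pages(move)");
	else
		printf("after move, page status = %d\n", status[0]);

	/* query mode (nodes == NULL): ends up in do_pages_stat_array() */
	if (move_pages(0, 1, pages, NULL, status, 0))
		perror("move_pages(stat)");
	else
		printf("page is on node %d\n", status[0]);

	free(buf);
	return 0;
}

Pages mapped by more than one process are skipped unless MPOL_MF_MOVE_ALL (which requires CAP_SYS_NICE) is passed; that is the page_mapcount(page) > 1 test at line 1277.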
1576 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
1581 struct page *newpage; in alloc_misplaced_dst_page()
1632 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
1636 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
1639 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) in numamigrate_isolate_page()
1642 if (isolate_lru_page(page)) in numamigrate_isolate_page()
1652 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
1653 putback_lru_page(page); in numamigrate_isolate_page()
1657 page_lru = page_is_file_cache(page); in numamigrate_isolate_page()
1658 mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
1659 hpage_nr_pages(page)); in numamigrate_isolate_page()
1666 put_page(page); in numamigrate_isolate_page()
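numamigrate_isolate_page() gates NUMA-balancing migration: bail if the destination node is nearly full, isolate the page from its LRU, and for THP insist on page_count() == 3 (one ref for the mapping, one for the caller's pin, one just taken by isolation), so any extra pin, e.g. from GUP, blocks migration. Reconstructed from the matched lines:

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/* a THP with any extra reference (e.g. a GUP pin) must not move */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_zone_page_state(page_zone(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolation took its own reference, so the caller's pin can be
	 * dropped; the page cannot vanish during migration.
	 */
	put_page(page);
	return 1;
}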
1672 struct page *page = pmd_page(pmd); in pmd_trans_migrating() local
1673 return PageLocked(page); in pmd_trans_migrating()
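pmd_trans_migrating() is fully visible in its two matched lines: a huge PMD counts as "migrating" exactly while the migration path holds the page lock on the THP it maps. Restored with its signature:

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}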
1681 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1693 if (page_mapcount(page) != 1 && page_is_file_cache(page) && in migrate_misplaced_page()
1705 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
1709 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
1715 list_del(&page->lru); in migrate_misplaced_page()
1716 dec_zone_page_state(page, NR_ISOLATED_ANON + in migrate_misplaced_page()
1717 page_is_file_cache(page)); in migrate_misplaced_page()
1718 putback_lru_page(page); in migrate_misplaced_page()
1727 put_page(page); in migrate_misplaced_page()
1741 struct page *page, int node) in migrate_misplaced_transhuge_page() argument
1746 struct page *new_page = NULL; in migrate_misplaced_transhuge_page()
1747 int page_lru = page_is_file_cache(page); in migrate_misplaced_transhuge_page()
1766 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_transhuge_page()
1780 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
1781 new_page->index = page->index; in migrate_misplaced_transhuge_page()
1782 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
1788 if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) { in migrate_misplaced_transhuge_page()
1795 SetPageActive(page); in migrate_misplaced_transhuge_page()
1797 SetPageUnevictable(page); in migrate_misplaced_transhuge_page()
1803 get_page(page); in migrate_misplaced_transhuge_page()
1804 putback_lru_page(page); in migrate_misplaced_transhuge_page()
1805 mod_zone_page_state(page_zone(page), in migrate_misplaced_transhuge_page()
1830 if (page_count(page) != 2) { in migrate_misplaced_transhuge_page()
1839 mlock_migrate_page(new_page, page); in migrate_misplaced_transhuge_page()
1840 set_page_memcg(new_page, page_memcg(page)); in migrate_misplaced_transhuge_page()
1841 set_page_memcg(page, NULL); in migrate_misplaced_transhuge_page()
1842 page_remove_rmap(page); in migrate_misplaced_transhuge_page()
1852 unlock_page(page); in migrate_misplaced_transhuge_page()
1853 put_page(page); /* Drop the rmap reference */ in migrate_misplaced_transhuge_page()
1854 put_page(page); /* Drop the LRU isolation reference */ in migrate_misplaced_transhuge_page()
1859 mod_zone_page_state(page_zone(page), in migrate_misplaced_transhuge_page()
1876 unlock_page(page); in migrate_misplaced_transhuge_page()
1877 put_page(page); in migrate_misplaced_transhuge_page()
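On success, migrate_misplaced_transhuge_page() ends by dropping the old THP's last two pins, the rmap reference and the LRU isolation reference, and by undoing the NR_ISOLATED accounting that numamigrate_isolate_page() added. The tail of the success path, sketched from the matched lines:

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

	mod_zone_page_state(page_zone(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;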