Lines matching refs: newpage. Each entry gives the source line number, the matching code, the enclosing function, and whether the hit declares an argument or a local variable.

313 		struct page *newpage, struct page *page,  in migrate_page_move_mapping()  argument
328 set_page_memcg(newpage, page_memcg(page)); in migrate_page_move_mapping()
329 newpage->index = page->index; in migrate_page_move_mapping()
330 newpage->mapping = page->mapping; in migrate_page_move_mapping()
332 SetPageSwapBacked(newpage); in migrate_page_move_mapping()
338 newzone = page_zone(newpage); in migrate_page_move_mapping()
375 set_page_memcg(newpage, page_memcg(page)); in migrate_page_move_mapping()
376 newpage->index = page->index; in migrate_page_move_mapping()
377 newpage->mapping = page->mapping; in migrate_page_move_mapping()
379 SetPageSwapBacked(newpage); in migrate_page_move_mapping()
381 get_page(newpage); /* add cache reference */ in migrate_page_move_mapping()
383 SetPageSwapCache(newpage); in migrate_page_move_mapping()
384 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
391 SetPageDirty(newpage); in migrate_page_move_mapping()
394 radix_tree_replace_slot(pslot, newpage); in migrate_page_move_mapping()
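The hits at source lines 313-394 all fall inside migrate_page_move_mapping(), which transplants the old page's identity (memcg, index, mapping, swap-backed state) onto newpage and then lets newpage take over the cache reference, the swap entry, the dirty bit and the radix-tree slot. These references appear to come from the kernel's page-migration code (mm/migrate.c, around a v4.4 tree); the sketches below assume that API and reuse this sketch's headers. A minimal condensation of the handover, with the tree_lock handling, refcount freezing and zone statistics of the real function omitted:

/*
 * Sketch only, not the real function: condensed from the references above.
 * pslot is the page's slot in mapping->page_tree, looked up by the caller
 * under mapping->tree_lock in the real code.
 */
#include <linux/mm.h>
#include <linux/migrate.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h>

static int move_mapping_sketch(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       void **pslot)
{
	/* The new page inherits the old page's identity. */
	set_page_memcg(newpage, page_memcg(page));
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	/* Anonymous page outside the swap cache: nothing more to move. */
	if (!mapping)
		return MIGRATEPAGE_SUCCESS;

	/* Page-cache or swap-cache page: newpage takes the cache reference,
	 * the swap entry, the dirty bit and finally the radix-tree slot. */
	get_page(newpage);			/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}
	if (PageDirty(page))
		SetPageDirty(newpage);

	radix_tree_replace_slot(pslot, newpage);
	return MIGRATEPAGE_SUCCESS;
}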
438 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
460 set_page_memcg(newpage, page_memcg(page)); in migrate_huge_page_move_mapping()
461 newpage->index = page->index; in migrate_huge_page_move_mapping()
462 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
463 get_page(newpage); in migrate_huge_page_move_mapping()
465 radix_tree_replace_slot(pslot, newpage); in migrate_huge_page_move_mapping()
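migrate_huge_page_move_mapping() (lines 438-465) is the hugetlb variant of the same handover: identity, cache reference and radix-tree slot, without the swap-cache and dirty bookkeeping. Again a sketch, with the tree_lock and refcount freeze left out:

/* Hugetlb flavour of the handover above (sketch). pslot is the page's
 * slot in mapping->page_tree, found by the caller under tree_lock. */
static void huge_move_mapping_sketch(struct page *newpage, struct page *page,
				     void **pslot)
{
	set_page_memcg(newpage, page_memcg(page));
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	get_page(newpage);			/* cache reference */
	radix_tree_replace_slot(pslot, newpage);
}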
524 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
529 copy_huge_page(newpage, page); in migrate_page_copy()
531 copy_highpage(newpage, page); in migrate_page_copy()
534 SetPageError(newpage); in migrate_page_copy()
536 SetPageReferenced(newpage); in migrate_page_copy()
538 SetPageUptodate(newpage); in migrate_page_copy()
541 SetPageActive(newpage); in migrate_page_copy()
543 SetPageUnevictable(newpage); in migrate_page_copy()
545 SetPageChecked(newpage); in migrate_page_copy()
547 SetPageMappedToDisk(newpage); in migrate_page_copy()
551 SetPageDirty(newpage); in migrate_page_copy()
554 set_page_young(newpage); in migrate_page_copy()
556 set_page_idle(newpage); in migrate_page_copy()
563 page_cpupid_xchg_last(newpage, cpupid); in migrate_page_copy()
565 ksm_migrate_page(newpage, page); in migrate_page_copy()
579 if (PageWriteback(newpage)) in migrate_page_copy()
580 end_page_writeback(newpage); in migrate_page_copy()
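migrate_page_copy() (lines 524-580) copies the data itself and then replays every state bit that still matters after migration from the old page onto newpage. The sketch below condenses that pattern; the real function also clears the corresponding state on the old page and treats file-backed dirty pages differently, and it assumes the additional mm headers for highmem, hugetlb, page-idle and KSM helpers on top of those in the first sketch:

/* Condensed from the references above: copy the contents, then mirror
 * the state bits that still matter on the new page. Clearing the old
 * page's state and the file-vs-swap dirty distinction are omitted. */
static void page_copy_sketch(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);	/* helper local to mm/migrate.c */
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (PageActive(page))
		SetPageActive(newpage);
	else if (PageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/* NUMA hinting history and KSM state follow the data. */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);
	ksm_migrate_page(newpage, page);

	/* Wake anyone already waiting for writeback on the new page. */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}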
594 struct page *newpage, struct page *page, in migrate_page() argument
601 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); in migrate_page()
606 migrate_page_copy(newpage, page); in migrate_page()
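migrate_page() (lines 594-606) is the common-case ->migratepage() implementation for pages that carry no buffers or other private data: move the mapping, then copy. The references already show essentially the whole function, so the sketch is close to verbatim:

/* Default migration path for pages without private data (sketch,
 * mirroring the calls listed above). */
static int migrate_page_sketch(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* writeback must already be done */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}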
618 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
624 return migrate_page(mapping, newpage, page, mode); in buffer_migrate_page()
628 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); in buffer_migrate_page()
642 set_page_private(newpage, page_private(page)); in buffer_migrate_page()
645 get_page(newpage); in buffer_migrate_page()
649 set_bh_page(bh, newpage, bh_offset(bh)); in buffer_migrate_page()
654 SetPagePrivate(newpage); in buffer_migrate_page()
656 migrate_page_copy(newpage, page); in buffer_migrate_page()
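buffer_migrate_page() (lines 618-656) covers pages that carry buffer_heads: a page with no buffers falls back to migrate_page(); otherwise, once the mapping move succeeds, page_private and the extra reference are handed over and every buffer_head is re-pointed at newpage before the contents are copied. A sketch of that buffer handover, with the per-buffer locking of the real function omitted:

/* Called after migrate_page_move_mapping() has succeeded for a page
 * with buffers (sketch; the real code also locks each buffer_head). */
static void buffer_handover_sketch(struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head = page_buffers(page);

	/* Private data (the buffer ring) now belongs to the new page. */
	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	/* Point every buffer_head at its offset within the new page. */
	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;
	} while (bh != head);

	SetPagePrivate(newpage);
	migrate_page_copy(newpage, page);
}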
716 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
733 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
747 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
754 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); in move_to_new_page()
758 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
766 rc = mapping->a_ops->migratepage(mapping, newpage, page, mode); in move_to_new_page()
768 rc = fallback_migrate_page(mapping, newpage, page, mode); in move_to_new_page()
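move_to_new_page() (lines 747-768) is the dispatcher that decides which of the paths above runs: plain migrate_page() when the page has no mapping, the mapping's own ->migratepage() callback when the filesystem provides one, and fallback_migrate_page() (lines 716-733) otherwise. Both pages must already be locked, which is what the VM_BUG_ON_PAGE hit at line 754 asserts for newpage. Sketch of the dispatch:

/* Dispatch to the appropriate migration callback (sketch). */
static int move_to_new_page_sketch(struct page *newpage, struct page *page,
				   enum migrate_mode mode)
{
	struct address_space *mapping = page_mapping(page);
	int rc;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	if (!mapping)
		rc = migrate_page(mapping, newpage, page, mode);
	else if (mapping->a_ops->migratepage)
		/* The filesystem supplies its own migration hook. */
		rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
	else
		rc = fallback_migrate_page(mapping, newpage, page, mode);

	return rc;
}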
782 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
853 if (unlikely(!trylock_page(newpage))) in __unmap_and_move()
864 rc = balloon_page_migrate(newpage, page, mode); in __unmap_and_move()
896 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
900 rc == MIGRATEPAGE_SUCCESS ? newpage : page); in __unmap_and_move()
903 unlock_page(newpage); in __unmap_and_move()
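__unmap_and_move() (lines 782-903) brackets the actual move. It locks both pages (the trylock_page(newpage) at line 853 is expected to succeed since we are normally the only holder of the freshly allocated page), routes balloon pages to balloon_page_migrate(), unmaps the old page, calls move_to_new_page(), and finally rebuilds the PTEs at whichever page ends up holding the data. A heavily condensed skeleton, assuming <linux/rmap.h> and <linux/balloon_compaction.h> in addition to the earlier headers; writeback waiting, anon_vma pinning and most error handling are left out:

/* Skeleton only: error paths and synchronisation details omitted. */
static int unmap_and_move_core_sketch(struct page *page, struct page *newpage,
				      int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;

	if (!trylock_page(page))
		goto out;		/* the real code may sleep here if force */

	/* Normally nobody else holds newpage, so this should succeed. */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(__is_movable_balloon_page(page))) {
		/* Balloon pages use their own migration callback. */
		rc = balloon_page_migrate(newpage, page, mode);
		goto out_unlock_both;
	}

	if (page_mapped(page)) {
		try_to_unmap(page,
			TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	/* Re-establish PTEs at the page that now holds the data. */
	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	unlock_page(page);
out:
	return rc;
}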
935 struct page *newpage; in unmap_and_move() local
937 newpage = get_new_page(page, private, &result); in unmap_and_move()
938 if (!newpage) in unmap_and_move()
950 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
986 put_new_page(newpage, private); in unmap_and_move()
987 else if (unlikely(__is_movable_balloon_page(newpage))) { in unmap_and_move()
989 put_page(newpage); in unmap_and_move()
991 putback_lru_page(newpage); in unmap_and_move()
997 *result = page_to_nid(newpage); in unmap_and_move()
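The outer unmap_and_move() (lines 935-997) owns newpage's life cycle: it obtains it from the caller-supplied get_new_page() allocator, runs __unmap_and_move(), and afterwards disposes of newpage in the right way, whether that is the caller's put_new_page() callback after a failure, a bare put_page() for balloon pages, or putback_lru_page(), which also drops our reference. A skeleton of that cycle, with migration statistics and the old page's own putback omitted:

/* Skeleton of the allocate / move / dispose cycle (sketch). */
static int unmap_and_move_sketch(new_page_t get_new_page,
				 free_page_t put_new_page,
				 unsigned long private, struct page *page,
				 int force, enum migrate_mode mode)
{
	int rc;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);

	if (!newpage)
		return -ENOMEM;

	rc = __unmap_and_move(page, newpage, force, mode);

	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
		put_new_page(newpage, private);	/* failed: hand it back */
	else if (unlikely(__is_movable_balloon_page(newpage)))
		put_page(newpage);		/* balloon page: just drop our ref */
	else
		putback_lru_page(newpage);	/* onto the LRU, dropping our ref */

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}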
1581 struct page *newpage; in alloc_misplaced_dst_page() local
1583 newpage = __alloc_pages_node(nid, in alloc_misplaced_dst_page()
1589 return newpage; in alloc_misplaced_dst_page()
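Finally, alloc_misplaced_dst_page() (lines 1581-1589) is one concrete get_new_page() callback, used when NUMA balancing migrates a misplaced page: it allocates the destination page on the target node. The sketch below assumes the node id arrives packed in the private/data argument and simplifies the GFP mask (the real mask is stricter, avoiding reclaim and allocation warnings):

/* A get_new_page()-style callback (sketch): allocate the replacement on
 * the node the page should live on. GFP details simplified. */
static struct page *alloc_dst_on_node_sketch(struct page *page,
					     unsigned long data, int **result)
{
	int nid = (int)data;		/* target NUMA node id (assumed) */

	return __alloc_pages_node(nid,
				  GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
				  0);	/* order 0: a single base page */
}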