Lines matching refs:page in mm/huge_memory.c

(Free-text search results for the identifier 'page'. Each entry gives the source line number, the matching code fragment, and the enclosing function; trailing 'argument' and 'local' mark matches that are a function parameter or a local declaration.)

172 struct page *huge_zero_page __read_mostly;
179 static struct page *get_huge_zero_page(void) in get_huge_zero_page()
181 struct page *zero_page; in get_huge_zero_page()
226 struct page *zero_page = xchg(&huge_zero_page, NULL); in shrink_huge_zero_page_scan()
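
The two fragments above (172, 226) belong to the shared huge zero page, which backs read faults on anonymous THP ranges. Its lifetime is managed lock-free: the first user installs a freshly zeroed compound page with cmpxchg(), losers of that race free their own copy, and the shrinker detaches the page with xchg() only once it holds the last reference. A sketch of the protocol, reconstructed from kernels of this vintage (VM event counters dropped):

struct page *huge_zero_page __read_mostly;
static atomic_t huge_zero_refcount;

static struct page *get_huge_zero_page(void)
{
        struct page *zero_page;
retry:
        /* Fast path: the page exists and is still alive. */
        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
                return ACCESS_ONCE(huge_zero_page);

        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                                HPAGE_PMD_ORDER);
        if (!zero_page)
                return NULL;

        preempt_disable();
        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                /* Lost the install race: free our copy and retry. */
                preempt_enable();
                __free_pages(zero_page, compound_order(zero_page));
                goto retry;
        }
        /* One reference for the caller, one owned by the shrinker. */
        atomic_set(&huge_zero_refcount, 2);
        preempt_enable();
        return ACCESS_ONCE(huge_zero_page);
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
                                                struct shrink_control *sc)
{
        /* Free only when the shrinker's reference is the last one. */
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
                struct page *zero_page = xchg(&huge_zero_page, NULL);
                BUG_ON(zero_page == NULL);
                __free_pages(zero_page, compound_order(zero_page));
                return HPAGE_PMD_NR;
        }
        return 0;
}
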
709 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) in mk_huge_pmd() argument
712 entry = mk_pmd(page, prot); in mk_huge_pmd()
720 struct page *page, gfp_t gfp) in __do_huge_pmd_anonymous_page() argument
726 VM_BUG_ON_PAGE(!PageCompound(page), page); in __do_huge_pmd_anonymous_page()
728 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) in __do_huge_pmd_anonymous_page()
733 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
737 clear_huge_page(page, haddr, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
743 __SetPageUptodate(page); in __do_huge_pmd_anonymous_page()
748 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
749 put_page(page); in __do_huge_pmd_anonymous_page()
753 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
755 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
756 mem_cgroup_commit_charge(page, memcg, false); in __do_huge_pmd_anonymous_page()
757 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
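
Fragments 709-757 trace the anonymous huge-page fault path, __do_huge_pmd_anonymous_page(). The ordering is the point: the page is charged to the memcg first so failure unwinds cheaply (728, 733), fully cleared, marked uptodate before the mapping can be observed, and only then wired into rmap, memcg and the LRU under the page-table lock. A condensed sketch assuming the ~3.19-era signatures visible in the fragments; counters and some unwinding are abbreviated:

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long haddr, pmd_t *pmd,
                                        struct page *page, gfp_t gfp)
{
        struct mem_cgroup *memcg;
        pgtable_t pgtable;
        spinlock_t *ptl;
        pmd_t entry;

        VM_BUG_ON_PAGE(!PageCompound(page), page);

        if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
                return VM_FAULT_OOM;

        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable)) {
                mem_cgroup_cancel_charge(page, memcg);
                return VM_FAULT_OOM;
        }

        clear_huge_page(page, haddr, HPAGE_PMD_NR);
        /*
         * The barrier in __SetPageUptodate() makes the cleared contents
         * visible before set_pmd_at() publishes the mapping.
         */
        __SetPageUptodate(page);

        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_none(*pmd))) {
                /* Raced with another fault: back out (cf. 748-749). */
                spin_unlock(ptl);
                mem_cgroup_cancel_charge(page, memcg);
                put_page(page);
                pte_free(mm, pgtable);
                return 0;
        }
        entry = mk_huge_pmd(page, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        page_add_new_anon_rmap(page, vma, haddr);
        mem_cgroup_commit_charge(page, memcg, false);
        lru_cache_add_active_or_unevictable(page, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
        spin_unlock(ptl);

        return 0;
}
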
776 struct page *zero_page) in set_huge_zero_page()
794 struct page *page; in do_huge_pmd_anonymous_page() local
807 struct page *zero_page; in do_huge_pmd_anonymous_page()
829 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
830 if (unlikely(!page)) { in do_huge_pmd_anonymous_page()
834 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) { in do_huge_pmd_anonymous_page()
835 put_page(page); in do_huge_pmd_anonymous_page()
849 struct page *src_page; in copy_huge_pmd()
875 struct page *zero_page; in copy_huge_pmd()
948 static void get_user_huge_page(struct page *page) in get_user_huge_page() argument
951 struct page *endpage = page + HPAGE_PMD_NR; in get_user_huge_page()
953 atomic_add(HPAGE_PMD_NR, &page->_count); in get_user_huge_page()
954 while (++page < endpage) in get_user_huge_page()
955 get_huge_page_tail(page); in get_user_huge_page()
957 get_page(page); in get_user_huge_page()
961 static void put_user_huge_page(struct page *page) in put_user_huge_page() argument
964 struct page *endpage = page + HPAGE_PMD_NR; in put_user_huge_page()
966 while (page < endpage) in put_user_huge_page()
967 put_page(page++); in put_user_huge_page()
969 put_page(page); in put_user_huge_page()
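
get_user_huge_page()/put_user_huge_page() (948-969) take and drop user references on a whole THP. On this kernel generation tail pages carry their own pin accounting, so one branch pins the head in bulk (HPAGE_PMD_NR on page->_count) and each tail via get_huge_page_tail(), which keeps a later __split_huge_page_refcount() balanced, while the other branch takes a single compound reference. The predicate choosing between the two is a build/debug detail, so it appears below as a hypothetical placeholder, thp_pin_subpages():

static void get_user_huge_page(struct page *page)
{
        if (thp_pin_subpages()) {       /* hypothetical placeholder */
                struct page *endpage = page + HPAGE_PMD_NR;

                /* Bulk references on the head... */
                atomic_add(HPAGE_PMD_NR, &page->_count);
                /* ...plus a tail pin on each tail page. */
                while (++page < endpage)
                        get_huge_page_tail(page);
        } else {
                get_page(page);
        }
}

static void put_user_huge_page(struct page *page)
{
        if (thp_pin_subpages()) {       /* hypothetical placeholder */
                struct page *endpage = page + HPAGE_PMD_NR;

                /* put_page() on a tail releases the tail pin too. */
                while (page < endpage)
                        put_page(page++);
        } else {
                put_page(page);
        }
}
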
977 struct page *page, in do_huge_pmd_wp_page_fallback() argument
985 struct page **pages; in do_huge_pmd_wp_page_fallback()
989 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, in do_huge_pmd_wp_page_fallback()
999 vma, address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
1019 copy_user_highpage(pages[i], page + i, in do_huge_pmd_wp_page_fallback()
1032 VM_BUG_ON_PAGE(!PageHead(page), page); in do_huge_pmd_wp_page_fallback()
1058 page_remove_rmap(page); in do_huge_pmd_wp_page_fallback()
1064 put_page(page); in do_huge_pmd_wp_page_fallback()
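
The fallback COW path (977-1064) runs when no huge page can be allocated for copy-on-write: it builds an array of HPAGE_PMD_NR order-0 pages, copies the faulting THP into them 4K at a time, and then swaps the huge mapping for a table of small PTEs. A sketch of the allocate-and-copy half, wrapped in a hypothetical helper (thp_cow_alloc_and_copy() is not a kernel function; the remap half, which runs under the page-table lock, is omitted):

static struct page **thp_cow_alloc_and_copy(struct vm_area_struct *vma,
                                            struct page *page,
                                            unsigned long haddr,
                                            unsigned long address)
{
        struct page **pages;
        int i;

        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, GFP_KERNEL);
        if (unlikely(!pages))
                return NULL;

        for (i = 0; i < HPAGE_PMD_NR; i++) {
                /* Place each replacement page on the THP's node. */
                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
                                               address, page_to_nid(page));
                if (unlikely(!pages[i])) {
                        while (--i >= 0)
                                put_page(pages[i]);
                        kfree(pages);
                        return NULL;
                }
        }

        /* Copy the huge page into the small pages, 4K at a time. */
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                copy_user_highpage(pages[i], page + i,
                                   haddr + PAGE_SIZE * i, vma);
                cond_resched();
        }
        return pages;
}
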
1087 struct page *page = NULL, *new_page; in do_huge_pmd_wp_page() local
1103 page = pmd_page(orig_pmd); in do_huge_pmd_wp_page()
1104 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); in do_huge_pmd_wp_page()
1105 if (page_mapcount(page) == 1) { in do_huge_pmd_wp_page()
1114 get_user_huge_page(page); in do_huge_pmd_wp_page()
1125 if (!page) { in do_huge_pmd_wp_page()
1130 pmd, orig_pmd, page, haddr); in do_huge_pmd_wp_page()
1132 split_huge_page(page); in do_huge_pmd_wp_page()
1135 put_user_huge_page(page); in do_huge_pmd_wp_page()
1143 if (page) { in do_huge_pmd_wp_page()
1144 split_huge_page(page); in do_huge_pmd_wp_page()
1145 put_user_huge_page(page); in do_huge_pmd_wp_page()
1155 if (!page) in do_huge_pmd_wp_page()
1158 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1166 if (page) in do_huge_pmd_wp_page()
1167 put_user_huge_page(page); in do_huge_pmd_wp_page()
1183 if (!page) { in do_huge_pmd_wp_page()
1187 VM_BUG_ON_PAGE(!PageHead(page), page); in do_huge_pmd_wp_page()
1188 page_remove_rmap(page); in do_huge_pmd_wp_page()
1189 put_page(page); in do_huge_pmd_wp_page()
1203 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd()
1209 struct page *page = NULL; in follow_trans_huge_pmd() local
1224 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1225 VM_BUG_ON_PAGE(!PageHead(page), page); in follow_trans_huge_pmd()
1242 if (page->mapping && trylock_page(page)) { in follow_trans_huge_pmd()
1244 if (page->mapping) in follow_trans_huge_pmd()
1245 mlock_vma_page(page); in follow_trans_huge_pmd()
1246 unlock_page(page); in follow_trans_huge_pmd()
1249 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; in follow_trans_huge_pmd()
1250 VM_BUG_ON_PAGE(!PageCompound(page), page); in follow_trans_huge_pmd()
1252 get_page_foll(page); in follow_trans_huge_pmd()
1255 return page; in follow_trans_huge_pmd()
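
Line 1249 is how GUP lands on the right 4 KiB piece of a huge page: the byte offset inside the 2 MiB region, addr & ~HPAGE_PMD_MASK, shifted down by PAGE_SHIFT, indexes from the head page. With 4 KiB pages, addr = haddr + 0x13000 resolves to subpage index 0x13 (19). A tiny hypothetical helper spelling that out:

/* Hypothetical helper: resolve an address to its THP subpage. */
static struct page *thp_subpage(struct page *head, unsigned long addr)
{
        unsigned long idx = (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;

        VM_BUG_ON_PAGE(!PageHead(head), head);
        return head + idx;              /* 0 <= idx < HPAGE_PMD_NR */
}
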
1264 struct page *page; in do_huge_pmd_numa_page() local
1286 page = pmd_page(*pmdp); in do_huge_pmd_numa_page()
1288 wait_on_page_locked(page); in do_huge_pmd_numa_page()
1292 page = pmd_page(pmd); in do_huge_pmd_numa_page()
1293 BUG_ON(is_huge_zero_page(page)); in do_huge_pmd_numa_page()
1294 page_nid = page_to_nid(page); in do_huge_pmd_numa_page()
1295 last_cpupid = page_cpupid_last(page); in do_huge_pmd_numa_page()
1310 page_locked = trylock_page(page); in do_huge_pmd_numa_page()
1311 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1321 wait_on_page_locked(page); in do_huge_pmd_numa_page()
1330 get_page(page); in do_huge_pmd_numa_page()
1332 anon_vma = page_lock_anon_vma_read(page); in do_huge_pmd_numa_page()
1337 unlock_page(page); in do_huge_pmd_numa_page()
1338 put_page(page); in do_huge_pmd_numa_page()
1345 put_page(page); in do_huge_pmd_numa_page()
1356 pmdp, pmd, addr, page, target_nid); in do_huge_pmd_numa_page()
1365 BUG_ON(!PageLocked(page)); in do_huge_pmd_numa_page()
1373 unlock_page(page); in do_huge_pmd_numa_page()
1394 struct page *page; in zap_huge_pmd() local
1412 page = pmd_page(orig_pmd); in zap_huge_pmd()
1413 page_remove_rmap(page); in zap_huge_pmd()
1414 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); in zap_huge_pmd()
1416 VM_BUG_ON_PAGE(!PageHead(page), page); in zap_huge_pmd()
1419 tlb_remove_page(tlb, page); in zap_huge_pmd()
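
zap_huge_pmd() (1394-1419) tears the mapping down in a deliberate order: clear the PMD so no new references can form, drop the rmap while the assertions at 1414 and 1416 still hold, then hand the page to the mmu_gather batch rather than freeing it, because remote CPUs may still hold TLB entries. A hypothetical distillation (the real function also handles the huge zero page and nr_ptes accounting):

static void zap_one_huge_pmd(struct mmu_gather *tlb, pmd_t *pmd,
                             unsigned long addr, spinlock_t *ptl)
{
        pmd_t orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
        struct page *page = pmd_page(orig_pmd);

        page_remove_rmap(page);                 /* drop the anon rmap */
        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
        VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_unlock(ptl);
        /* Free is deferred until the batched TLB flush completes. */
        tlb_remove_page(tlb, page);
}
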
1556 pmd_t *page_check_address_pmd(struct page *page, in page_check_address_pmd() argument
1580 if (pmd_page(*pmd) != page) in page_check_address_pmd()
1602 static int __split_huge_page_splitting(struct page *page, in __split_huge_page_splitting() argument
1615 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_splitting()
1635 static void __split_huge_page_refcount(struct page *page, in __split_huge_page_refcount() argument
1639 struct zone *zone = page_zone(page); in __split_huge_page_refcount()
1645 lruvec = mem_cgroup_page_lruvec(page, zone); in __split_huge_page_refcount()
1647 compound_lock(page); in __split_huge_page_refcount()
1649 mem_cgroup_split_huge_fixup(page); in __split_huge_page_refcount()
1652 struct page *page_tail = page + i; in __split_huge_page_refcount()
1673 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, in __split_huge_page_refcount()
1680 page_tail->flags |= (page->flags & in __split_huge_page_refcount()
1706 page_tail->_mapcount = page->_mapcount; in __split_huge_page_refcount()
1709 page_tail->mapping = page->mapping; in __split_huge_page_refcount()
1711 page_tail->index = page->index + i; in __split_huge_page_refcount()
1712 page_cpupid_xchg_last(page_tail, page_cpupid_last(page)); in __split_huge_page_refcount()
1719 lru_add_page_tail(page, page_tail, lruvec, list); in __split_huge_page_refcount()
1721 atomic_sub(tail_count, &page->_count); in __split_huge_page_refcount()
1722 BUG_ON(atomic_read(&page->_count) <= 0); in __split_huge_page_refcount()
1726 ClearPageCompound(page); in __split_huge_page_refcount()
1727 compound_unlock(page); in __split_huge_page_refcount()
1731 struct page *page_tail = page + i; in __split_huge_page_refcount()
1747 BUG_ON(page_count(page) <= 0); in __split_huge_page_refcount()
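
__split_huge_page_refcount() (1635-1747) is the heart of a split. Under compound_lock(), each tail page inherits the head's flags, _mapcount, mapping and index (1680-1712), receives enough extra _count (1673) that every existing pin on the compound page stays valid, and joins the LRU beside the head; afterwards the head drops the accumulated share (1721-1722) and loses PageCompound. A condensed version of the loop, running with the zone lru_lock held and lruvec set up as 1639-1645 show, with one simplification flagged: here the head gives up exactly what the tails gained, while the kernel's real accounting is subtler because tail pins live in _mapcount:

        int i, tail_count = 0;

        compound_lock(page);
        mem_cgroup_split_huge_fixup(page);

        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
                struct page *page_tail = page + i;
                int refs = page_mapcount(page) + page_mapcount(page_tail) + 1;

                /* Existing pins must remain valid on the tail. */
                atomic_add(refs, &page_tail->_count);
                tail_count += refs;     /* simplified; see above */

                /* The tail becomes an ordinary page: copy head state. */
                page_tail->flags |= (page->flags &
                                     ((1L << PG_referenced) |
                                      (1L << PG_swapbacked) |
                                      (1L << PG_mlocked) |
                                      (1L << PG_uptodate) |
                                      (1L << PG_active) |
                                      (1L << PG_unevictable)));
                page_tail->_mapcount = page->_mapcount;
                page_tail->mapping = page->mapping;
                page_tail->index = page->index + i;
                page_cpupid_xchg_last(page_tail, page_cpupid_last(page));

                lru_add_page_tail(page, page_tail, lruvec, list);
        }
        atomic_sub(tail_count, &page->_count);
        BUG_ON(atomic_read(&page->_count) <= 0);

        ClearPageCompound(page);
        compound_unlock(page);
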
1750 static int __split_huge_page_map(struct page *page, in __split_huge_page_map() argument
1761 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_map()
1767 BUG_ON(page_mapcount(page) != 1); in __split_huge_page_map()
1772 BUG_ON(PageCompound(page+i)); in __split_huge_page_map()
1778 entry = mk_pte(page + i, vma->vm_page_prot); in __split_huge_page_map()
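
__split_huge_page_map() (1750-1778) is the inverse of mk_huge_pmd(): after the refcount phase, every small page gets an ordinary PTE built from the same vm_page_prot, so the split is invisible to userspace. A sketch of the per-page loop as a hypothetical helper (remap_split_thp() is not a kernel function; dirty/young propagation from the old PMD is elided):

/* '_pmd' holds the withdrawn page table deposited at fault time. */
static void remap_split_thp(struct mm_struct *mm, struct vm_area_struct *vma,
                            struct page *page, pmd_t _pmd,
                            unsigned long address)
{
        unsigned long haddr = address;
        int i;

        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
                pte_t *pte, entry;

                BUG_ON(PageCompound(page + i)); /* refcount phase done */
                entry = mk_pte(page + i, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                pte = pte_offset_map(&_pmd, haddr);
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, haddr, pte, entry);
                pte_unmap(pte);
        }
}
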
1827 static void __split_huge_page(struct page *page, in __split_huge_page() argument
1832 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); in __split_huge_page()
1835 BUG_ON(!PageHead(page)); in __split_huge_page()
1836 BUG_ON(PageTail(page)); in __split_huge_page()
1841 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1843 mapcount += __split_huge_page_splitting(page, vma, addr); in __split_huge_page()
1855 if (mapcount != page_mapcount(page)) { in __split_huge_page()
1857 mapcount, page_mapcount(page)); in __split_huge_page()
1861 __split_huge_page_refcount(page, list); in __split_huge_page()
1866 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1868 mapcount2 += __split_huge_page_map(page, vma, addr); in __split_huge_page()
1872 mapcount, mapcount2, page_mapcount(page)); in __split_huge_page()
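
__split_huge_page() (1827-1872) sequences the phases over every VMA mapping the page: mark each mapping PMD as splitting, redistribute refcounts, then rebuild PTE mappings. The mapcount totals gathered in the first and third walks must agree with page_mapcount() (1855, 1872) or the kernel BUGs, since a missed mapping would leave a stale huge PMD behind. Schematically, assuming the anon_vma interval-tree walk of this vintage:

        mapcount = 0;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long addr = vma_address(page, vma);

                mapcount += __split_huge_page_splitting(page, vma, addr);
        }
        BUG_ON(mapcount != page_mapcount(page));

        __split_huge_page_refcount(page, list);

        mapcount2 = 0;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                struct vm_area_struct *vma = avc->vma;
                unsigned long addr = vma_address(page, vma);

                mapcount2 += __split_huge_page_map(page, vma, addr);
        }
        BUG_ON(mapcount != mapcount2);
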
1884 int split_huge_page_to_list(struct page *page, struct list_head *list) in split_huge_page_to_list() argument
1889 BUG_ON(is_huge_zero_page(page)); in split_huge_page_to_list()
1890 BUG_ON(!PageAnon(page)); in split_huge_page_to_list()
1899 anon_vma = page_get_anon_vma(page); in split_huge_page_to_list()
1905 if (!PageCompound(page)) in split_huge_page_to_list()
1908 BUG_ON(!PageSwapBacked(page)); in split_huge_page_to_list()
1909 __split_huge_page(page, anon_vma, list); in split_huge_page_to_list()
1912 BUG_ON(PageCompound(page)); in split_huge_page_to_list()
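
The top-level entry point (1884-1912) supplies the locking: it pins the anon_vma and takes its lock for write to serialize against parallel splits and khugepaged collapses, and reads '!PageCompound' as 'someone already split this page'. Reconstructed from kernels of this vintage, lightly trimmed:

int split_huge_page_to_list(struct page *page, struct list_head *list)
{
        struct anon_vma *anon_vma;
        int ret = 1;

        BUG_ON(is_huge_zero_page(page));
        BUG_ON(!PageAnon(page));

        /* Pin the anon_vma, then take its lock for write. */
        anon_vma = page_get_anon_vma(page);
        if (!anon_vma)
                goto out;
        anon_vma_lock_write(anon_vma);

        ret = 0;
        if (!PageCompound(page))
                goto out_unlock;        /* raced with another split */

        BUG_ON(!PageSwapBacked(page));
        __split_huge_page(page, anon_vma, list);
        count_vm_event(THP_SPLIT);

        BUG_ON(PageCompound(page));
out_unlock:
        anon_vma_unlock_write(anon_vma);
        put_anon_vma(anon_vma);
out:
        return ret;
}
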
2106 static void release_pte_page(struct page *page) in release_pte_page() argument
2109 dec_zone_page_state(page, NR_ISOLATED_ANON + 0); in release_pte_page()
2110 unlock_page(page); in release_pte_page()
2111 putback_lru_page(page); in release_pte_page()
2127 struct page *page; in __collapse_huge_page_isolate() local
2143 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
2144 if (unlikely(!page)) in __collapse_huge_page_isolate()
2147 VM_BUG_ON_PAGE(PageCompound(page), page); in __collapse_huge_page_isolate()
2148 VM_BUG_ON_PAGE(!PageAnon(page), page); in __collapse_huge_page_isolate()
2149 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in __collapse_huge_page_isolate()
2157 if (!trylock_page(page)) in __collapse_huge_page_isolate()
2165 if (page_count(page) != 1 + !!PageSwapCache(page)) { in __collapse_huge_page_isolate()
2166 unlock_page(page); in __collapse_huge_page_isolate()
2172 if (PageSwapCache(page) && !reuse_swap_page(page)) { in __collapse_huge_page_isolate()
2173 unlock_page(page); in __collapse_huge_page_isolate()
2186 if (isolate_lru_page(page)) { in __collapse_huge_page_isolate()
2187 unlock_page(page); in __collapse_huge_page_isolate()
2191 inc_zone_page_state(page, NR_ISOLATED_ANON + 0); in __collapse_huge_page_isolate()
2192 VM_BUG_ON_PAGE(!PageLocked(page), page); in __collapse_huge_page_isolate()
2193 VM_BUG_ON_PAGE(PageLRU(page), page); in __collapse_huge_page_isolate()
2196 if (pte_young(pteval) || PageReferenced(page) || in __collapse_huge_page_isolate()
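
__collapse_huge_page_isolate() (2106-2196) vets and pins each small page before khugepaged may collapse the range, and the order of the checks in the fragments matters: lock the page first so the refcount cannot move, require page_count == 1 + PageSwapCache (no foreign pins), settle swap-cache sharing with reuse_swap_page(), and only then pull the page off the LRU. A condensed body of the per-PTE loop; 'out' is the failure label of the real function and 'referenced' its result flag:

        struct page *page = vm_normal_page(vma, address, pteval);

        if (unlikely(!page))
                goto out;

        VM_BUG_ON_PAGE(PageCompound(page), page);
        VM_BUG_ON_PAGE(!PageAnon(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        /* Lock before the refcount check so the count can't change. */
        if (!trylock_page(page))
                goto out;

        /* Only our mapping (plus the swap cache, if any) may hold it. */
        if (page_count(page) != 1 + !!PageSwapCache(page)) {
                unlock_page(page);
                goto out;
        }
        if (PageSwapCache(page) && !reuse_swap_page(page)) {
                unlock_page(page);
                goto out;
        }

        if (isolate_lru_page(page)) {
                unlock_page(page);
                goto out;
        }
        inc_zone_page_state(page, NR_ISOLATED_ANON + 0);  /* anon, not file */
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);

        if (pte_young(pteval) || PageReferenced(page) ||
            mmu_notifier_test_young(vma->vm_mm, address))
                referenced = 1;
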
2207 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, in __collapse_huge_page_copy() argument
2215 struct page *src_page; in __collapse_huge_page_copy()
2218 clear_user_highpage(page, address); in __collapse_huge_page_copy()
2234 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
2254 page++; in __collapse_huge_page_copy()
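
__collapse_huge_page_copy() (2207-2254) migrates the content into the new huge page one PTE at a time: an empty PTE becomes a cleared 4 KiB slice (2218), while a mapped one is copied (2234) and its source page unmapped, unpinned via release_pte_page(), and freed. A condensed sketch:

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address, spinlock_t *ptl)
{
        pte_t *_pte;

        for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++) {
                pte_t pteval = *_pte;
                struct page *src_page;

                if (pte_none(pteval)) {
                        /* Hole in the range: hand out zeroes. */
                        clear_user_highpage(page, address);
                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
                } else {
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
                        VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
                        release_pte_page(src_page);
                        /* ptl guards the stats in page_remove_rmap(). */
                        spin_lock(ptl);
                        pte_clear(vma->vm_mm, address, _pte);
                        page_remove_rmap(src_page);
                        spin_unlock(ptl);
                        free_page_and_swap_cache(src_page);
                }
                address += PAGE_SIZE;
                page++;
        }
}
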
2316 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page()
2333 static struct page *
2334 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page()
2364 static inline struct page *alloc_hugepage(int defrag) in alloc_hugepage()
2370 static struct page *khugepaged_alloc_hugepage(bool *wait) in khugepaged_alloc_hugepage()
2372 struct page *hpage; in khugepaged_alloc_hugepage()
2390 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page()
2401 static struct page *
2402 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page()
2428 struct page **hpage, in collapse_huge_page()
2435 struct page *new_page; in collapse_huge_page()
2565 struct page **hpage) in khugepaged_scan_pmd()
2570 struct page *page; in khugepaged_scan_pmd() local
2598 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
2599 if (unlikely(!page)) in khugepaged_scan_pmd()
2607 node = page_to_nid(page); in khugepaged_scan_pmd()
2611 VM_BUG_ON_PAGE(PageCompound(page), page); in khugepaged_scan_pmd()
2612 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) in khugepaged_scan_pmd()
2619 if (page_count(page) != 1 + !!PageSwapCache(page)) in khugepaged_scan_pmd()
2621 if (pte_young(pteval) || PageReferenced(page) || in khugepaged_scan_pmd()
2662 struct page **hpage) in khugepaged_scan_mm_slot()
2783 struct page *hpage = NULL; in khugepaged_do_scan()
2886 struct page *page; in __split_huge_page_pmd() local
2910 page = pmd_page(*pmd); in __split_huge_page_pmd()
2911 VM_BUG_ON_PAGE(!page_count(page), page); in __split_huge_page_pmd()
2912 get_page(page); in __split_huge_page_pmd()
2916 split_huge_page(page); in __split_huge_page_pmd()
2918 put_page(page); in __split_huge_page_pmd()
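
__split_huge_page_pmd() (2886-2918) closes the listing with the pin-then-split idiom: the THP is referenced under the page-table lock (2911-2912) so it cannot be freed once the lock drops, split outside the lock, then released; the real function loops because a racing COW fault can install a fresh huge PMD in the meantime. Condensed core, with mmu notifiers and the huge-zero-page case omitted:

again:
        ptl = pmd_lock(mm, pmd);
        if (unlikely(!pmd_trans_huge(*pmd))) {
                spin_unlock(ptl);
                return;                 /* already split by someone else */
        }
        page = pmd_page(*pmd);
        VM_BUG_ON_PAGE(!page_count(page), page);
        get_page(page);                 /* keep the THP alive unlocked */
        spin_unlock(ptl);

        split_huge_page(page);          /* takes the anon_vma lock itself */
        put_page(page);

        /* A racing COW fault may have installed a new huge PMD. */
        if (unlikely(pmd_trans_huge(*pmd)))
                goto again;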