Lines matching refs:page in mm/huge_memory.c (cross-reference output: each entry is the source line number, the matching code, and the enclosing function, with "argument"/"local" tagging the kind of reference; the page->_count and compound_lock() usage places this in the pre-4.5 transparent hugepage code, before the THP refcounting rework).
174 struct page *huge_zero_page __read_mostly;
176 struct page *get_huge_zero_page(void) in get_huge_zero_page()
178 struct page *zero_page; in get_huge_zero_page()
223 struct page *zero_page = xchg(&huge_zero_page, NULL); in shrink_huge_zero_page_scan()
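
The matches above are the shared huge zero page: get_huge_zero_page() lazily allocates a single zeroed huge page to back read faults on untouched anonymous ranges, and the shrinker path at line 223 gives it back (the xchg swaps the pointer out) under memory pressure. A minimal userspace sketch of the read-fault side, assuming a kernel with CONFIG_TRANSPARENT_HUGEPAGE and transparent_hugepage/use_zero_page enabled; note the thp_zero_page_alloc counter in /proc/vmstat only moves the first time the page is created:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14        /* value from linux/mman.h */
#endif

int main(void)
{
        size_t len = 4UL << 20;         /* spans at least one full PMD */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        madvise(p, len, MADV_HUGEPAGE);
        volatile char c = p[0];         /* read fault only, no write:
                                           eligible for the zero page */
        (void)c;

        /* thp_zero_page_alloc counts huge-zero-page allocations */
        FILE *f = fopen("/proc/vmstat", "r");
        char line[128];
        while (f && fgets(line, sizeof(line), f))
                if (strncmp(line, "thp_zero_page", 13) == 0)
                        fputs(line, stdout);
        if (f)
                fclose(f);
        return 0;
}
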
706 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) in mk_huge_pmd() argument
709 entry = mk_pmd(page, prot); in mk_huge_pmd()
717 struct page *page, gfp_t gfp, in __do_huge_pmd_anonymous_page() argument
725 VM_BUG_ON_PAGE(!PageCompound(page), page); in __do_huge_pmd_anonymous_page()
727 if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) { in __do_huge_pmd_anonymous_page()
728 put_page(page); in __do_huge_pmd_anonymous_page()
735 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
736 put_page(page); in __do_huge_pmd_anonymous_page()
740 clear_huge_page(page, haddr, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
746 __SetPageUptodate(page); in __do_huge_pmd_anonymous_page()
751 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
752 put_page(page); in __do_huge_pmd_anonymous_page()
762 mem_cgroup_cancel_charge(page, memcg); in __do_huge_pmd_anonymous_page()
763 put_page(page); in __do_huge_pmd_anonymous_page()
771 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
773 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
774 mem_cgroup_commit_charge(page, memcg, false); in __do_huge_pmd_anonymous_page()
775 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
795 struct page *zero_page) in set_huge_zero_page()
813 struct page *page; in do_huge_pmd_anonymous_page() local
826 struct page *zero_page; in do_huge_pmd_anonymous_page()
863 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
864 if (unlikely(!page)) { in do_huge_pmd_anonymous_page()
868 return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, in do_huge_pmd_anonymous_page()
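
Lines 706-868 are the anonymous huge fault path: do_huge_pmd_anonymous_page() either installs the zero page for a read fault or allocates a huge page and calls __do_huge_pmd_anonymous_page(), which charges it to the memcg, clears it, and maps it with mk_huge_pmd() plus page_add_new_anon_rmap(). A sketch of triggering that path from userspace and watching it in /proc/self/smaps; anon_huge_kb() is an illustrative helper defined here, not a kernel interface, and a nonzero delta still depends on alignment and the system THP policy:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

/* Illustrative helper: sum the AnonHugePages: fields of /proc/self/smaps. */
static long anon_huge_kb(void)
{
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[256];
        long total = 0, kb;

        while (f && fgets(line, sizeof(line), f))
                if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
                        total += kb;
        if (f)
                fclose(f);
        return total;
}

int main(void)
{
        size_t len = 8UL << 20;         /* room for several PMD extents */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        madvise(p, len, MADV_HUGEPAGE);

        printf("AnonHugePages before: %ld kB\n", anon_huge_kb());
        memset(p, 1, len);              /* first writes fault through
                                           do_huge_pmd_anonymous_page() */
        printf("AnonHugePages after:  %ld kB\n", anon_huge_kb());
        return 0;
}
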
920 struct page *src_page; in copy_huge_pmd()
946 struct page *zero_page; in copy_huge_pmd()
1017 static void get_user_huge_page(struct page *page) in get_user_huge_page() argument
1020 struct page *endpage = page + HPAGE_PMD_NR; in get_user_huge_page()
1022 atomic_add(HPAGE_PMD_NR, &page->_count); in get_user_huge_page()
1023 while (++page < endpage) in get_user_huge_page()
1024 get_huge_page_tail(page); in get_user_huge_page()
1026 get_page(page); in get_user_huge_page()
1030 static void put_user_huge_page(struct page *page) in put_user_huge_page() argument
1033 struct page *endpage = page + HPAGE_PMD_NR; in put_user_huge_page()
1035 while (page < endpage) in put_user_huge_page()
1036 put_page(page++); in put_user_huge_page()
1038 put_page(page); in put_user_huge_page()
1046 struct page *page, in do_huge_pmd_wp_page_fallback() argument
1054 struct page **pages; in do_huge_pmd_wp_page_fallback()
1058 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, in do_huge_pmd_wp_page_fallback()
1068 vma, address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
1088 copy_user_highpage(pages[i], page + i, in do_huge_pmd_wp_page_fallback()
1101 VM_BUG_ON_PAGE(!PageHead(page), page); in do_huge_pmd_wp_page_fallback()
1127 page_remove_rmap(page); in do_huge_pmd_wp_page_fallback()
1133 put_page(page); in do_huge_pmd_wp_page_fallback()
1156 struct page *page = NULL, *new_page; in do_huge_pmd_wp_page() local
1172 page = pmd_page(orig_pmd); in do_huge_pmd_wp_page()
1173 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); in do_huge_pmd_wp_page()
1174 if (page_mapcount(page) == 1) { in do_huge_pmd_wp_page()
1183 get_user_huge_page(page); in do_huge_pmd_wp_page()
1194 if (!page) { in do_huge_pmd_wp_page()
1199 pmd, orig_pmd, page, haddr); in do_huge_pmd_wp_page()
1201 split_huge_page(page); in do_huge_pmd_wp_page()
1204 put_user_huge_page(page); in do_huge_pmd_wp_page()
1212 if (page) { in do_huge_pmd_wp_page()
1213 split_huge_page(page); in do_huge_pmd_wp_page()
1214 put_user_huge_page(page); in do_huge_pmd_wp_page()
1224 if (!page) in do_huge_pmd_wp_page()
1227 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1235 if (page) in do_huge_pmd_wp_page()
1236 put_user_huge_page(page); in do_huge_pmd_wp_page()
1252 if (!page) { in do_huge_pmd_wp_page()
1256 VM_BUG_ON_PAGE(!PageHead(page), page); in do_huge_pmd_wp_page()
1257 page_remove_rmap(page); in do_huge_pmd_wp_page()
1258 put_page(page); in do_huge_pmd_wp_page()
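
do_huge_pmd_wp_page() (lines 1156-1258) resolves a write fault on a write-protected huge PMD: if the faulting task is the sole mapper (the page_mapcount check at line 1174) it simply makes the PMD writable; otherwise it allocates a fresh huge page and fills it with copy_user_huge_page() (line 1227), falling back to do_huge_pmd_wp_page_fallback() or split_huge_page() when the huge allocation fails. The classic trigger is copy-on-write after fork(); a sketch, with the caveat that whether the copy stays huge depends on memory pressure:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

int main(void)
{
        size_t len = 4UL << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        madvise(p, len, MADV_HUGEPAGE);
        memset(p, 1, len);      /* parent faults in (possibly huge) pages */

        pid_t pid = fork();     /* copy_huge_pmd() shares the PMD read-only */
        if (pid == 0) {
                p[0] = 2;       /* write fault -> do_huge_pmd_wp_page() */
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        /* COW semantics: the child's write landed in a private copy. */
        printf("parent still sees %d\n", p[0]);
        return 0;
}
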
1272 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd()
1278 struct page *page = NULL; in follow_trans_huge_pmd() local
1293 page = pmd_page(*pmd); in follow_trans_huge_pmd()
1294 VM_BUG_ON_PAGE(!PageHead(page), page); in follow_trans_huge_pmd()
1311 if (page->mapping && trylock_page(page)) { in follow_trans_huge_pmd()
1313 if (page->mapping) in follow_trans_huge_pmd()
1314 mlock_vma_page(page); in follow_trans_huge_pmd()
1315 unlock_page(page); in follow_trans_huge_pmd()
1318 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; in follow_trans_huge_pmd()
1319 VM_BUG_ON_PAGE(!PageCompound(page), page); in follow_trans_huge_pmd()
1321 get_page_foll(page); in follow_trans_huge_pmd()
1324 return page; in follow_trans_huge_pmd()
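
follow_trans_huge_pmd() (lines 1272-1324) is the huge-PMD leg of follow_page()/get_user_pages(): it validates the PMD, marks the page mlocked when populating a VM_LOCKED VMA (the mlock_vma_page() call at line 1314), then returns the exact 4 KiB subpage for the address (the pointer arithmetic at line 1318). mlock() populates its range through this path; a sketch, where the Locked: summing over /proc/self/smaps is illustrative rather than a kernel API:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

int main(void)
{
        size_t len = 2UL << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        madvise(p, len, MADV_HUGEPAGE);
        memset(p, 1, len);
        /* Populating an mlocked range goes through get_user_pages(),
         * i.e. follow_trans_huge_pmd() for huge PMDs. */
        if (mlock(p, len)) {
                perror("mlock");        /* often RLIMIT_MEMLOCK */
                return 1;
        }

        long total = 0, kb;
        char line[256];
        FILE *f = fopen("/proc/self/smaps", "r");
        while (f && fgets(line, sizeof(line), f))
                if (sscanf(line, "Locked: %ld kB", &kb) == 1)
                        total += kb;
        if (f)
                fclose(f);
        printf("Locked: %ld kB\n", total);
        return 0;
}
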
1333 struct page *page; in do_huge_pmd_numa_page() local
1355 page = pmd_page(*pmdp); in do_huge_pmd_numa_page()
1357 wait_on_page_locked(page); in do_huge_pmd_numa_page()
1361 page = pmd_page(pmd); in do_huge_pmd_numa_page()
1362 BUG_ON(is_huge_zero_page(page)); in do_huge_pmd_numa_page()
1363 page_nid = page_to_nid(page); in do_huge_pmd_numa_page()
1364 last_cpupid = page_cpupid_last(page); in do_huge_pmd_numa_page()
1379 page_locked = trylock_page(page); in do_huge_pmd_numa_page()
1380 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1390 wait_on_page_locked(page); in do_huge_pmd_numa_page()
1399 get_page(page); in do_huge_pmd_numa_page()
1401 anon_vma = page_lock_anon_vma_read(page); in do_huge_pmd_numa_page()
1406 unlock_page(page); in do_huge_pmd_numa_page()
1407 put_page(page); in do_huge_pmd_numa_page()
1414 put_page(page); in do_huge_pmd_numa_page()
1425 pmdp, pmd, addr, page, target_nid); in do_huge_pmd_numa_page()
1434 BUG_ON(!PageLocked(page)); in do_huge_pmd_numa_page()
1442 unlock_page(page); in do_huge_pmd_numa_page()
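
do_huge_pmd_numa_page() (lines 1333-1442) services automatic NUMA balancing hinting faults on huge PMDs: it rechecks the PMD under lock, asks mpol_misplaced() for a better node (line 1380), and hands the page to the migration code when one exists. Whether those faults occur at all is gated by one sysctl; a sketch that just reports it (the file is absent without CONFIG_NUMA_BALANCING):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/numa_balancing", "r");
        int on;

        if (!f) {
                puts("kernel.numa_balancing not available "
                     "(CONFIG_NUMA_BALANCING off?)");
                return 0;
        }
        if (fscanf(f, "%d", &on) == 1)
                printf("kernel.numa_balancing = %d\n", on);
        fclose(f);
        return 0;
}
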
1483 struct page *page = pmd_page(orig_pmd); in zap_huge_pmd() local
1484 page_remove_rmap(page); in zap_huge_pmd()
1485 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); in zap_huge_pmd()
1487 VM_BUG_ON_PAGE(!PageHead(page), page); in zap_huge_pmd()
1491 tlb_remove_page(tlb, page); in zap_huge_pmd()
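
zap_huge_pmd() (lines 1483-1491) is the teardown side: it removes the rmap and releases the huge page when the mapping goes away via munmap(), process exit, or MADV_DONTNEED. A sketch pairing a huge fault with an explicit zap; anon_huge_kb() is the same illustrative helper as in the earlier sketch:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

static long anon_huge_kb(void)          /* illustrative helper, as above */
{
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[256];
        long total = 0, kb;

        while (f && fgets(line, sizeof(line), f))
                if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
                        total += kb;
        if (f)
                fclose(f);
        return total;
}

int main(void)
{
        size_t len = 8UL << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        madvise(p, len, MADV_HUGEPAGE);
        memset(p, 1, len);
        printf("before zap: %ld kB\n", anon_huge_kb());
        madvise(p, len, MADV_DONTNEED); /* tears down via zap_huge_pmd() */
        printf("after zap:  %ld kB\n", anon_huge_kb());
        return 0;
}
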
1625 pmd_t *page_check_address_pmd(struct page *page, in page_check_address_pmd() argument
1649 if (pmd_page(*pmd) != page) in page_check_address_pmd()
1671 static int __split_huge_page_splitting(struct page *page, in __split_huge_page_splitting() argument
1684 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_splitting()
1704 static void __split_huge_page_refcount(struct page *page, in __split_huge_page_refcount() argument
1708 struct zone *zone = page_zone(page); in __split_huge_page_refcount()
1714 lruvec = mem_cgroup_page_lruvec(page, zone); in __split_huge_page_refcount()
1716 compound_lock(page); in __split_huge_page_refcount()
1718 mem_cgroup_split_huge_fixup(page); in __split_huge_page_refcount()
1721 struct page *page_tail = page + i; in __split_huge_page_refcount()
1742 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1, in __split_huge_page_refcount()
1749 page_tail->flags |= (page->flags & in __split_huge_page_refcount()
1760 if (page_is_young(page)) in __split_huge_page_refcount()
1762 if (page_is_idle(page)) in __split_huge_page_refcount()
1779 page_tail->_mapcount = page->_mapcount; in __split_huge_page_refcount()
1782 page_tail->mapping = page->mapping; in __split_huge_page_refcount()
1784 page_tail->index = page->index + i; in __split_huge_page_refcount()
1785 page_cpupid_xchg_last(page_tail, page_cpupid_last(page)); in __split_huge_page_refcount()
1792 lru_add_page_tail(page, page_tail, lruvec, list); in __split_huge_page_refcount()
1794 atomic_sub(tail_count, &page->_count); in __split_huge_page_refcount()
1795 BUG_ON(atomic_read(&page->_count) <= 0); in __split_huge_page_refcount()
1799 ClearPageCompound(page); in __split_huge_page_refcount()
1800 compound_unlock(page); in __split_huge_page_refcount()
1804 struct page *page_tail = page + i; in __split_huge_page_refcount()
1820 BUG_ON(page_count(page) <= 0); in __split_huge_page_refcount()
1823 static int __split_huge_page_map(struct page *page, in __split_huge_page_map() argument
1834 pmd = page_check_address_pmd(page, mm, address, in __split_huge_page_map()
1840 BUG_ON(page_mapcount(page) != 1); in __split_huge_page_map()
1845 BUG_ON(PageCompound(page+i)); in __split_huge_page_map()
1851 entry = mk_pte(page + i, vma->vm_page_prot); in __split_huge_page_map()
1900 static void __split_huge_page(struct page *page, in __split_huge_page() argument
1905 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); in __split_huge_page()
1908 BUG_ON(!PageHead(page)); in __split_huge_page()
1909 BUG_ON(PageTail(page)); in __split_huge_page()
1914 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1916 mapcount += __split_huge_page_splitting(page, vma, addr); in __split_huge_page()
1928 if (mapcount != page_mapcount(page)) { in __split_huge_page()
1930 mapcount, page_mapcount(page)); in __split_huge_page()
1934 __split_huge_page_refcount(page, list); in __split_huge_page()
1939 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1941 mapcount2 += __split_huge_page_map(page, vma, addr); in __split_huge_page()
1945 mapcount, mapcount2, page_mapcount(page)); in __split_huge_page()
1957 int split_huge_page_to_list(struct page *page, struct list_head *list) in split_huge_page_to_list() argument
1962 BUG_ON(is_huge_zero_page(page)); in split_huge_page_to_list()
1963 BUG_ON(!PageAnon(page)); in split_huge_page_to_list()
1972 anon_vma = page_get_anon_vma(page); in split_huge_page_to_list()
1978 if (!PageCompound(page)) in split_huge_page_to_list()
1981 BUG_ON(!PageSwapBacked(page)); in split_huge_page_to_list()
1982 __split_huge_page(page, anon_vma, list); in split_huge_page_to_list()
1985 BUG_ON(PageCompound(page)); in split_huge_page_to_list()
2179 static void release_pte_page(struct page *page) in release_pte_page() argument
2182 dec_zone_page_state(page, NR_ISOLATED_ANON + 0); in release_pte_page()
2183 unlock_page(page); in release_pte_page()
2184 putback_lru_page(page); in release_pte_page()
2200 struct page *page; in __collapse_huge_page_isolate() local
2217 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
2218 if (unlikely(!page)) in __collapse_huge_page_isolate()
2221 VM_BUG_ON_PAGE(PageCompound(page), page); in __collapse_huge_page_isolate()
2222 VM_BUG_ON_PAGE(!PageAnon(page), page); in __collapse_huge_page_isolate()
2223 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in __collapse_huge_page_isolate()
2231 if (!trylock_page(page)) in __collapse_huge_page_isolate()
2239 if (page_count(page) != 1 + !!PageSwapCache(page)) { in __collapse_huge_page_isolate()
2240 unlock_page(page); in __collapse_huge_page_isolate()
2246 if (PageSwapCache(page) && !reuse_swap_page(page)) { in __collapse_huge_page_isolate()
2247 unlock_page(page); in __collapse_huge_page_isolate()
2260 if (isolate_lru_page(page)) { in __collapse_huge_page_isolate()
2261 unlock_page(page); in __collapse_huge_page_isolate()
2265 inc_zone_page_state(page, NR_ISOLATED_ANON + 0); in __collapse_huge_page_isolate()
2266 VM_BUG_ON_PAGE(!PageLocked(page), page); in __collapse_huge_page_isolate()
2267 VM_BUG_ON_PAGE(PageLRU(page), page); in __collapse_huge_page_isolate()
2271 page_is_young(page) || PageReferenced(page) || in __collapse_huge_page_isolate()
2282 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, in __collapse_huge_page_copy() argument
2290 struct page *src_page; in __collapse_huge_page_copy()
2293 clear_user_highpage(page, address); in __collapse_huge_page_copy()
2309 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
2329 page++; in __collapse_huge_page_copy()
2395 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page()
2412 static struct page *
2413 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page()
2442 static inline struct page *alloc_hugepage(int defrag) in alloc_hugepage()
2448 static struct page *khugepaged_alloc_hugepage(bool *wait) in khugepaged_alloc_hugepage()
2450 struct page *hpage; in khugepaged_alloc_hugepage()
2468 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page()
2479 static struct page *
2480 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, in khugepaged_alloc_page()
2505 struct page **hpage, in collapse_huge_page()
2512 struct page *new_page; in collapse_huge_page()
2642 struct page **hpage) in khugepaged_scan_pmd()
2647 struct page *page; in khugepaged_scan_pmd() local
2676 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
2677 if (unlikely(!page)) in khugepaged_scan_pmd()
2685 node = page_to_nid(page); in khugepaged_scan_pmd()
2689 VM_BUG_ON_PAGE(PageCompound(page), page); in khugepaged_scan_pmd()
2690 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) in khugepaged_scan_pmd()
2697 if (page_count(page) != 1 + !!PageSwapCache(page)) in khugepaged_scan_pmd()
2700 page_is_young(page) || PageReferenced(page) || in khugepaged_scan_pmd()
2741 struct page **hpage) in khugepaged_scan_mm_slot()
2862 struct page *hpage = NULL; in khugepaged_do_scan()
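
Lines 2179-2862 are khugepaged: khugepaged_scan_pmd() looks for PTE ranges worth collapsing, __collapse_huge_page_isolate() and __collapse_huge_page_copy() lock, isolate, and copy the small pages, and collapse_huge_page() installs the new huge PMD. Its pace and thresholds live in sysfs; a sketch that dumps the relevant knobs and counters, assuming the standard transparent_hugepage sysfs layout (names may vary slightly by kernel version):

#include <stdio.h>

int main(void)
{
        static const char *files[] = {
                "pages_to_scan", "scan_sleep_millisecs",
                "pages_collapsed", "full_scans", "max_ptes_none",
        };
        char path[128], buf[64];

        for (size_t i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                snprintf(path, sizeof(path),
                         "/sys/kernel/mm/transparent_hugepage/khugepaged/%s",
                         files[i]);
                FILE *f = fopen(path, "r");
                if (f && fgets(buf, sizeof(buf), f))
                        printf("%-22s %s", files[i], buf);
                if (f)
                        fclose(f);
        }
        return 0;
}
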
2963 struct page *page = NULL; in __split_huge_page_pmd() local
2985 page = pmd_page(*pmd); in __split_huge_page_pmd()
2986 VM_BUG_ON_PAGE(!page_count(page), page); in __split_huge_page_pmd()
2987 get_page(page); in __split_huge_page_pmd()
2993 if (!page) in __split_huge_page_pmd()
2996 split_huge_page(page); in __split_huge_page_pmd()
2997 put_page(page); in __split_huge_page_pmd()
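
__split_huge_page_pmd() (lines 2963-2997) is the entry point used when an operation needs PTE granularity inside a huge mapping: it takes a reference on the head page and calls split_huge_page(), which walks the rmap in three phases (__split_huge_page_splitting, __split_huge_page_refcount, __split_huge_page_map, lines 1671-1945). mprotect() on a 4 KiB sub-range is a simple trigger; a sketch, with anon_huge_kb() again a local helper and the before/after delta depending on a huge page actually having been mapped there:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

static long anon_huge_kb(void)          /* same illustrative helper */
{
        FILE *f = fopen("/proc/self/smaps", "r");
        char line[256];
        long total = 0, kb;

        while (f && fgets(line, sizeof(line), f))
                if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
                        total += kb;
        if (f)
                fclose(f);
        return total;
}

int main(void)
{
        size_t huge = 2UL << 20;
        char *p = mmap(NULL, 2 * huge, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
                return 1;
        madvise(p, 2 * huge, MADV_HUGEPAGE);
        memset(p, 1, 2 * huge);
        printf("before split: %ld kB\n", anon_huge_kb());

        /* A 2 MiB-aligned address inside the mapping; changing protection
         * on one 4 KiB page there forces the PMD, and on this kernel the
         * whole compound page, to be split. */
        char *aligned = (char *)(((uintptr_t)p + huge - 1) & ~(huge - 1));
        mprotect(aligned + 4096, 4096, PROT_READ);
        printf("after split:  %ld kB\n", anon_huge_kb());
        return 0;
}
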