Lines matching refs: page
(Cross-reference listing over the kernel's hugetlb code. The leading number on each line is the source file's own line number; the trailing "in func()" note names the enclosing function, with "argument"/"local" marking how the identifier is used there. The indented sketches interleaved below are editorial reconstructions, not part of the tool output.)
838 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
840 int nid = page_to_nid(page); in enqueue_huge_page()
841 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
846 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) in dequeue_huge_page_node()
848 struct page *page; in dequeue_huge_page_node() local
850 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node()
851 if (!is_migrate_isolate_page(page)) in dequeue_huge_page_node()
857 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node()
859 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node()
860 set_page_refcounted(page); in dequeue_huge_page_node()
863 return page; in dequeue_huge_page_node()
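Note: the two functions above are the pool's fast path: free huge pages sit on a per-node list inside struct hstate, and a dequeue just moves one to the active list and re-arms its refcount. A condensed sketch of the dequeue side, reassembled from the fragments above (the free_huge_pages counter updates are an assumption drawn from the surrounding kernel code, since they do not appear in this listing):

    static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
    {
            struct page *page;

            /* Skip entries whose pageblock is currently isolated. */
            list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                    if (!is_migrate_isolate_page(page))
                            break;
            /*
             * list_for_each_entry() leaves 'page' aliasing the list head
             * when nothing matched, so this detects an empty (or fully
             * isolated) free list -- the check at line 857 above.
             */
            if (&h->hugepage_freelists[nid] == &page->lru)
                    return NULL;
            list_move(&page->lru, &h->hugepage_activelist);
            set_page_refcounted(page);          /* refcount 0 -> 1 */
            h->free_huge_pages--;               /* assumed bookkeeping */
            h->free_huge_pages_node[nid]--;
            return page;
    }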
875 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma()
880 struct page *page = NULL; in dequeue_huge_page_vma() local
909 page = dequeue_huge_page_node(h, zone_to_nid(zone)); in dequeue_huge_page_vma()
910 if (page) { in dequeue_huge_page_vma()
916 SetPagePrivate(page); in dequeue_huge_page_vma()
924 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) in dequeue_huge_page_vma()
926 return page; in dequeue_huge_page_vma()
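Note: dequeue_huge_page_vma() wraps the node dequeue in the cpuset mems-allowed seqcount protocol: if the allowed-node mask changed mid-walk and nothing was found, the whole walk is retried. The SetPagePrivate() at line 916 marks the page as having consumed a reservation. A minimal fragment of the retry loop (the mempolicy lookup and zonelist walk are elided):

    struct page *page = NULL;
    unsigned int cpuset_mems_cookie;

    retry_cpuset:
            cpuset_mems_cookie = read_mems_allowed_begin();
            /* ... walk zones in mempolicy order, try
             *     dequeue_huge_page_node() on each candidate node,
             *     SetPagePrivate(page) when a reserve was used ... */
            if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                    goto retry_cpuset;
            return page;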
1006 static void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
1011 struct page *p = page + 1; in destroy_compound_gigantic_page()
1013 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in destroy_compound_gigantic_page()
1018 set_compound_order(page, 0); in destroy_compound_gigantic_page()
1019 __ClearPageHead(page); in destroy_compound_gigantic_page()
1022 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
1024 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
1038 struct page *page; in pfn_range_valid_gigantic() local
1044 page = pfn_to_page(i); in pfn_range_valid_gigantic()
1046 if (PageReserved(page)) in pfn_range_valid_gigantic()
1049 if (page_count(page) > 0) in pfn_range_valid_gigantic()
1052 if (PageHuge(page)) in pfn_range_valid_gigantic()
1066 static struct page *alloc_gigantic_page(int nid, unsigned int order) in alloc_gigantic_page()
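Note: gigantic pages (order >= MAX_ORDER) cannot come from the buddy allocator, so alloc_gigantic_page() hunts for a naturally aligned pfn range that pfn_range_valid_gigantic() accepts and claims it via the contiguous-range allocator; free_gigantic_page() at line 1022 is the inverse. A hedged sketch of the scan over one zone (the real function walks every zone of the node under zone->lock, and the exact pfn_range_valid_gigantic() signature is an assumption for this kernel generation):

    static struct page *alloc_gigantic_page_sketch(struct zone *z,
                                                   unsigned int order)
    {
            unsigned long nr_pages = 1UL << order;
            /* Gigantic pages must be naturally aligned. */
            unsigned long pfn = ALIGN(z->zone_start_pfn, nr_pages);

            while (pfn + nr_pages <= zone_end_pfn(z)) {
                    if (pfn_range_valid_gigantic(pfn, nr_pages) &&
                        !alloc_contig_range(pfn, pfn + nr_pages,
                                            MIGRATE_MOVABLE))
                            return pfn_to_page(pfn);
                    pfn += nr_pages;
            }
            return NULL;
    }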
1101 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1102 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1104 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid) in alloc_fresh_gigantic_page_node()
1106 struct page *page; in alloc_fresh_gigantic_page_node() local
1108 page = alloc_gigantic_page(nid, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
1109 if (page) { in alloc_fresh_gigantic_page_node()
1110 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
1111 prep_new_huge_page(h, page, nid); in alloc_fresh_gigantic_page_node()
1114 return page; in alloc_fresh_gigantic_page_node()
1120 struct page *page = NULL; in alloc_fresh_gigantic_page() local
1124 page = alloc_fresh_gigantic_page_node(h, node); in alloc_fresh_gigantic_page()
1125 if (page) in alloc_fresh_gigantic_page()
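Note: alloc_fresh_gigantic_page() here and alloc_fresh_huge_page() at lines 1353-1359 share one shape: round-robin over the allowed nodes, stop at the first success. A sketch using the hugetlb round-robin helper (the int 1/0 return convention is an assumption from the surrounding kernel code):

    static int alloc_fresh_gigantic_page(struct hstate *h,
                                         nodemask_t *nodes_allowed)
    {
            struct page *page = NULL;
            int nr_nodes, node;

            /* Rotates the starting node between calls to spread load. */
            for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                    page = alloc_fresh_gigantic_page_node(h, node);
                    if (page)
                            return 1;
            }
            return 0;
    }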
1135 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
1136 static inline void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
1142 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1150 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1152 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | in update_and_free_page()
1157 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); in update_and_free_page()
1158 set_compound_page_dtor(page, NULL_COMPOUND_DTOR); in update_and_free_page()
1159 set_page_refcounted(page); in update_and_free_page()
1161 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1162 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1164 __free_pages(page, huge_page_order(h)); in update_and_free_page()
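Note: update_and_free_page() is the retirement path: the counters drop, every subpage's stale flags are scrubbed, the compound destructor is disarmed, and the memory goes back through the gigantic teardown pair or __free_pages(). A condensed sketch; only PG_locked and PG_error are visible at line 1152, so the rest of the flag mask is an assumption from the surrounding kernel code:

    static void update_and_free_page(struct hstate *h, struct page *page)
    {
            int i;

            h->nr_huge_pages--;                 /* assumed global twin */
            h->nr_huge_pages_node[page_to_nid(page)]--;
            for (i = 0; i < pages_per_huge_page(h); i++)
                    page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                       1 << PG_referenced | 1 << PG_dirty |
                                       1 << PG_active | 1 << PG_private |
                                       1 << PG_writeback);
            VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
            set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
            set_page_refcounted(page);
            if (hstate_is_gigantic(h)) {
                    destroy_compound_gigantic_page(page, huge_page_order(h));
                    free_gigantic_page(page, huge_page_order(h));
            } else {
                    __free_pages(page, huge_page_order(h));
            }
    }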
1185 bool page_huge_active(struct page *page) in page_huge_active() argument
1187 VM_BUG_ON_PAGE(!PageHuge(page), page); in page_huge_active()
1188 return PageHead(page) && PagePrivate(&page[1]); in page_huge_active()
1192 static void set_page_huge_active(struct page *page) in set_page_huge_active() argument
1194 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in set_page_huge_active()
1195 SetPagePrivate(&page[1]); in set_page_huge_active()
1198 static void clear_page_huge_active(struct page *page) in clear_page_huge_active() argument
1200 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in clear_page_huge_active()
1201 ClearPagePrivate(&page[1]); in clear_page_huge_active()
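Note where the "active" flag lives: the head page's Private bit is already claimed as the restore-reserve marker (SetPagePrivate() at lines 916 and 1900, read back at line 1220), so activity is encoded on the first tail page instead. Reassembled from the fragments above, with that asymmetry spelled out:

    /* Head page:   PagePrivate == "allocation consumed a reservation".
     * Tail page 1: PagePrivate == "huge page is on the active list".  */
    static void set_page_huge_active(struct page *page)
    {
            VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
            SetPagePrivate(&page[1]);
    }

    bool page_huge_active(struct page *page)
    {
            VM_BUG_ON_PAGE(!PageHuge(page), page);
            return PageHead(page) && PagePrivate(&page[1]);
    }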
1204 void free_huge_page(struct page *page) in free_huge_page() argument
1210 struct hstate *h = page_hstate(page); in free_huge_page()
1211 int nid = page_to_nid(page); in free_huge_page()
1213 (struct hugepage_subpool *)page_private(page); in free_huge_page()
1216 set_page_private(page, 0); in free_huge_page()
1217 page->mapping = NULL; in free_huge_page()
1218 BUG_ON(page_count(page)); in free_huge_page()
1219 BUG_ON(page_mapcount(page)); in free_huge_page()
1220 restore_reserve = PagePrivate(page); in free_huge_page()
1221 ClearPagePrivate(page); in free_huge_page()
1232 clear_page_huge_active(page); in free_huge_page()
1234 pages_per_huge_page(h), page); in free_huge_page()
1240 list_del(&page->lru); in free_huge_page()
1241 update_and_free_page(h, page); in free_huge_page()
1245 arch_clear_hugepage_flags(page); in free_huge_page()
1246 enqueue_huge_page(h, page); in free_huge_page()
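Note: the tail of free_huge_page() decides a page's fate by its node's surplus count: surplus pages are dismantled and handed back to the buddy allocator, persistent ones return to the free list. A sketch of that branch (the hugetlb_lock and resv_huge_pages bookkeeping are assumptions from the surrounding kernel code; line 1234 above is part of the cgroup uncharge call):

    spin_lock(&hugetlb_lock);
    clear_page_huge_active(page);
    hugetlb_cgroup_uncharge_page(hstate_index(h),
                                 pages_per_huge_page(h), page);
    if (restore_reserve)
            h->resv_huge_pages++;       /* hand the reservation back */

    if (h->surplus_huge_pages_node[nid]) {
            /* Overcommitted node: shrink the pool. */
            list_del(&page->lru);
            update_and_free_page(h, page);
            h->surplus_huge_pages--;
            h->surplus_huge_pages_node[nid]--;
    } else {
            /* Persistent pool page: recycle it. */
            arch_clear_hugepage_flags(page);
            enqueue_huge_page(h, page);
    }
    spin_unlock(&hugetlb_lock);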
1251 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1253 INIT_LIST_HEAD(&page->lru); in prep_new_huge_page()
1254 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); in prep_new_huge_page()
1256 set_hugetlb_cgroup(page, NULL); in prep_new_huge_page()
1260 put_page(page); /* free it into the hugepage allocator */ in prep_new_huge_page()
1263 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
1267 struct page *p = page + 1; in prep_compound_gigantic_page()
1270 set_compound_order(page, order); in prep_compound_gigantic_page()
1271 __SetPageHead(page); in prep_compound_gigantic_page()
1272 __ClearPageReserved(page); in prep_compound_gigantic_page()
1273 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in prep_compound_gigantic_page()
1288 set_compound_head(p, page); in prep_compound_gigantic_page()
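Note: prep_compound_gigantic_page() hand-builds the compound-page metadata that the buddy allocator would otherwise set up: order and head flag on the head page, then each tail page is unreserved, zero-referenced, and pointed back at the head. Condensed (the set_page_count() call in the loop body is an assumption from the surrounding kernel code):

    static void prep_compound_gigantic_page(struct page *page,
                                            unsigned int order)
    {
            int i;
            int nr_pages = 1 << order;
            struct page *p = page + 1;

            set_compound_order(page, order);
            __SetPageHead(page);
            __ClearPageReserved(page);
            for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                    /* Tail pages arrive PageReserved from memblock. */
                    __ClearPageReserved(p);
                    set_page_count(p, 0);
                    set_compound_head(p, page);
            }
    }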
1297 int PageHuge(struct page *page) in PageHuge() argument
1299 if (!PageCompound(page)) in PageHuge()
1302 page = compound_head(page); in PageHuge()
1303 return page[1].compound_dtor == HUGETLB_PAGE_DTOR; in PageHuge()
1311 int PageHeadHuge(struct page *page_head) in PageHeadHuge()
1319 pgoff_t __basepage_index(struct page *page) in __basepage_index() argument
1321 struct page *page_head = compound_head(page); in __basepage_index()
1326 return page_index(page); in __basepage_index()
1329 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); in __basepage_index()
1331 compound_idx = page - page_head; in __basepage_index()
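Note: a huge page is recognized purely by the destructor slot stored on the first tail page (line 1303); __basepage_index() then recovers a subpage's offset within its compound page. Gigantic pages can span a mem_map discontinuity under sparse memory models, so their offset must come from pfn arithmetic rather than pointer subtraction. Reassembled (the final shift-and-add is an assumption from the surrounding kernel code):

    pgoff_t __basepage_index(struct page *page)
    {
            struct page *page_head = compound_head(page);
            pgoff_t index = page_index(page_head);
            unsigned long compound_idx;

            if (!PageHuge(page_head))
                    return page_index(page);

            if (compound_order(page_head) >= MAX_ORDER)
                    /* mem_map may not be contiguous: go via pfns. */
                    compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
            else
                    compound_idx = page - page_head;

            /* Index in base pages = head index scaled up, plus offset. */
            return (index << compound_order(page_head)) + compound_idx;
    }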
1336 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) in alloc_fresh_huge_page_node()
1338 struct page *page; in alloc_fresh_huge_page_node() local
1340 page = __alloc_pages_node(nid, in alloc_fresh_huge_page_node()
1344 if (page) { in alloc_fresh_huge_page_node()
1345 prep_new_huge_page(h, page, nid); in alloc_fresh_huge_page_node()
1348 return page; in alloc_fresh_huge_page_node()
1353 struct page *page; in alloc_fresh_huge_page() local
1358 page = alloc_fresh_huge_page_node(h, node); in alloc_fresh_huge_page()
1359 if (page) { in alloc_fresh_huge_page()
1392 struct page *page = in free_pool_huge_page() local
1394 struct page, lru); in free_pool_huge_page()
1395 list_del(&page->lru); in free_pool_huge_page()
1402 update_and_free_page(h, page); in free_pool_huge_page()
1415 static void dissolve_free_huge_page(struct page *page) in dissolve_free_huge_page() argument
1418 if (PageHuge(page) && !page_count(page)) { in dissolve_free_huge_page()
1419 struct hstate *h = page_hstate(page); in dissolve_free_huge_page()
1420 int nid = page_to_nid(page); in dissolve_free_huge_page()
1421 list_del(&page->lru); in dissolve_free_huge_page()
1424 update_and_free_page(h, page); in dissolve_free_huge_page()
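Note: dissolve_free_huge_page() is the memory-offlining hook: a huge page can only be broken up while it is free, i.e. has refcount zero and sits on a free list. Sketch (the lock and counter updates are assumptions from the surrounding kernel code):

    static void dissolve_free_huge_page(struct page *page)
    {
            spin_lock(&hugetlb_lock);
            if (PageHuge(page) && !page_count(page)) {
                    struct hstate *h = page_hstate(page);
                    int nid = page_to_nid(page);

                    /* Unhook from the free list, then dismantle. */
                    list_del(&page->lru);
                    h->free_huge_pages--;
                    h->free_huge_pages_node[nid]--;
                    update_and_free_page(h, page);
            }
            spin_unlock(&hugetlb_lock);
    }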
1455 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h, in __hugetlb_alloc_buddy_huge_page()
1492 struct page *page; in __hugetlb_alloc_buddy_huge_page() local
1500 page = __alloc_pages_nodemask(gfp, order, zl, nodemask); in __hugetlb_alloc_buddy_huge_page()
1501 if (page) in __hugetlb_alloc_buddy_huge_page()
1502 return page; in __hugetlb_alloc_buddy_huge_page()
1520 static struct page *__alloc_buddy_huge_page(struct hstate *h, in __alloc_buddy_huge_page()
1523 struct page *page; in __alloc_buddy_huge_page() local
1571 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid); in __alloc_buddy_huge_page()
1574 if (page) { in __alloc_buddy_huge_page()
1575 INIT_LIST_HEAD(&page->lru); in __alloc_buddy_huge_page()
1576 r_nid = page_to_nid(page); in __alloc_buddy_huge_page()
1577 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR); in __alloc_buddy_huge_page()
1578 set_hugetlb_cgroup(page, NULL); in __alloc_buddy_huge_page()
1592 return page; in __alloc_buddy_huge_page()
1601 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid) in __alloc_buddy_huge_page_no_mpol()
1612 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h, in __alloc_buddy_huge_page_with_mpol()
1623 struct page *alloc_huge_page_node(struct hstate *h, int nid) in alloc_huge_page_node()
1625 struct page *page = NULL; in alloc_huge_page_node() local
1629 page = dequeue_huge_page_node(h, nid); in alloc_huge_page_node()
1632 if (!page) in alloc_huge_page_node()
1633 page = __alloc_buddy_huge_page_no_mpol(h, nid); in alloc_huge_page_node()
1635 return page; in alloc_huge_page_node()
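Note: alloc_huge_page_node() shows the standard two-step: try the pooled free list first, but never dip below the reserved count, then fall back to a fresh surplus page from the buddy allocator. Sketch (the lock and the reserve guard are assumptions from the surrounding kernel code):

    struct page *alloc_huge_page_node(struct hstate *h, int nid)
    {
            struct page *page = NULL;

            spin_lock(&hugetlb_lock);
            /* Only dequeue if free pages exceed outstanding reserves. */
            if (h->free_huge_pages - h->resv_huge_pages > 0)
                    page = dequeue_huge_page_node(h, nid);
            spin_unlock(&hugetlb_lock);

            if (!page)
                    page = __alloc_buddy_huge_page_no_mpol(h, nid);

            return page;
    }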
1645 struct page *page, *tmp; in gather_surplus_pages() local
1663 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE); in gather_surplus_pages()
1664 if (!page) { in gather_surplus_pages()
1668 list_add(&page->lru, &surplus_list); in gather_surplus_pages()
1702 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
1709 put_page_testzero(page); in gather_surplus_pages()
1710 VM_BUG_ON_PAGE(page_count(page), page); in gather_surplus_pages()
1711 enqueue_huge_page(h, page); in gather_surplus_pages()
1717 list_for_each_entry_safe(page, tmp, &surplus_list, lru) in gather_surplus_pages()
1718 put_page(page); in gather_surplus_pages()
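Note: gather_surplus_pages() over-allocates outside the lock onto a private list, then reconciles under the lock: pages still needed are demoted to refcount zero and enqueued, the excess is simply released. The put_page_testzero()/VM_BUG_ON_PAGE() pair at lines 1709-1710 is that refcount handoff. A fragment sketch (retry and accounting logic elided):

    /* Phase 1: allocate candidates onto a private list, unlocked. */
    for (i = 0; i < needed; i++) {
            page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
            if (!page)
                    break;
            list_add(&page->lru, &surplus_list);
    }

    /* Phase 2: under hugetlb_lock, enqueue what is still needed. */
    list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
            if ((--needed) < 0)
                    break;
            /* A fresh page holds one reference; drop it so the page
             * sits on the free list with refcount zero. */
            put_page_testzero(page);
            VM_BUG_ON_PAGE(page_count(page), page);
            enqueue_huge_page(h, page);
    }

    /* Phase 3: anything left over goes straight back. */
    list_for_each_entry_safe(page, tmp, &surplus_list, lru)
            put_page(page);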
1836 struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page()
1841 struct page *page; in alloc_huge_page() local
1893 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
1894 if (!page) { in alloc_huge_page()
1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
1897 if (!page) in alloc_huge_page()
1900 SetPagePrivate(page); in alloc_huge_page()
1904 list_move(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
1907 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
1910 set_page_private(page, (unsigned long)spool); in alloc_huge_page()
1928 return page; in alloc_huge_page()
1944 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, in alloc_huge_page_noerr()
1947 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); in alloc_huge_page_noerr() local
1948 if (IS_ERR(page)) in alloc_huge_page_noerr()
1949 page = NULL; in alloc_huge_page_noerr()
1950 return page; in alloc_huge_page_noerr()
1984 static void __init prep_compound_huge_page(struct page *page, in prep_compound_huge_page() argument
1988 prep_compound_gigantic_page(page, order); in prep_compound_huge_page()
1990 prep_compound_page(page, order); in prep_compound_huge_page()
2000 struct page *page; in gather_bootmem_prealloc() local
2003 page = pfn_to_page(m->phys >> PAGE_SHIFT); in gather_bootmem_prealloc()
2007 page = virt_to_page(m); in gather_bootmem_prealloc()
2009 WARN_ON(page_count(page) != 1); in gather_bootmem_prealloc()
2010 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
2011 WARN_ON(PageReserved(page)); in gather_bootmem_prealloc()
2012 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2020 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
2087 struct page *page, *next; in try_to_free_low() local
2089 list_for_each_entry_safe(page, next, freel, lru) { in try_to_free_low()
2092 if (PageHighMem(page)) in try_to_free_low()
2094 list_del(&page->lru); in try_to_free_low()
2095 update_and_free_page(h, page); in try_to_free_low()
2097 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
3023 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
3029 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, in make_huge_pte()
3032 entry = huge_pte_wrprotect(mk_huge_pte(page, in make_huge_pte()
3037 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
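Note: make_huge_pte() assembles the PTE in layers: the base mapping, then dirty+writable for writable VMAs or write-protected otherwise, and finally the architecture hook. Reassembled (the pte_mkyoung()/pte_mkhuge() steps are an assumption from the surrounding kernel code, as they are not visible in this listing):

    static pte_t make_huge_pte(struct vm_area_struct *vma,
                               struct page *page, int writable)
    {
            pte_t entry;

            if (writable)
                    entry = huge_pte_mkwrite(huge_pte_mkdirty(
                                    mk_huge_pte(page, vma->vm_page_prot)));
            else
                    entry = huge_pte_wrprotect(
                                    mk_huge_pte(page, vma->vm_page_prot));
            entry = pte_mkyoung(entry);
            entry = pte_mkhuge(entry);
            /* Architectures get the last word (e.g. contiguous hints). */
            entry = arch_make_huge_pte(entry, vma, page, writable);

            return entry;
    }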
3082 struct page *ptepage; in copy_hugetlb_page_range()
3158 struct page *ref_page) in __unmap_hugepage_range()
3166 struct page *page; in __unmap_hugepage_range() local
3202 page = pte_page(pte); in __unmap_hugepage_range()
3209 if (page != ref_page) in __unmap_hugepage_range()
3223 set_page_dirty(page); in __unmap_hugepage_range()
3226 page_remove_rmap(page); in __unmap_hugepage_range()
3227 force_flush = !__tlb_remove_page(tlb, page); in __unmap_hugepage_range()
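Note: the per-PTE teardown in __unmap_hugepage_range() runs inside an mmu_gather batch. A fragment of the loop body (locking, huge_ptep_get_and_clear(), and the ref_page early-exit at line 3209 are elided; huge_pte_dirty() as the dirty test is an assumption from the surrounding kernel code):

    page = pte_page(pte);
    if (huge_pte_dirty(pte))
            set_page_dirty(page);

    page_remove_rmap(page);
    /* __tlb_remove_page() returns false once the gather batch is
     * full, which forces a TLB flush before the loop continues. */
    force_flush = !__tlb_remove_page(tlb, page);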
3258 unsigned long end, struct page *ref_page) in __unmap_hugepage_range_final()
3276 unsigned long end, struct page *ref_page) in unmap_hugepage_range()
3295 struct page *page, unsigned long address) in unmap_ref_private() argument
3339 address + huge_page_size(h), page); in unmap_ref_private()
3352 struct page *pagecache_page, spinlock_t *ptl) in hugetlb_cow()
3355 struct page *old_page, *new_page; in hugetlb_cow()
3472 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page()
3493 struct page *page; in hugetlbfs_pagecache_present() local
3498 page = find_get_page(mapping, idx); in hugetlbfs_pagecache_present()
3499 if (page) in hugetlbfs_pagecache_present()
3500 put_page(page); in hugetlbfs_pagecache_present()
3501 return page != NULL; in hugetlbfs_pagecache_present()
3504 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, in huge_add_to_page_cache() argument
3509 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); in huge_add_to_page_cache()
3513 ClearPagePrivate(page); in huge_add_to_page_cache()
3529 struct page *page; in hugetlb_no_page() local
3549 page = find_lock_page(mapping, idx); in hugetlb_no_page()
3550 if (!page) { in hugetlb_no_page()
3554 page = alloc_huge_page(vma, address, 0); in hugetlb_no_page()
3555 if (IS_ERR(page)) { in hugetlb_no_page()
3556 ret = PTR_ERR(page); in hugetlb_no_page()
3563 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
3564 __SetPageUptodate(page); in hugetlb_no_page()
3565 set_page_huge_active(page); in hugetlb_no_page()
3568 int err = huge_add_to_page_cache(page, mapping, idx); in hugetlb_no_page()
3570 put_page(page); in hugetlb_no_page()
3576 lock_page(page); in hugetlb_no_page()
3589 if (unlikely(PageHWPoison(page))) { in hugetlb_no_page()
3622 ClearPagePrivate(page); in hugetlb_no_page()
3623 hugepage_add_new_anon_rmap(page, vma, address); in hugetlb_no_page()
3625 page_dup_rmap(page); in hugetlb_no_page()
3626 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
3633 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); in hugetlb_no_page()
3637 unlock_page(page); in hugetlb_no_page()
3644 unlock_page(page); in hugetlb_no_page()
3645 put_page(page); in hugetlb_no_page()
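Note: hugetlb_no_page() follows the classic find-or-create pattern: look the page up locked; on a miss, allocate, zero, and publish it, and if the page-cache insertion races with another faulting thread (-EEXIST), throw the copy away and retry the lookup. Condensed, with the errno-to-VM_FAULT_* mapping and the private-mapping branch elided:

    retry:
            page = find_lock_page(mapping, idx);
            if (!page) {
                    page = alloc_huge_page(vma, address, 0);
                    if (IS_ERR(page))
                            return VM_FAULT_SIGBUS;  /* error mapping
                                                        condensed */
                    clear_huge_page(page, address, pages_per_huge_page(h));
                    __SetPageUptodate(page);
                    set_page_huge_active(page);

                    /* Shared mappings publish through the page cache. */
                    if (vma->vm_flags & VM_MAYSHARE) {
                            int err = huge_add_to_page_cache(page,
                                                             mapping, idx);
                            if (err) {
                                    put_page(page);
                                    if (err == -EEXIST)
                                            goto retry;  /* lost the race */
                                    return VM_FAULT_SIGBUS;
                            }
                    }
            }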
3692 struct page *page = NULL; in hugetlb_fault() local
3693 struct page *pagecache_page = NULL; in hugetlb_fault()
3776 page = pte_page(entry); in hugetlb_fault()
3777 if (page != pagecache_page) in hugetlb_fault()
3778 if (!trylock_page(page)) { in hugetlb_fault()
3783 get_page(page); in hugetlb_fault()
3798 if (page != pagecache_page) in hugetlb_fault()
3799 unlock_page(page); in hugetlb_fault()
3800 put_page(page); in hugetlb_fault()
3818 wait_on_page_locked(page); in hugetlb_fault()
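Note: the fault path above pins the faulted page and only *trylocks* it while the page-table lock is held; sleeping in lock_page() there could deadlock against the lock holder. On failure it backs out and waits politely. A fragment sketch (need_wait_lock and the out_ptl unwind are assumptions from the surrounding kernel code):

    /* Under the page-table lock. */
    page = pte_page(entry);
    if (page != pagecache_page)
            if (!trylock_page(page)) {
                    need_wait_lock = 1;     /* back out, wait below */
                    goto out_ptl;
            }
    get_page(page);
    /* ... CoW / dirty handling elided ... */

    /* After dropping the page-table lock and references: */
    if (need_wait_lock)
            wait_on_page_locked(page);      /* then let the fault retry */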
3823 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page()
3836 struct page *page; in follow_hugetlb_page() local
3901 page = pte_page(huge_ptep_get(pte)); in follow_hugetlb_page()
3904 pages[i] = mem_map_offset(page, pfn_offset); in follow_hugetlb_page()
4329 struct page * __weak
4336 struct page * __weak
4340 struct page *page = NULL; in follow_huge_pmd() local
4352 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); in follow_huge_pmd()
4354 get_page(page); in follow_huge_pmd()
4368 return page; in follow_huge_pmd()
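Note: follow_huge_pmd() converts a PMD mapping back into the exact subpage: pmd_page() yields the head page, and the low bits of the address select the base page within the PMD-sized range. The offset math at line 4352, annotated:

    /* address & ~PMD_MASK = byte offset inside the huge mapping;
     * >> PAGE_SHIFT       = that offset counted in base pages.   */
    page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
    if (flags & FOLL_GET)           /* guard assumed from context */
            get_page(page);         /* taken under the pmd lock so the
                                       mapping cannot vanish first */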
4371 struct page * __weak
4387 int dequeue_hwpoisoned_huge_page(struct page *hpage) in dequeue_hwpoisoned_huge_page()
4416 bool isolate_huge_page(struct page *page, struct list_head *list) in isolate_huge_page() argument
4420 VM_BUG_ON_PAGE(!PageHead(page), page); in isolate_huge_page()
4422 if (!page_huge_active(page) || !get_page_unless_zero(page)) { in isolate_huge_page()
4426 clear_page_huge_active(page); in isolate_huge_page()
4427 list_move_tail(&page->lru, list); in isolate_huge_page()
4433 void putback_active_hugepage(struct page *page) in putback_active_hugepage() argument
4435 VM_BUG_ON_PAGE(!PageHead(page), page); in putback_active_hugepage()
4437 set_page_huge_active(page); in putback_active_hugepage()
4438 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); in putback_active_hugepage()
4440 put_page(page); in putback_active_hugepage()
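Note: isolate_huge_page()/putback_active_hugepage() bracket huge-page migration: isolation only succeeds for a page that is both active and whose reference can be grabbed (line 4422), and putback re-activates the page and drops that reference. A hedged usage sketch of the caller-side shape (the migration step itself is elided; this is not a drop-in for any specific caller):

    LIST_HEAD(pagelist);

    if (isolate_huge_page(page, &pagelist)) {
            /* Page is now off the active list with a reference held;
             * migrate_pages() would consume &pagelist here. */

            /* If migration is not attempted or fails, undo: */
            putback_active_hugepage(page);
    }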