Lines matching refs:page (mm/hugetlb.c)
589 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
591 int nid = page_to_nid(page); in enqueue_huge_page()
592 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
597 static struct page *dequeue_huge_page_node(struct hstate *h, int nid) in dequeue_huge_page_node()
599 struct page *page; in dequeue_huge_page_node() local
601 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) in dequeue_huge_page_node()
602 if (!is_migrate_isolate_page(page)) in dequeue_huge_page_node()
608 if (&h->hugepage_freelists[nid] == &page->lru) in dequeue_huge_page_node()
610 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node()
611 set_page_refcounted(page); in dequeue_huge_page_node()
614 return page; in dequeue_huge_page_node()
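
The two functions above form hugetlb's per-node freelist: enqueue_huge_page() parks a page on its node's list with list_move(), and dequeue_huge_page_node() walks that list, skipping migrate-isolated pages and detecting an exhausted walk by comparing &page->lru against the list head. Below is a minimal user-space sketch of the same intrusive-list pattern; the toy list_head, struct page, and freelists[] are stand-ins, not kernel code.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del_entry(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

static void list_add_head(struct list_head *e, struct list_head *h)
{
    e->next = h->next;
    e->prev = h;
    h->next->prev = e;
    h->next = e;
}

/* list_move(): unlink from wherever the entry sits, relink at the head */
static void list_move(struct list_head *e, struct list_head *h)
{
    list_del_entry(e);
    list_add_head(e, h);
}

struct page { struct list_head lru; int nid; };

#define MAX_NUMNODES 2
static struct list_head freelists[MAX_NUMNODES];

/* as enqueue_huge_page(): park the page on its own node's free list */
static void enqueue(struct page *page)
{
    list_move(&page->lru, &freelists[page->nid]);
}

/* as dequeue_huge_page_node(), minus the migrate-isolation check:
 * pop the first free page on the node, or NULL if the list is empty */
static struct page *dequeue(int nid)
{
    struct list_head *head = &freelists[nid];
    struct page *page;

    if (head->next == head)
        return NULL;
    page = (struct page *)((char *)head->next - offsetof(struct page, lru));
    list_del_entry(&page->lru);
    return page;
}

int main(void)
{
    struct page p = { .nid = 0 };

    list_init(&freelists[0]);
    list_init(&freelists[1]);
    list_init(&p.lru);          /* a lone node is a valid one-entry "list" */

    enqueue(&p);
    printf("dequeue(0): %p (expect %p)\n", (void *)dequeue(0), (void *)&p);
    printf("dequeue(0): %p (expect nil)\n", (void *)dequeue(0));
    return 0;
}

list_move() is what lets enqueue_huge_page() work regardless of which list the page currently occupies: it unconditionally unlinks before relinking.
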
626 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma()
631 struct page *page = NULL; in dequeue_huge_page_vma() local
660 page = dequeue_huge_page_node(h, zone_to_nid(zone)); in dequeue_huge_page_vma()
661 if (page) { in dequeue_huge_page_vma()
667 SetPagePrivate(page); in dequeue_huge_page_vma()
675 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) in dequeue_huge_page_vma()
677 return page; in dequeue_huge_page_vma()
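
dequeue_huge_page_vma() brackets its zonelist walk with a cpuset mems cookie: if read_mems_allowed_retry() reports that the allowed-node mask changed while we searched (line 675), the whole dequeue is retried. Here is a single-threaded sketch of that sequence-counter protocol; seq, read_begin(), and writer_update() are toy stand-ins for the kernel's seqcount machinery.

#include <stdio.h>

static unsigned int seq;            /* bumped twice around every update */
static int allowed_mask = 0x3;

static unsigned int read_begin(void)      { return seq; }
static int read_retry(unsigned int snap)  { return seq != snap; }

static void writer_update(int new_mask)
{
    seq++;                          /* readers in flight will notice */
    allowed_mask = new_mask;
    seq++;
}

int main(void)
{
    unsigned int snap;
    int mask;

    do {
        snap = read_begin();
        mask = allowed_mask;        /* ... allocation attempt here ... */
        if (snap == 0)              /* simulate one concurrent update  */
            writer_update(0x1);
    } while (read_retry(snap));

    printf("stable mask: 0x%x\n", mask);    /* 0x1, after one retry */
    return 0;
}
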
757 static void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
762 struct page *p = page + 1; in destroy_compound_gigantic_page()
764 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in destroy_compound_gigantic_page()
770 set_compound_order(page, 0); in destroy_compound_gigantic_page()
771 __ClearPageHead(page); in destroy_compound_gigantic_page()
774 static void free_gigantic_page(struct page *page, unsigned int order) in free_gigantic_page() argument
776 free_contig_range(page_to_pfn(page), 1 << order); in free_gigantic_page()
790 struct page *page; in pfn_range_valid_gigantic() local
796 page = pfn_to_page(i); in pfn_range_valid_gigantic()
798 if (PageReserved(page)) in pfn_range_valid_gigantic()
801 if (page_count(page) > 0) in pfn_range_valid_gigantic()
804 if (PageHuge(page)) in pfn_range_valid_gigantic()
818 static struct page *alloc_gigantic_page(int nid, unsigned int order) in alloc_gigantic_page()
853 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
854 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
856 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid) in alloc_fresh_gigantic_page_node()
858 struct page *page; in alloc_fresh_gigantic_page_node() local
860 page = alloc_gigantic_page(nid, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
861 if (page) { in alloc_fresh_gigantic_page_node()
862 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_gigantic_page_node()
863 prep_new_huge_page(h, page, nid); in alloc_fresh_gigantic_page_node()
866 return page; in alloc_fresh_gigantic_page_node()
872 struct page *page = NULL; in alloc_fresh_gigantic_page() local
876 page = alloc_fresh_gigantic_page_node(h, node); in alloc_fresh_gigantic_page()
877 if (page) in alloc_fresh_gigantic_page()
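
alloc_fresh_gigantic_page() tries alloc_fresh_gigantic_page_node() on each allowed node in turn, advancing a persistent cursor so successive allocations interleave across nodes. A sketch of that round-robin walk follows; NR_NODES and alloc_on_node() are hypothetical, and next_node_to_alloc plays the role of the kernel's per-hstate cursor.

#include <stdio.h>
#include <stdbool.h>

#define NR_NODES 4

static int next_node_to_alloc;      /* analogous to h->next_nid_to_alloc */

/* hypothetical per-node allocator: only node 2 has memory here,
 * to show the walk skipping nodes that fail */
static bool alloc_on_node(int nid) { return nid == 2; }

static int alloc_round_robin(void)
{
    int start = next_node_to_alloc;
    int nid = start;

    do {
        /* advance the cursor before trying, so the next caller
         * starts one node further along even on success */
        next_node_to_alloc = (nid + 1) % NR_NODES;
        if (alloc_on_node(nid))
            return nid;
        nid = next_node_to_alloc;
    } while (nid != start);

    return -1;                      /* every node failed */
}

int main(void)
{
    printf("allocated on node %d\n", alloc_round_robin());  /* 2 */
    printf("next start: %d\n", next_node_to_alloc);         /* 3 */
    return 0;
}
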
887 static inline void free_gigantic_page(struct page *page, unsigned int order) { } in free_gigantic_page() argument
888 static inline void destroy_compound_gigantic_page(struct page *page, in destroy_compound_gigantic_page() argument
894 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
902 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
904 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | in update_and_free_page()
909 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page); in update_and_free_page()
910 set_compound_page_dtor(page, NULL); in update_and_free_page()
911 set_page_refcounted(page); in update_and_free_page()
913 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
914 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
916 arch_release_hugepage(page); in update_and_free_page()
917 __free_pages(page, huge_page_order(h)); in update_and_free_page()
938 bool page_huge_active(struct page *page) in page_huge_active() argument
940 VM_BUG_ON_PAGE(!PageHuge(page), page); in page_huge_active()
941 return PageHead(page) && PagePrivate(&page[1]); in page_huge_active()
945 static void set_page_huge_active(struct page *page) in set_page_huge_active() argument
947 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in set_page_huge_active()
948 SetPagePrivate(&page[1]); in set_page_huge_active()
951 static void clear_page_huge_active(struct page *page) in clear_page_huge_active() argument
953 VM_BUG_ON_PAGE(!PageHeadHuge(page), page); in clear_page_huge_active()
954 ClearPagePrivate(&page[1]); in clear_page_huge_active()
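
The three helpers above keep the per-hugepage "active" flag on the first tail page (&page[1]) rather than on the head, because the head's own Private bit is already taken: it records whether a reservation must be restored on free (see SetPagePrivate at line 667 and restore_reserve at line 973). A toy sketch of the tail-page flag trick:

#include <stdbool.h>
#include <stdio.h>

#define PG_private (1u << 0)

struct page { unsigned int flags; };

static bool page_huge_active(struct page *head)
{
    /* &head[1] is the first tail page of the compound page */
    return head[1].flags & PG_private;
}

static void set_page_huge_active(struct page *head)
{
    head[1].flags |= PG_private;
}

static void clear_page_huge_active(struct page *head)
{
    head[1].flags &= ~PG_private;
}

int main(void)
{
    struct page hpage[512] = { 0 };   /* head + 511 tails, 2MB-style */

    set_page_huge_active(hpage);
    printf("active: %d\n", page_huge_active(hpage));  /* 1 */
    clear_page_huge_active(hpage);
    printf("active: %d\n", page_huge_active(hpage));  /* 0 */
    return 0;
}
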
957 void free_huge_page(struct page *page) in free_huge_page() argument
963 struct hstate *h = page_hstate(page); in free_huge_page()
964 int nid = page_to_nid(page); in free_huge_page()
966 (struct hugepage_subpool *)page_private(page); in free_huge_page()
969 set_page_private(page, 0); in free_huge_page()
970 page->mapping = NULL; in free_huge_page()
971 BUG_ON(page_count(page)); in free_huge_page()
972 BUG_ON(page_mapcount(page)); in free_huge_page()
973 restore_reserve = PagePrivate(page); in free_huge_page()
974 ClearPagePrivate(page); in free_huge_page()
985 clear_page_huge_active(page); in free_huge_page()
987 pages_per_huge_page(h), page); in free_huge_page()
993 list_del(&page->lru); in free_huge_page()
994 update_and_free_page(h, page); in free_huge_page()
998 arch_clear_hugepage_flags(page); in free_huge_page()
999 enqueue_huge_page(h, page); in free_huge_page()
1004 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1006 INIT_LIST_HEAD(&page->lru); in prep_new_huge_page()
1007 set_compound_page_dtor(page, free_huge_page); in prep_new_huge_page()
1009 set_hugetlb_cgroup(page, NULL); in prep_new_huge_page()
1013 put_page(page); /* free it into the hugepage allocator */ in prep_new_huge_page()
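
prep_new_huge_page() installs free_huge_page() as the page's compound destructor, which is how the final put_page() on a huge page returns it to hugetlb's freelists instead of the buddy allocator; PageHuge() (line 1059) later identifies huge pages by comparing that stored destructor. A sketch of the destructor-dispatch pattern with a toy struct page and refcount (the real kernel stashes the dtor in the compound head's metadata and calls it from the compound put_page() path):

#include <stdio.h>

struct page {
    int refcount;
    void (*compound_dtor)(struct page *);
};

static void free_huge_page_demo(struct page *page)
{
    printf("free_huge_page: back onto the hugepage freelist\n");
}

static void set_compound_page_dtor(struct page *page,
                                   void (*dtor)(struct page *))
{
    page->compound_dtor = dtor;
}

/* put_page(): drop a reference; run the destructor on the last one */
static void put_page(struct page *page)
{
    if (--page->refcount == 0 && page->compound_dtor)
        page->compound_dtor(page);
}

int main(void)
{
    struct page head = { .refcount = 2 };

    set_compound_page_dtor(&head, free_huge_page_demo);
    put_page(&head);   /* refcount 1, nothing happens   */
    put_page(&head);   /* refcount 0, destructor fires  */
    return 0;
}
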
1016 static void prep_compound_gigantic_page(struct page *page, unsigned int order) in prep_compound_gigantic_page() argument
1020 struct page *p = page + 1; in prep_compound_gigantic_page()
1023 set_compound_order(page, order); in prep_compound_gigantic_page()
1024 __SetPageHead(page); in prep_compound_gigantic_page()
1025 __ClearPageReserved(page); in prep_compound_gigantic_page()
1026 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { in prep_compound_gigantic_page()
1041 p->first_page = page; in prep_compound_gigantic_page()
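
prep_compound_gigantic_page() wires a freshly allocated gigantic range into one compound page: the order and PG_head go on the head, and every tail gets a back-pointer to the head (p->first_page in kernels of this vintage; later kernels replaced first_page with a compound_head encoding). A toy sketch of the head/tail wiring; the real loop steps with mem_map_next() because the struct pages of a gigantic page may not be virtually contiguous.

#include <stdio.h>

struct page {
    int head;                  /* stands in for PG_head        */
    unsigned int order;        /* stored on the head only      */
    struct page *first_page;   /* tail -> head back-pointer    */
};

static void prep_compound(struct page *page, unsigned int order)
{
    unsigned long i, nr_pages = 1UL << order;

    page->order = order;
    page->head = 1;
    for (i = 1; i < nr_pages; i++)
        page[i].first_page = page;
}

int main(void)
{
    static struct page map[8];

    prep_compound(map, 3);            /* order-3: 8 base pages */
    printf("tail 5 -> head? %d\n", map[5].first_page == &map[0]);
    return 0;
}
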
1053 int PageHuge(struct page *page) in PageHuge() argument
1055 if (!PageCompound(page)) in PageHuge()
1058 page = compound_head(page); in PageHuge()
1059 return get_compound_page_dtor(page) == free_huge_page; in PageHuge()
1067 int PageHeadHuge(struct page *page_head) in PageHeadHuge()
1075 pgoff_t __basepage_index(struct page *page) in __basepage_index() argument
1077 struct page *page_head = compound_head(page); in __basepage_index()
1082 return page_index(page); in __basepage_index()
1085 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); in __basepage_index()
1087 compound_idx = page - page_head; in __basepage_index()
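
__basepage_index() computes a tail page's offset within its compound page two ways: plain pointer subtraction (line 1087) when the struct pages are known contiguous, but a pfn difference (line 1085) for gigantic pages, whose struct pages can span mem_map sections that are not virtually contiguous, making pointer arithmetic unsafe. A worked example of the contiguous case with a toy mem_map:

#include <stdio.h>

struct page { int flags; };

int main(void)
{
    struct page mem_map[1024];           /* stand-in for the real mem_map */
    struct page *head = &mem_map[512];   /* head of an order-9 huge page  */
    struct page *tail = &mem_map[517];

    /* page - page_head: valid only while struct pages are contiguous */
    long compound_idx = tail - head;
    printf("compound_idx = %ld\n", compound_idx);   /* 5 */

    /* for order >= MAX_ORDER the kernel instead computes
     * page_to_pfn(page) - page_to_pfn(page_head) */
    return 0;
}
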
1092 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) in alloc_fresh_huge_page_node()
1094 struct page *page; in alloc_fresh_huge_page_node() local
1096 page = alloc_pages_exact_node(nid, in alloc_fresh_huge_page_node()
1100 if (page) { in alloc_fresh_huge_page_node()
1101 if (arch_prepare_hugepage(page)) { in alloc_fresh_huge_page_node()
1102 __free_pages(page, huge_page_order(h)); in alloc_fresh_huge_page_node()
1105 prep_new_huge_page(h, page, nid); in alloc_fresh_huge_page_node()
1108 return page; in alloc_fresh_huge_page_node()
1113 struct page *page; in alloc_fresh_huge_page() local
1118 page = alloc_fresh_huge_page_node(h, node); in alloc_fresh_huge_page()
1119 if (page) { in alloc_fresh_huge_page()
1152 struct page *page = in free_pool_huge_page() local
1154 struct page, lru); in free_pool_huge_page()
1155 list_del(&page->lru); in free_pool_huge_page()
1162 update_and_free_page(h, page); in free_pool_huge_page()
1175 static void dissolve_free_huge_page(struct page *page) in dissolve_free_huge_page() argument
1178 if (PageHuge(page) && !page_count(page)) { in dissolve_free_huge_page()
1179 struct hstate *h = page_hstate(page); in dissolve_free_huge_page()
1180 int nid = page_to_nid(page); in dissolve_free_huge_page()
1181 list_del(&page->lru); in dissolve_free_huge_page()
1184 update_and_free_page(h, page); in dissolve_free_huge_page()
1206 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) in alloc_buddy_huge_page()
1208 struct page *page; in alloc_buddy_huge_page() local
1248 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP| in alloc_buddy_huge_page()
1252 page = alloc_pages_exact_node(nid, in alloc_buddy_huge_page()
1256 if (page && arch_prepare_hugepage(page)) { in alloc_buddy_huge_page()
1257 __free_pages(page, huge_page_order(h)); in alloc_buddy_huge_page()
1258 page = NULL; in alloc_buddy_huge_page()
1262 if (page) { in alloc_buddy_huge_page()
1263 INIT_LIST_HEAD(&page->lru); in alloc_buddy_huge_page()
1264 r_nid = page_to_nid(page); in alloc_buddy_huge_page()
1265 set_compound_page_dtor(page, free_huge_page); in alloc_buddy_huge_page()
1266 set_hugetlb_cgroup(page, NULL); in alloc_buddy_huge_page()
1280 return page; in alloc_buddy_huge_page()
1288 struct page *alloc_huge_page_node(struct hstate *h, int nid) in alloc_huge_page_node()
1290 struct page *page = NULL; in alloc_huge_page_node() local
1294 page = dequeue_huge_page_node(h, nid); in alloc_huge_page_node()
1297 if (!page) in alloc_huge_page_node()
1298 page = alloc_buddy_huge_page(h, nid); in alloc_huge_page_node()
1300 return page; in alloc_huge_page_node()
1310 struct page *page, *tmp; in gather_surplus_pages() local
1328 page = alloc_buddy_huge_page(h, NUMA_NO_NODE); in gather_surplus_pages()
1329 if (!page) { in gather_surplus_pages()
1333 list_add(&page->lru, &surplus_list); in gather_surplus_pages()
1367 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { in gather_surplus_pages()
1374 put_page_testzero(page); in gather_surplus_pages()
1375 VM_BUG_ON_PAGE(page_count(page), page); in gather_surplus_pages()
1376 enqueue_huge_page(h, page); in gather_surplus_pages()
1382 list_for_each_entry_safe(page, tmp, &surplus_list, lru) in gather_surplus_pages()
1383 put_page(page); in gather_surplus_pages()
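
gather_surplus_pages() walks surplus_list with list_for_each_entry_safe() because the loop body moves entries off the list; a plain walk would follow links the body has already rewritten. The same discipline in a minimal malloc/free demo, saving the next pointer before the current node is destroyed, exactly the job the 'tmp' cursor does above:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

int main(void)
{
    struct node *head = NULL, *n, *tmp;

    for (int i = 0; i < 3; i++) {
        n = malloc(sizeof(*n));
        n->v = i;
        n->next = head;
        head = n;
    }

    /* safe walk: 'tmp' is captured before free() invalidates 'n' */
    for (n = head; n; n = tmp) {
        tmp = n->next;
        printf("freeing %d\n", n->v);
        free(n);
    }
    return 0;
}
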
1467 static struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page()
1472 struct page *page; in alloc_huge_page() local
1498 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); in alloc_huge_page()
1499 if (!page) { in alloc_huge_page()
1501 page = alloc_buddy_huge_page(h, NUMA_NO_NODE); in alloc_huge_page()
1502 if (!page) in alloc_huge_page()
1506 list_move(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
1509 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
1512 set_page_private(page, (unsigned long)spool); in alloc_huge_page()
1515 return page; in alloc_huge_page()
1530 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, in alloc_huge_page_noerr()
1533 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); in alloc_huge_page_noerr() local
1534 if (IS_ERR(page)) in alloc_huge_page_noerr()
1535 page = NULL; in alloc_huge_page_noerr()
1536 return page; in alloc_huge_page_noerr()
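
alloc_huge_page() reports failure through the ERR_PTR convention, and alloc_huge_page_noerr() flattens that to NULL for callers that cannot propagate an errno. A user-space sketch of the convention, simplified from include/linux/err.h; alloc_demo() and its _noerr wrapper are hypothetical.

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static bool  IS_ERR(const void *ptr)
{
    /* error codes live in the top MAX_ERRNO pointer values */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* hypothetical allocator in the style of alloc_huge_page():
 * a real pointer on success, ERR_PTR(-ENOMEM) on failure */
static void *alloc_demo(bool fail)
{
    static int object;
    return fail ? ERR_PTR(-ENOMEM) : &object;
}

/* the _noerr shape: callers that can't use the errno just get NULL */
static void *alloc_demo_noerr(bool fail)
{
    void *p = alloc_demo(fail);
    return IS_ERR(p) ? NULL : p;
}

int main(void)
{
    void *p = alloc_demo(true);

    if (IS_ERR(p))
        printf("alloc failed: %ld\n", PTR_ERR(p));      /* -12 (ENOMEM) */
    printf("noerr result: %p\n", alloc_demo_noerr(true)); /* (nil)      */
    return 0;
}
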
1570 static void __init prep_compound_huge_page(struct page *page, in prep_compound_huge_page() argument
1574 prep_compound_gigantic_page(page, order); in prep_compound_huge_page()
1576 prep_compound_page(page, order); in prep_compound_huge_page()
1586 struct page *page; in gather_bootmem_prealloc() local
1589 page = pfn_to_page(m->phys >> PAGE_SHIFT); in gather_bootmem_prealloc()
1593 page = virt_to_page(m); in gather_bootmem_prealloc()
1595 WARN_ON(page_count(page) != 1); in gather_bootmem_prealloc()
1596 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
1597 WARN_ON(PageReserved(page)); in gather_bootmem_prealloc()
1598 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
1606 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
1673 struct page *page, *next; in try_to_free_low() local
1675 list_for_each_entry_safe(page, next, freel, lru) { in try_to_free_low()
1678 if (PageHighMem(page)) in try_to_free_low()
1680 list_del(&page->lru); in try_to_free_low()
1681 update_and_free_page(h, page); in try_to_free_low()
1683 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
2603 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
2609 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, in make_huge_pte()
2612 entry = huge_pte_wrprotect(mk_huge_pte(page, in make_huge_pte()
2617 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
2662 struct page *ptepage; in copy_hugetlb_page_range()
2737 struct page *ref_page) in __unmap_hugepage_range()
2745 struct page *page; in __unmap_hugepage_range() local
2781 page = pte_page(pte); in __unmap_hugepage_range()
2788 if (page != ref_page) in __unmap_hugepage_range()
2802 set_page_dirty(page); in __unmap_hugepage_range()
2804 page_remove_rmap(page); in __unmap_hugepage_range()
2805 force_flush = !__tlb_remove_page(tlb, page); in __unmap_hugepage_range()
2836 unsigned long end, struct page *ref_page) in __unmap_hugepage_range_final()
2854 unsigned long end, struct page *ref_page) in unmap_hugepage_range()
2873 struct page *page, unsigned long address) in unmap_ref_private() argument
2917 address + huge_page_size(h), page); in unmap_ref_private()
2930 struct page *pagecache_page, spinlock_t *ptl) in hugetlb_cow()
2933 struct page *old_page, *new_page; in hugetlb_cow()
3050 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page()
3071 struct page *page; in hugetlbfs_pagecache_present() local
3076 page = find_get_page(mapping, idx); in hugetlbfs_pagecache_present()
3077 if (page) in hugetlbfs_pagecache_present()
3078 put_page(page); in hugetlbfs_pagecache_present()
3079 return page != NULL; in hugetlbfs_pagecache_present()
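
hugetlbfs_pagecache_present() only wants a yes/no answer, so it takes the reference find_get_page() hands back and drops it immediately. A sketch of that probe-and-release pattern; find_get_page_demo() and the toy cache are stand-ins for the real page-cache lookup.

#include <stdbool.h>
#include <stdio.h>

struct page { int refcount; };

static struct page slot5 = { .refcount = 1 };

/* hypothetical find_get_page(): hands back the page with an extra
 * reference, or NULL if the index is not cached */
static struct page *find_get_page_demo(unsigned long idx)
{
    if (idx != 5)
        return NULL;
    slot5.refcount++;
    return &slot5;
}

static void put_page_demo(struct page *page) { page->refcount--; }

static bool pagecache_present(unsigned long idx)
{
    struct page *page = find_get_page_demo(idx);

    if (page)
        put_page_demo(page);        /* we only wanted the answer */
    return page != NULL;
}

int main(void)
{
    printf("idx 5: %d, idx 6: %d\n",
           pagecache_present(5), pagecache_present(6));  /* 1, 0 */
    return 0;
}
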
3090 struct page *page; in hugetlb_no_page() local
3110 page = find_lock_page(mapping, idx); in hugetlb_no_page()
3111 if (!page) { in hugetlb_no_page()
3115 page = alloc_huge_page(vma, address, 0); in hugetlb_no_page()
3116 if (IS_ERR(page)) { in hugetlb_no_page()
3117 ret = PTR_ERR(page); in hugetlb_no_page()
3124 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
3125 __SetPageUptodate(page); in hugetlb_no_page()
3126 set_page_huge_active(page); in hugetlb_no_page()
3132 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); in hugetlb_no_page()
3134 put_page(page); in hugetlb_no_page()
3139 ClearPagePrivate(page); in hugetlb_no_page()
3145 lock_page(page); in hugetlb_no_page()
3158 if (unlikely(PageHWPoison(page))) { in hugetlb_no_page()
3188 ClearPagePrivate(page); in hugetlb_no_page()
3189 hugepage_add_new_anon_rmap(page, vma, address); in hugetlb_no_page()
3191 page_dup_rmap(page); in hugetlb_no_page()
3192 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
3198 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); in hugetlb_no_page()
3202 unlock_page(page); in hugetlb_no_page()
3209 unlock_page(page); in hugetlb_no_page()
3210 put_page(page); in hugetlb_no_page()
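
hugetlb_no_page() does the classic lookup-or-allocate dance: find_lock_page() first, and on a miss allocate a fresh huge page and try to insert it into the page cache; if add_to_page_cache() fails because another thread raced ahead, the new page is dropped and the lookup retried. A toy single-threaded sketch of that retry loop, with malloc() standing in for alloc_huge_page():

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 16
static void *cache[NSLOTS];          /* toy page cache, one slot per index */

static void *find_page(unsigned long idx) { return cache[idx]; }

/* 0 on success, -1 if another path filled the slot first */
static int add_to_cache(void *page, unsigned long idx)
{
    if (cache[idx])
        return -1;
    cache[idx] = page;
    return 0;
}

static void *faultin(unsigned long idx)
{
    void *page;

    for (;;) {
        page = find_page(idx);
        if (page)
            return page;
        page = malloc(64);           /* stands in for alloc_huge_page() */
        if (add_to_cache(page, idx) == 0)
            return page;
        free(page);                  /* lost the race: drop ours, retry */
    }
}

int main(void)
{
    void *a = faultin(3), *b = faultin(3);

    printf("same page both times: %d\n", a == b);   /* 1 */
    free(cache[3]);
    return 0;
}
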
3257 struct page *page = NULL; in hugetlb_fault() local
3258 struct page *pagecache_page = NULL; in hugetlb_fault()
3339 page = pte_page(entry); in hugetlb_fault()
3340 if (page != pagecache_page) in hugetlb_fault()
3341 if (!trylock_page(page)) { in hugetlb_fault()
3346 get_page(page); in hugetlb_fault()
3361 if (page != pagecache_page) in hugetlb_fault()
3362 unlock_page(page); in hugetlb_fault()
3363 put_page(page); in hugetlb_fault()
3381 wait_on_page_locked(page); in hugetlb_fault()
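
In hugetlb_fault(), the page lock is taken with trylock_page() (line 3341) because sleeping for it while holding the page-table lock would invert lock order; on failure the fault backs out, waits with wait_on_page_locked() (line 3381), and lets the whole fault be retried. Below is a loose pthreads analogy of that trylock-then-back-off shape, not kernel code; compile with -pthread.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static int handle_fault(void)
{
    /* must not block here: back out and let the caller retry */
    if (pthread_mutex_trylock(&page_lock) != 0)
        return -1;
    /* ... fault work under the lock ... */
    pthread_mutex_unlock(&page_lock);
    return 0;
}

int main(void)
{
    pthread_mutex_lock(&page_lock);        /* someone else holds it */
    printf("fault: %d (expect -1)\n", handle_fault());
    pthread_mutex_unlock(&page_lock);
    printf("fault: %d (expect 0)\n", handle_fault());
    return 0;
}
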
3386 struct page **pages, struct vm_area_struct **vmas, in follow_hugetlb_page()
3399 struct page *page; in follow_hugetlb_page() local
3464 page = pte_page(huge_ptep_get(pte)); in follow_hugetlb_page()
3467 pages[i] = mem_map_offset(page, pfn_offset); in follow_hugetlb_page()
3857 struct page * __weak
3864 struct page * __weak
3868 struct page *page = NULL; in follow_huge_pmd() local
3880 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); in follow_huge_pmd()
3882 get_page(page); in follow_huge_pmd()
3896 return page; in follow_huge_pmd()
3899 struct page * __weak
3915 int dequeue_hwpoisoned_huge_page(struct page *hpage) in dequeue_hwpoisoned_huge_page()
3944 bool isolate_huge_page(struct page *page, struct list_head *list) in isolate_huge_page() argument
3948 VM_BUG_ON_PAGE(!PageHead(page), page); in isolate_huge_page()
3950 if (!page_huge_active(page) || !get_page_unless_zero(page)) { in isolate_huge_page()
3954 clear_page_huge_active(page); in isolate_huge_page()
3955 list_move_tail(&page->lru, list); in isolate_huge_page()
3961 void putback_active_hugepage(struct page *page) in putback_active_hugepage() argument
3963 VM_BUG_ON_PAGE(!PageHead(page), page); in putback_active_hugepage()
3965 set_page_huge_active(page); in putback_active_hugepage()
3966 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); in putback_active_hugepage()
3968 put_page(page); in putback_active_hugepage()
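
isolate_huge_page() pins the page with get_page_unless_zero() (line 3950): the reference is taken only if the count is still nonzero, since a page whose count has hit zero is already on the freeing path and must not be resurrected. A sketch of that increment-unless-zero rule with C11 atomics; the kernel's version is atomic_inc_not_zero() on the page refcount.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page { atomic_int refcount; };

static bool get_page_unless_zero(struct page *page)
{
    int old = atomic_load(&page->refcount);

    /* compare-and-swap loop: only ever increment from nonzero */
    while (old != 0) {
        if (atomic_compare_exchange_weak(&page->refcount, &old, old + 1))
            return true;
    }
    return false;
}

int main(void)
{
    struct page live = { 1 }, dying = { 0 };

    printf("live : %d (expect 1)\n", get_page_unless_zero(&live));
    printf("dying: %d (expect 0)\n", get_page_unless_zero(&dying));
    return 0;
}
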