Lines matching refs: page (identifier cross-reference; all hits are in mm/page_alloc.c of a v4.4-era Linux kernel)

136 static inline int get_pcppage_migratetype(struct page *page)  in get_pcppage_migratetype()  argument
138 return page->index; in get_pcppage_migratetype()
141 static inline void set_pcppage_migratetype(struct page *page, int migratetype) in set_pcppage_migratetype() argument
143 page->index = migratetype; in set_pcppage_migratetype()
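
The pair above caches a page's migratetype while it sits on a per-cpu (pcp) free list. page->index is otherwise unused for a free page, so the field is borrowed for the purpose. Assembled from the matched lines, with braces and comments added:

static inline int get_pcppage_migratetype(struct page *page)
{
	/* for a page on a pcp list, ->index is repurposed to hold its migratetype */
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
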
187 static void __free_pages_ok(struct page *page, unsigned int order);
232 static void free_compound_page(struct page *page);
338 void set_pageblock_migratetype(struct page *page, int migratetype) in set_pageblock_migratetype() argument
344 set_pageblock_flags_group(page, (unsigned long)migratetype, in set_pageblock_migratetype()
349 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
353 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
372 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
374 if (!pfn_valid_within(page_to_pfn(page))) in page_is_consistent()
376 if (zone != page_zone(page)) in page_is_consistent()
384 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
386 if (page_outside_zone_boundaries(zone, page)) in bad_range()
388 if (!page_is_consistent(zone, page)) in bad_range()
394 static inline int bad_range(struct zone *zone, struct page *page) in bad_range() argument
400 static void bad_page(struct page *page, const char *reason, in bad_page() argument
408 if (PageHWPoison(page)) { in bad_page()
409 page_mapcount_reset(page); /* remove PageBuddy */ in bad_page()
434 current->comm, page_to_pfn(page)); in bad_page()
435 dump_page_badflags(page, reason, bad_flags); in bad_page()
441 page_mapcount_reset(page); /* remove PageBuddy */ in bad_page()
460 static void free_compound_page(struct page *page) in free_compound_page() argument
462 __free_pages_ok(page, compound_order(page)); in free_compound_page()
465 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
470 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); in prep_compound_page()
471 set_compound_order(page, order); in prep_compound_page()
472 __SetPageHead(page); in prep_compound_page()
474 struct page *p = page + i; in prep_compound_page()
476 set_compound_head(p, page); in prep_compound_page()
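
Only lines containing "page" are listed, so the loop structure of prep_compound_page() is elided above. A sketch of how the matched lines fit together, assuming the v4.4-era body; the set_page_count(p, 0) call is an assumption, since it would not match the search and so cannot appear in the listing:

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);		/* assumed; not visible in the match list */
		set_compound_head(p, page);	/* each tail page points back at the head */
	}
}
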
533 static inline void set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
541 page_ext = lookup_page_ext(page); in set_page_guard()
544 INIT_LIST_HEAD(&page->lru); in set_page_guard()
545 set_page_private(page, order); in set_page_guard()
550 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
558 page_ext = lookup_page_ext(page); in clear_page_guard()
561 set_page_private(page, 0); in clear_page_guard()
567 static inline void set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
569 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
573 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
575 set_page_private(page, order); in set_page_order()
576 __SetPageBuddy(page); in set_page_order()
579 static inline void rmv_page_order(struct page *page) in rmv_page_order() argument
581 __ClearPageBuddy(page); in rmv_page_order()
582 set_page_private(page, 0); in rmv_page_order()
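
These two helpers define what "in the buddy allocator" means for a page: the block order lives in page->private and PG_buddy is set. Assembled from the matched lines:

static inline void set_page_order(struct page *page, unsigned int order)
{
	/* record the block's order and mark the page as a free buddy block */
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}
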
600 static inline int page_is_buddy(struct page *page, struct page *buddy, in page_is_buddy() argument
607 if (page_zone_id(page) != page_zone_id(buddy)) in page_is_buddy()
621 if (page_zone_id(page) != page_zone_id(buddy)) in page_is_buddy()
656 static inline void __free_one_page(struct page *page, in __free_one_page() argument
664 struct page *buddy; in __free_one_page()
670 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
678 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); in __free_one_page()
679 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
684 buddy = page + (buddy_idx - page_idx); in __free_one_page()
685 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
699 page = page + (combined_idx - page_idx); in __free_one_page()
716 buddy = page + (buddy_idx - page_idx); in __free_one_page()
729 set_page_order(page, order); in __free_one_page()
740 struct page *higher_page, *higher_buddy; in __free_one_page()
742 higher_page = page + (combined_idx - page_idx); in __free_one_page()
746 list_add_tail(&page->lru, in __free_one_page()
752 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
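
__free_one_page() finds a block's buddy by pure index arithmetic: at order n the buddy differs only in bit n of the page index, so buddy_idx = page_idx ^ (1 << order), and the merged block starts at combined_idx = buddy_idx & page_idx. A standalone, runnable demonstration of the arithmetic (names are illustrative, not kernel API):

#include <stdio.h>

/* the same XOR trick the kernel's buddy lookup uses */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	/*
	 * Freeing the order-2 block at page index 12 (0b1100): its buddy is
	 * index 8 (0b1000); if that block is also free, the pair merges into
	 * the order-3 block starting at index 8 = 12 & 8.
	 */
	unsigned long page_idx = 12;
	unsigned int order = 2;
	unsigned long buddy_idx = find_buddy_index(page_idx, order);
	unsigned long combined_idx = buddy_idx & page_idx;

	printf("buddy of %lu at order %u is %lu; merged block starts at %lu\n",
	       page_idx, order, buddy_idx, combined_idx);
	return 0;
}
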
757 static inline int free_pages_check(struct page *page) in free_pages_check() argument
762 if (unlikely(page_mapcount(page))) in free_pages_check()
764 if (unlikely(page->mapping != NULL)) in free_pages_check()
766 if (unlikely(atomic_read(&page->_count) != 0)) in free_pages_check()
768 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { in free_pages_check()
773 if (unlikely(page->mem_cgroup)) in free_pages_check()
777 bad_page(page, bad_reason, bad_flags); in free_pages_check()
780 page_cpupid_reset_last(page); in free_pages_check()
781 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) in free_pages_check()
782 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_check()
811 struct page *page; in free_pcppages_bulk() local
835 page = list_entry(list->prev, struct page, lru); in free_pcppages_bulk()
837 list_del(&page->lru); in free_pcppages_bulk()
839 mt = get_pcppage_migratetype(page); in free_pcppages_bulk()
841 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); in free_pcppages_bulk()
844 mt = get_pageblock_migratetype(page); in free_pcppages_bulk()
846 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
847 trace_mm_page_pcpu_drain(page, 0, mt); in free_pcppages_bulk()
854 struct page *page, unsigned long pfn, in free_one_page() argument
866 migratetype = get_pfnblock_migratetype(page, pfn); in free_one_page()
868 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
872 static int free_tail_pages_check(struct page *head_page, struct page *page) in free_tail_pages_check() argument
886 if (unlikely(!PageTail(page))) { in free_tail_pages_check()
887 bad_page(page, "PageTail not set", 0); in free_tail_pages_check()
890 if (unlikely(compound_head(page) != head_page)) { in free_tail_pages_check()
891 bad_page(page, "compound_head not consistent", 0); in free_tail_pages_check()
896 clear_compound_head(page); in free_tail_pages_check()
900 static void __meminit __init_single_page(struct page *page, unsigned long pfn, in __init_single_page() argument
903 set_page_links(page, zone, nid, pfn); in __init_single_page()
904 init_page_count(page); in __init_single_page()
905 page_mapcount_reset(page); in __init_single_page()
906 page_cpupid_reset_last(page); in __init_single_page()
908 INIT_LIST_HEAD(&page->lru); in __init_single_page()
912 set_page_address(page, __va(pfn << PAGE_SHIFT)); in __init_single_page()
961 struct page *page = pfn_to_page(start_pfn); in reserve_bootmem_region() local
966 INIT_LIST_HEAD(&page->lru); in reserve_bootmem_region()
968 SetPageReserved(page); in reserve_bootmem_region()
973 static bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
975 bool compound = PageCompound(page); in free_pages_prepare()
978 VM_BUG_ON_PAGE(PageTail(page), page); in free_pages_prepare()
979 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
981 trace_mm_page_free(page, order); in free_pages_prepare()
982 kmemcheck_free_shadow(page, order); in free_pages_prepare()
983 kasan_free_pages(page, order); in free_pages_prepare()
985 if (PageAnon(page)) in free_pages_prepare()
986 page->mapping = NULL; in free_pages_prepare()
987 bad += free_pages_check(page); in free_pages_prepare()
990 bad += free_tail_pages_check(page, page + i); in free_pages_prepare()
991 bad += free_pages_check(page + i); in free_pages_prepare()
996 reset_page_owner(page, order); in free_pages_prepare()
998 if (!PageHighMem(page)) { in free_pages_prepare()
999 debug_check_no_locks_freed(page_address(page), in free_pages_prepare()
1001 debug_check_no_obj_freed(page_address(page), in free_pages_prepare()
1004 arch_free_page(page, order); in free_pages_prepare()
1005 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
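
free_pages_prepare() is the gatekeeper for every free: sanity-check the head page (and, for compound pages, every tail), then run the debug hooks before the block re-enters the buddy lists. A reconstruction around the matched lines, assuming the v4.4-era body:

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	bool compound = PageCompound(page);
	int i, bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);
	kasan_free_pages(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	bad += free_pages_check(page);
	for (i = 1; i < (1 << order); i++) {
		if (compound)
			bad += free_tail_pages_check(page, page + i);
		bad += free_pages_check(page + i);
	}
	if (bad)
		return false;	/* bad_page() already complained; refuse the free */

	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					 PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}
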
1010 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
1014 unsigned long pfn = page_to_pfn(page); in __free_pages_ok()
1016 if (!free_pages_prepare(page, order)) in __free_pages_ok()
1019 migratetype = get_pfnblock_migratetype(page, pfn); in __free_pages_ok()
1022 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
1026 static void __init __free_pages_boot_core(struct page *page, in __free_pages_boot_core() argument
1030 struct page *p = page; in __free_pages_boot_core()
1042 page_zone(page)->managed_pages += nr_pages; in __free_pages_boot_core()
1043 set_page_refcounted(page); in __free_pages_boot_core()
1044 __free_pages(page, order); in __free_pages_boot_core()
1099 void __init __free_pages_bootmem(struct page *page, unsigned long pfn, in __free_pages_bootmem() argument
1104 return __free_pages_boot_core(page, pfn, order); in __free_pages_bootmem()
1108 static void __init deferred_free_range(struct page *page, in deferred_free_range() argument
1113 if (!page) in deferred_free_range()
1119 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in deferred_free_range()
1120 __free_pages_boot_core(page, pfn, MAX_ORDER-1); in deferred_free_range()
1124 for (i = 0; i < nr_pages; i++, page++, pfn++) in deferred_free_range()
1125 __free_pages_boot_core(page, pfn, 0); in deferred_free_range()
1175 struct page *page = NULL; in deferred_init_memmap() local
1176 struct page *free_base_page = NULL; in deferred_init_memmap()
1197 page = NULL; in deferred_init_memmap()
1203 page = NULL; in deferred_init_memmap()
1208 if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) { in deferred_init_memmap()
1209 page++; in deferred_init_memmap()
1217 page = pfn_to_page(pfn); in deferred_init_memmap()
1221 if (page->flags) { in deferred_init_memmap()
1222 VM_BUG_ON(page_zone(page) != zone); in deferred_init_memmap()
1226 __init_single_page(page, pfn, zid, nid); in deferred_init_memmap()
1228 free_base_page = page; in deferred_init_memmap()
1278 void __init init_cma_reserved_pageblock(struct page *page) in init_cma_reserved_pageblock() argument
1281 struct page *p = page; in init_cma_reserved_pageblock()
1288 set_pageblock_migratetype(page, MIGRATE_CMA); in init_cma_reserved_pageblock()
1292 p = page; in init_cma_reserved_pageblock()
1299 set_page_refcounted(page); in init_cma_reserved_pageblock()
1300 __free_pages(page, pageblock_order); in init_cma_reserved_pageblock()
1303 adjust_managed_page_count(page, pageblock_nr_pages); in init_cma_reserved_pageblock()
1321 static inline void expand(struct zone *zone, struct page *page, in expand() argument
1331 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1342 set_page_guard(zone, &page[size], high, migratetype); in expand()
1345 list_add(&page[size].lru, &area->free_list[migratetype]); in expand()
1347 set_page_order(&page[size], high); in expand()
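
expand() hands back the unused tail of an over-sized block: halve repeatedly, putting each upper half on the free list one order down (or turning it into a guard page under CONFIG_DEBUG_PAGEALLOC). A reconstruction around the matched lines, assuming the v4.4-era body:

static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		    debug_guardpage_enabled() &&
		    high < debug_guardpage_minorder()) {
			/* keep the upper half as a guard page; it merges back on free */
			set_page_guard(zone, &page[size], high, migratetype);
			continue;
		}

		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
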
1354 static inline int check_new_page(struct page *page) in check_new_page() argument
1359 if (unlikely(page_mapcount(page))) in check_new_page()
1361 if (unlikely(page->mapping != NULL)) in check_new_page()
1363 if (unlikely(atomic_read(&page->_count) != 0)) in check_new_page()
1365 if (unlikely(page->flags & __PG_HWPOISON)) { in check_new_page()
1369 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { in check_new_page()
1374 if (unlikely(page->mem_cgroup)) in check_new_page()
1378 bad_page(page, bad_reason, bad_flags); in check_new_page()
1384 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1390 struct page *p = page + i; in prep_new_page()
1395 set_page_private(page, 0); in prep_new_page()
1396 set_page_refcounted(page); in prep_new_page()
1398 arch_alloc_page(page, order); in prep_new_page()
1399 kernel_map_pages(page, 1 << order, 1); in prep_new_page()
1400 kasan_alloc_pages(page, order); in prep_new_page()
1404 clear_highpage(page + i); in prep_new_page()
1407 prep_compound_page(page, order); in prep_new_page()
1409 set_page_owner(page, order, gfp_flags); in prep_new_page()
1418 set_page_pfmemalloc(page); in prep_new_page()
1420 clear_page_pfmemalloc(page); in prep_new_page()
1430 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest()
1435 struct page *page; in __rmqueue_smallest() local
1443 page = list_entry(area->free_list[migratetype].next, in __rmqueue_smallest()
1444 struct page, lru); in __rmqueue_smallest()
1445 list_del(&page->lru); in __rmqueue_smallest()
1446 rmv_page_order(page); in __rmqueue_smallest()
1448 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
1449 set_pcppage_migratetype(page, migratetype); in __rmqueue_smallest()
1450 return page; in __rmqueue_smallest()
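
__rmqueue_smallest() walks the free lists from the requested order upward, takes the first block it finds, and returns the excess via expand(). Reconstruction, assuming the v4.4-era body:

static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* find a block of at least the requested order on the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		/* give back everything beyond the requested order */
		expand(zone, page, order, current_order, area, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}
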
1474 static struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1480 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1490 struct page *start_page, struct page *end_page, in move_freepages()
1493 struct page *page; in move_freepages() local
1508 for (page = start_page; page <= end_page;) { in move_freepages()
1510 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
1512 if (!pfn_valid_within(page_to_pfn(page))) { in move_freepages()
1513 page++; in move_freepages()
1517 if (!PageBuddy(page)) { in move_freepages()
1518 page++; in move_freepages()
1522 order = page_order(page); in move_freepages()
1523 list_move(&page->lru, in move_freepages()
1525 page += 1 << order; in move_freepages()
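
The move_freepages() loop above advances in block-sized strides: invalid or non-buddy pfns are skipped one page at a time, while each free block is moved whole and the cursor jumps by 1 << order. The loop, reconstructed under the same v4.4-era assumption:

	for (page = start_page; page <= end_page;) {
		/* make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}
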
1532 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
1536 struct page *start_page, *end_page; in move_freepages_block()
1538 start_pfn = page_to_pfn(page); in move_freepages_block()
1546 start_page = page; in move_freepages_block()
1553 static void change_pageblock_range(struct page *pageblock_page, in change_pageblock_range()
1604 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
1607 unsigned int current_order = page_order(page); in steal_suitable_fallback()
1612 change_pageblock_range(page, current_order, start_type); in steal_suitable_fallback()
1616 pages = move_freepages_block(zone, page, start_type); in steal_suitable_fallback()
1621 set_pageblock_migratetype(page, start_type); in steal_suitable_fallback()
1665 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
1686 mt = get_pageblock_migratetype(page); in reserve_highatomic_pageblock()
1690 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
1691 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
1710 struct page *page; in unreserve_highatomic_pageblock() local
1726 page = list_entry(area->free_list[MIGRATE_HIGHATOMIC].next, in unreserve_highatomic_pageblock()
1727 struct page, lru); in unreserve_highatomic_pageblock()
1747 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
1748 move_freepages_block(zone, page, ac->migratetype); in unreserve_highatomic_pageblock()
1757 static inline struct page *
1762 struct page *page; in __rmqueue_fallback() local
1776 page = list_entry(area->free_list[fallback_mt].next, in __rmqueue_fallback()
1777 struct page, lru); in __rmqueue_fallback()
1779 steal_suitable_fallback(zone, page, start_migratetype); in __rmqueue_fallback()
1783 list_del(&page->lru); in __rmqueue_fallback()
1784 rmv_page_order(page); in __rmqueue_fallback()
1786 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1795 set_pcppage_migratetype(page, start_migratetype); in __rmqueue_fallback()
1797 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
1800 return page; in __rmqueue_fallback()
1810 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue()
1813 struct page *page; in __rmqueue() local
1815 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1816 if (unlikely(!page)) { in __rmqueue()
1818 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
1820 if (!page) in __rmqueue()
1821 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1824 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
1825 return page; in __rmqueue()
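
__rmqueue() fixes the fallback order: exact migratetype first, then the CMA fallback for movable requests, then cross-migratetype stealing via __rmqueue_fallback(). Reconstruction, assuming the v4.4-era body:

static struct page *__rmqueue(struct zone *zone, unsigned int order,
				int migratetype, gfp_t gfp_flags)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page)
			page = __rmqueue_fallback(zone, order, migratetype);
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}
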
1841 struct page *page = __rmqueue(zone, order, migratetype, 0); in rmqueue_bulk() local
1842 if (unlikely(page == NULL)) in rmqueue_bulk()
1855 list_add(&page->lru, list); in rmqueue_bulk()
1857 list_add_tail(&page->lru, list); in rmqueue_bulk()
1858 list = &page->lru; in rmqueue_bulk()
1859 if (is_migrate_cma(get_pcppage_migratetype(page))) in rmqueue_bulk()
2021 struct page *page = pfn_to_page(pfn); in mark_free_pages() local
2023 if (!swsusp_page_is_forbidden(page)) in mark_free_pages()
2024 swsusp_unset_page_free(page); in mark_free_pages()
2031 pfn = page_to_pfn(list_entry(curr, struct page, lru)); in mark_free_pages()
2044 void free_hot_cold_page(struct page *page, bool cold) in free_hot_cold_page() argument
2046 struct zone *zone = page_zone(page); in free_hot_cold_page()
2049 unsigned long pfn = page_to_pfn(page); in free_hot_cold_page()
2052 if (!free_pages_prepare(page, 0)) in free_hot_cold_page()
2055 migratetype = get_pfnblock_migratetype(page, pfn); in free_hot_cold_page()
2056 set_pcppage_migratetype(page, migratetype); in free_hot_cold_page()
2069 free_one_page(zone, page, pfn, 0, migratetype); in free_hot_cold_page()
2077 list_add(&page->lru, &pcp->lists[migratetype]); in free_hot_cold_page()
2079 list_add_tail(&page->lru, &pcp->lists[migratetype]); in free_hot_cold_page()
2096 struct page *page, *next; in free_hot_cold_page_list() local
2098 list_for_each_entry_safe(page, next, list, lru) { in free_hot_cold_page_list()
2099 trace_mm_page_free_batched(page, cold); in free_hot_cold_page_list()
2100 free_hot_cold_page(page, cold); in free_hot_cold_page_list()
2112 void split_page(struct page *page, unsigned int order) in split_page() argument
2117 VM_BUG_ON_PAGE(PageCompound(page), page); in split_page()
2118 VM_BUG_ON_PAGE(!page_count(page), page); in split_page()
2125 if (kmemcheck_page_is_tracked(page)) in split_page()
2126 split_page(virt_to_page(page[0].shadow), order); in split_page()
2129 gfp_mask = get_page_owner_gfp(page); in split_page()
2130 set_page_owner(page, 0, gfp_mask); in split_page()
2132 set_page_refcounted(page + i); in split_page()
2133 set_page_owner(page + i, 0, gfp_mask); in split_page()
2138 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2144 BUG_ON(!PageBuddy(page)); in __isolate_free_page()
2146 zone = page_zone(page); in __isolate_free_page()
2147 mt = get_pageblock_migratetype(page); in __isolate_free_page()
2159 list_del(&page->lru); in __isolate_free_page()
2161 rmv_page_order(page); in __isolate_free_page()
2163 set_page_owner(page, order, __GFP_MOVABLE); in __isolate_free_page()
2167 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2168 for (; page < endpage; page += pageblock_nr_pages) { in __isolate_free_page()
2169 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
2171 set_pageblock_migratetype(page, in __isolate_free_page()
2190 int split_free_page(struct page *page) in split_free_page() argument
2195 order = page_order(page); in split_free_page()
2197 nr_pages = __isolate_free_page(page, order); in split_free_page()
2202 set_page_refcounted(page); in split_free_page()
2203 split_page(page, order); in split_free_page()
2211 struct page *buffered_rmqueue(struct zone *preferred_zone, in buffered_rmqueue()
2216 struct page *page; in buffered_rmqueue() local
2235 page = list_entry(list->prev, struct page, lru); in buffered_rmqueue()
2237 page = list_entry(list->next, struct page, lru); in buffered_rmqueue()
2239 list_del(&page->lru); in buffered_rmqueue()
2257 page = NULL; in buffered_rmqueue()
2259 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in buffered_rmqueue()
2260 if (page) in buffered_rmqueue()
2261 trace_mm_page_alloc_zone_locked(page, order, migratetype); in buffered_rmqueue()
2263 if (!page) in buffered_rmqueue()
2264 page = __rmqueue(zone, order, migratetype, gfp_flags); in buffered_rmqueue()
2266 if (!page) in buffered_rmqueue()
2269 get_pcppage_migratetype(page)); in buffered_rmqueue()
2281 VM_BUG_ON_PAGE(bad_range(zone, page), page); in buffered_rmqueue()
2282 return page; in buffered_rmqueue()
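
buffered_rmqueue() splits into a lock-free fast path and a locked slow path: order-0 requests come from the per-cpu pageset (cold requests take the list tail, hot ones the head), anything larger takes zone->lock and goes through __rmqueue(). A condensed sketch assuming the v4.4-era body; vmstat accounting is elided:

static inline
struct page *buffered_rmqueue(struct zone *preferred_zone, struct zone *zone,
			unsigned int order, gfp_t gfp_flags,
			int alloc_flags, int migratetype)
{
	unsigned long flags;
	struct page *page;
	bool cold = ((gfp_flags & __GFP_COLD) != 0);

	if (likely(order == 0)) {
		/* fast path: per-cpu list, no zone lock */
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list))
			pcp->count += rmqueue_bulk(zone, 0, pcp->batch,
						   list, migratetype, cold);
		if (unlikely(list_empty(list)))
			goto failed;

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		/* slow path: zone->lock, buddy lists */
		spin_lock_irqsave(&zone->lock, flags);

		page = NULL;
		if (alloc_flags & ALLOC_HARDER) {
			/* atomic high-order requests may dip into the highatomic reserve */
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order,
								migratetype);
		}
		if (!page)
			page = __rmqueue(zone, order, migratetype, gfp_flags);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_pcppage_migratetype(page));
	}

	/* per-zone allocation statistics elided */
	local_irq_restore(flags);

	VM_BUG_ON_PAGE(bad_range(zone, page), page);
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}
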
2499 static struct page *
2505 struct page *page = NULL; in get_page_from_freelist() local
2601 page = buffered_rmqueue(ac->preferred_zone, zone, order, in get_page_from_freelist()
2603 if (page) { in get_page_from_freelist()
2604 if (prep_new_page(page, order, gfp_mask, alloc_flags)) in get_page_from_freelist()
2612 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
2614 return page; in get_page_from_freelist()
2702 static inline struct page *
2712 struct page *page; in __alloc_pages_may_oom() local
2731 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, in __alloc_pages_may_oom()
2733 if (page) in __alloc_pages_may_oom()
2767 return page; in __alloc_pages_may_oom()
2772 static struct page *
2779 struct page *page; in __alloc_pages_direct_compact() local
2805 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_compact()
2808 if (page) { in __alloc_pages_direct_compact()
2809 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact()
2814 return page; in __alloc_pages_direct_compact()
2828 static inline struct page *
2868 static inline struct page *
2873 struct page *page = NULL; in __alloc_pages_direct_reclaim() local
2881 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_reclaim()
2889 if (!page && !drained) { in __alloc_pages_direct_reclaim()
2896 return page; in __alloc_pages_direct_reclaim()
2903 static inline struct page *
2907 struct page *page; in __alloc_pages_high_priority() local
2910 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_high_priority()
2913 if (!page && gfp_mask & __GFP_NOFAIL) in __alloc_pages_high_priority()
2916 } while (!page && (gfp_mask & __GFP_NOFAIL)); in __alloc_pages_high_priority()
2918 return page; in __alloc_pages_high_priority()
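
__alloc_pages_high_priority() retries without watermarks and, for __GFP_NOFAIL callers that must not be failed, throttles on congestion between attempts. Reconstruction, assuming the v4.4-era body:

static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
				const struct alloc_context *ac)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, order,
						ALLOC_NO_WATERMARKS, ac);

		/* a __GFP_NOFAIL caller may not be failed: back off and retry */
		if (!page && gfp_mask & __GFP_NOFAIL)
			wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
									HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}
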
2989 static inline struct page *
2994 struct page *page = NULL; in __alloc_pages_slowpath() local
3052 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_slowpath()
3054 if (page) in __alloc_pages_slowpath()
3066 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
3068 if (page) { in __alloc_pages_slowpath()
3096 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3100 if (page) in __alloc_pages_slowpath()
3143 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3145 if (page) in __alloc_pages_slowpath()
3162 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
3163 if (page) in __alloc_pages_slowpath()
3176 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, in __alloc_pages_slowpath()
3180 if (page) in __alloc_pages_slowpath()
3185 return page; in __alloc_pages_slowpath()
3191 struct page *
3196 struct page *page = NULL; in __alloc_pages_nodemask() local
3245 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
3246 if (unlikely(!page)) { in __alloc_pages_nodemask()
3255 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
3258 if (kmemcheck_enabled && page) in __alloc_pages_nodemask()
3259 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_nodemask()
3261 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
3270 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) in __alloc_pages_nodemask()
3273 return page; in __alloc_pages_nodemask()
3282 struct page *page; in __get_free_pages() local
3290 page = alloc_pages(gfp_mask, order); in __get_free_pages()
3291 if (!page) in __get_free_pages()
3293 return (unsigned long) page_address(page); in __get_free_pages()
3303 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
3305 if (put_page_testzero(page)) { in __free_pages()
3307 free_hot_cold_page(page, false); in __free_pages()
3309 __free_pages_ok(page, order); in __free_pages()
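
For callers, the pairing visible above is the whole contract: allocate 2^order contiguous pages, free them at the same order. A hypothetical caller sketch (buffer name and size are illustrative):

	/* hypothetical: grab four contiguous pages, zero them, give them back */
	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* order 2 = 4 pages */
	if (page) {
		void *buf = page_address(page);	/* valid for lowmem pages */

		memset(buf, 0, 4 * PAGE_SIZE);
		__free_pages(page, 2);		/* order must match the allocation */
	}
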
3336 static struct page *__page_frag_refill(struct page_frag_cache *nc, in __page_frag_refill()
3339 struct page *page = NULL; in __page_frag_refill() local
3345 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_refill()
3347 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; in __page_frag_refill()
3349 if (unlikely(!page)) in __page_frag_refill()
3350 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); in __page_frag_refill()
3352 nc->va = page ? page_address(page) : NULL; in __page_frag_refill()
3354 return page; in __page_frag_refill()
3361 struct page *page; in __alloc_page_frag() local
3366 page = __page_frag_refill(nc, gfp_mask); in __alloc_page_frag()
3367 if (!page) in __alloc_page_frag()
3377 atomic_add(size - 1, &page->_count); in __alloc_page_frag()
3380 nc->pfmemalloc = page_is_pfmemalloc(page); in __alloc_page_frag()
3387 page = virt_to_page(nc->va); in __alloc_page_frag()
3389 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count)) in __alloc_page_frag()
3397 atomic_set(&page->_count, size); in __alloc_page_frag()
3416 struct page *page = virt_to_head_page(addr); in __free_page_frag() local
3418 if (unlikely(put_page_testzero(page))) in __free_page_frag()
3419 __free_pages_ok(page, compound_order(page)); in __free_page_frag()
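
The page-frag allocator above carves small fragments out of one large page without taking a reference per fragment: __alloc_page_frag() inflates page->_count once (the atomic_add(size - 1, ...) hit above) and counts outstanding fragments in pagecnt_bias, resynchronizing the two only when the page is exhausted. A condensed sketch assuming the v4.4-era struct page_frag_cache layout:

void *__alloc_page_frag(struct page_frag_cache *nc,
			unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned int size = PAGE_SIZE;
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_refill(nc, gfp_mask);
		if (!page)
			return NULL;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		size = nc->size;
#endif
		/* take size - 1 extra references up front, one per possible fragment */
		atomic_add(size - 1, &page->_count);

		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = size;
		nc->offset = size;
	}

	/* fragments are handed out from the end of the page downward */
	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		/* drop our bias; if other references remain, get a fresh page */
		if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
			goto refill;

		/* sole owner again: reset the count and reuse the same page */
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		size = nc->size;
#endif
		atomic_set(&page->_count, size);
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}
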
3430 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages()
3432 struct page *page; in alloc_kmem_pages() local
3434 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
3435 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { in alloc_kmem_pages()
3436 __free_pages(page, order); in alloc_kmem_pages()
3437 page = NULL; in alloc_kmem_pages()
3439 return page; in alloc_kmem_pages()
3442 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node()
3444 struct page *page; in alloc_kmem_pages_node() local
3446 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
3447 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { in alloc_kmem_pages_node()
3448 __free_pages(page, order); in alloc_kmem_pages_node()
3449 page = NULL; in alloc_kmem_pages_node()
3451 return page; in alloc_kmem_pages_node()
3458 void __free_kmem_pages(struct page *page, unsigned int order) in __free_kmem_pages() argument
3460 memcg_kmem_uncharge(page, order); in __free_kmem_pages()
3461 __free_pages(page, order); in __free_kmem_pages()
3524 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
4530 struct page *page = pfn_to_page(pfn); in memmap_init_zone() local
4532 __init_single_page(page, pfn, zone, nid); in memmap_init_zone()
4533 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in memmap_init_zone()
5185 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; in calc_memmap_size()
5303 struct page *map; in alloc_node_mem_map()
5312 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
5799 void adjust_managed_page_count(struct page *page, long count) in adjust_managed_page_count() argument
5802 page_zone(page)->managed_pages += count; in adjust_managed_page_count()
5805 if (PageHighMem(page)) in adjust_managed_page_count()
5834 void free_highmem_page(struct page *page) in free_highmem_page() argument
5836 __free_reserved_page(page); in free_highmem_page()
5838 page_zone(page)->managed_pages++; in free_highmem_page()
6455 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, in get_pfnblock_flags_mask() argument
6464 zone = page_zone(page); in get_pfnblock_flags_mask()
6483 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, in set_pfnblock_flags_mask() argument
6495 zone = page_zone(page); in set_pfnblock_flags_mask()
6501 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); in set_pfnblock_flags_mask()
6524 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
6536 mt = get_pageblock_migratetype(page); in has_unmovable_pages()
6540 pfn = page_to_pfn(page); in has_unmovable_pages()
6547 page = pfn_to_page(check); in has_unmovable_pages()
6554 if (PageHuge(page)) { in has_unmovable_pages()
6555 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; in has_unmovable_pages()
6565 if (!atomic_read(&page->_count)) { in has_unmovable_pages()
6566 if (PageBuddy(page)) in has_unmovable_pages()
6567 iter += (1 << page_order(page)) - 1; in has_unmovable_pages()
6575 if (skip_hwpoisoned_pages && PageHWPoison(page)) in has_unmovable_pages()
6578 if (!PageLRU(page)) in has_unmovable_pages()
6599 bool is_pageblock_removable_nolock(struct page *page) in is_pageblock_removable_nolock() argument
6611 if (!node_online(page_to_nid(page))) in is_pageblock_removable_nolock()
6614 zone = page_zone(page); in is_pageblock_removable_nolock()
6615 pfn = page_to_pfn(page); in is_pageblock_removable_nolock()
6619 return !has_unmovable_pages(zone, page, 0, true); in is_pageblock_removable_nolock()
6813 struct page *page = pfn_to_page(pfn); in free_contig_range() local
6815 count += page_count(page) != 1; in free_contig_range()
6816 __free_page(page); in free_contig_range()
6864 struct page *page; in __offline_isolated_pages() local
6883 page = pfn_to_page(pfn); in __offline_isolated_pages()
6888 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { in __offline_isolated_pages()
6890 SetPageReserved(page); in __offline_isolated_pages()
6894 BUG_ON(page_count(page)); in __offline_isolated_pages()
6895 BUG_ON(!PageBuddy(page)); in __offline_isolated_pages()
6896 order = page_order(page); in __offline_isolated_pages()
6901 list_del(&page->lru); in __offline_isolated_pages()
6902 rmv_page_order(page); in __offline_isolated_pages()
6905 SetPageReserved((page+i)); in __offline_isolated_pages()
6913 bool is_free_buddy_page(struct page *page) in is_free_buddy_page() argument
6915 struct zone *zone = page_zone(page); in is_free_buddy_page()
6916 unsigned long pfn = page_to_pfn(page); in is_free_buddy_page()
6922 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
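
is_free_buddy_page() shows the head-block arithmetic one more time: clearing the low order bits of the pfn (page - (pfn & ((1 << order) - 1))) yields the start of the order-sized, order-aligned block that would contain the page; if that head is a free buddy of at least that order, the page is free. Reconstruction, assuming the v4.4-era body:

bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		/* start of the order-sized block holding this page */
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
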