Lines matching refs:page in mm/page_alloc.c (the Linux buddy allocator). Each entry shows the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" notes flag how the identifier is used at that site.
168 static void __free_pages_ok(struct page *page, unsigned int order);
238 void set_pageblock_migratetype(struct page *page, int migratetype) in set_pageblock_migratetype() argument
244 set_pageblock_flags_group(page, (unsigned long)migratetype, in set_pageblock_migratetype()
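The two lines above record a pageblock's migratetype in a packed per-block bitfield via set_pageblock_flags_group(). Below is a minimal user-space model of that packing, assuming this kernel's layout of 4 bits per pageblock (3 migratetype bits plus the compaction-skip bit); the flat array stands in for zone->pageblock_flags, and every model_* name is made up for illustration.

/* User-space model of the bit packing behind set_pageblock_flags_group();
 * a toy stand-in, not the kernel's implementation. */
#include <stdio.h>

#define NR_PAGEBLOCK_BITS 4            /* 3 migratetype bits + skip bit */
#define MIGRATETYPE_MASK  0x7UL
#define BITS_PER_LONG     (8 * sizeof(unsigned long))

static unsigned long pageblock_flags[4];       /* toy bitmap */

static void model_set_migratetype(unsigned long block, unsigned long mt)
{
        unsigned long bit  = block * NR_PAGEBLOCK_BITS;
        unsigned long word = bit / BITS_PER_LONG;
        unsigned long off  = bit % BITS_PER_LONG;

        pageblock_flags[word] &= ~(MIGRATETYPE_MASK << off);   /* clear old */
        pageblock_flags[word] |=  mt << off;                   /* store new */
}

static unsigned long model_get_migratetype(unsigned long block)
{
        unsigned long bit = block * NR_PAGEBLOCK_BITS;

        return (pageblock_flags[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG))
                & MIGRATETYPE_MASK;
}

int main(void)
{
        model_set_migratetype(5, 1);   /* e.g. a MIGRATE_MOVABLE-like value */
        printf("block 5 -> migratetype %lu\n", model_get_migratetype(5));
        return 0;
}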
249 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
253 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
272 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
274 if (!pfn_valid_within(page_to_pfn(page))) in page_is_consistent()
276 if (zone != page_zone(page)) in page_is_consistent()
284 static int bad_range(struct zone *zone, struct page *page) in bad_range() argument
286 if (page_outside_zone_boundaries(zone, page)) in bad_range()
288 if (!page_is_consistent(zone, page)) in bad_range()
294 static inline int bad_range(struct zone *zone, struct page *page) in bad_range() argument
300 static void bad_page(struct page *page, const char *reason, in bad_page() argument
308 if (PageHWPoison(page)) { in bad_page()
309 page_mapcount_reset(page); /* remove PageBuddy */ in bad_page()
334 current->comm, page_to_pfn(page)); in bad_page()
335 dump_page_badflags(page, reason, bad_flags); in bad_page()
341 page_mapcount_reset(page); /* remove PageBuddy */ in bad_page()
360 static void free_compound_page(struct page *page) in free_compound_page() argument
362 __free_pages_ok(page, compound_order(page)); in free_compound_page()
365 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
370 set_compound_page_dtor(page, free_compound_page); in prep_compound_page()
371 set_compound_order(page, order); in prep_compound_page()
372 __SetPageHead(page); in prep_compound_page()
374 struct page *p = page + i; in prep_compound_page()
376 p->first_page = page; in prep_compound_page()
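prep_compound_page() above marks the first page as the head, stores the order, and points every tail page back at the head through first_page (this tree predates the compound_head()/tail-mapping rework). A self-contained user-space model of that linkage, with a hypothetical struct fake_page standing in for struct page:

/* User-space model of the compound-page linkage set up above. */
#include <stdio.h>

struct fake_page {
        struct fake_page *first_page;   /* tail -> head back-pointer */
        unsigned int order;             /* meaningful on the head only */
        int is_head;
};

static void model_prep_compound(struct fake_page *page, unsigned int order)
{
        unsigned long i, nr_pages = 1UL << order;

        page->order = order;            /* mirrors set_compound_order() */
        page->is_head = 1;              /* mirrors __SetPageHead()      */
        for (i = 1; i < nr_pages; i++) {
                struct fake_page *p = page + i;

                p->is_head = 0;
                p->first_page = page;   /* mirrors p->first_page = page */
        }
}

int main(void)
{
        struct fake_page pages[8];

        model_prep_compound(pages, 3);  /* order-3: 8 contiguous pages */
        printf("tail 5 points at head? %d\n", pages[5].first_page == &pages[0]);
        return 0;
}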
383 static inline void prep_zero_page(struct page *page, unsigned int order, in prep_zero_page() argument
394 clear_highpage(page + i); in prep_zero_page()
450 static inline void set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
458 page_ext = lookup_page_ext(page); in set_page_guard()
461 INIT_LIST_HEAD(&page->lru); in set_page_guard()
462 set_page_private(page, order); in set_page_guard()
467 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
475 page_ext = lookup_page_ext(page); in clear_page_guard()
478 set_page_private(page, 0); in clear_page_guard()
484 static inline void set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
486 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
490 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
492 set_page_private(page, order); in set_page_order()
493 __SetPageBuddy(page); in set_page_order()
496 static inline void rmv_page_order(struct page *page) in rmv_page_order() argument
498 __ClearPageBuddy(page); in rmv_page_order()
499 set_page_private(page, 0); in rmv_page_order()
517 static inline int page_is_buddy(struct page *page, struct page *buddy, in page_is_buddy() argument
524 if (page_zone_id(page) != page_zone_id(buddy)) in page_is_buddy()
538 if (page_zone_id(page) != page_zone_id(buddy)) in page_is_buddy()
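page_is_buddy() decides whether two blocks may merge: the candidate must be a free block of the same order and, per the two checks above, in the same zone. Locating the buddy itself is pure index arithmetic: flip bit 'order' of the block index. A runnable demo of that arithmetic (it mirrors what __free_one_page() computes; nothing kernel-specific here):

/* The buddy of a block at index page_idx and size 2^order is found by
 * flipping bit 'order' of the index; ANDing the pair gives the merged
 * block's starting index. */
#include <stdio.h>

static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1UL << order);
}

int main(void)
{
        /* An order-2 block at index 4 pairs with the block at index 0;
         * merged, they form the order-3 block starting at index 0. */
        unsigned long idx = 4;
        unsigned int order = 2;
        unsigned long buddy = buddy_index(idx, order);
        unsigned long combined = idx & buddy;   /* lower of the pair */

        printf("buddy of %lu at order %u is %lu, merge -> %lu\n",
               idx, order, buddy, combined);
        return 0;
}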
573 static inline void __free_one_page(struct page *page, in __free_one_page() argument
581 struct page *buddy; in __free_one_page()
587 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
595 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); in __free_one_page()
596 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
601 buddy = page + (buddy_idx - page_idx); in __free_one_page()
602 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
616 page = page + (combined_idx - page_idx); in __free_one_page()
633 buddy = page + (buddy_idx - page_idx); in __free_one_page()
646 set_page_order(page, order); in __free_one_page()
657 struct page *higher_page, *higher_buddy; in __free_one_page()
659 higher_page = page + (combined_idx - page_idx); in __free_one_page()
663 list_add_tail(&page->lru, in __free_one_page()
669 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
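The loop in __free_one_page() above keeps merging: while the buddy of the current block is also free at the same order, pull it off its free list, combine the pair, and retry one order higher; the final block lands on free_list[order]. A compact user-space sketch of that loop, where free_at[] is a toy stand-in for the real free lists and the PageBuddy/page_order checks:

/* User-space sketch of the buddy merge loop above. free_at[idx] holds
 * order+1 if a free block starts at idx, 0 otherwise. */
#include <stdio.h>

#define MAX_ORDER 11

static int free_at[1 << MAX_ORDER];

static void model_free_one_page(unsigned long page_idx, unsigned int order)
{
        while (order < MAX_ORDER - 1) {
                unsigned long buddy_idx = page_idx ^ (1UL << order);

                if (free_at[buddy_idx] != (int)order + 1)
                        break;                   /* buddy busy or split: stop */
                free_at[buddy_idx] = 0;          /* take buddy off its list  */
                page_idx &= buddy_idx;           /* combined block's index   */
                order++;
        }
        free_at[page_idx] = order + 1;           /* place on free list[order] */
}

int main(void)
{
        model_free_one_page(0, 0);
        model_free_one_page(1, 0);   /* merges with 0 into an order-1 block */
        printf("index 0 now free at order %d\n", free_at[0] - 1);
        return 0;
}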
674 static inline int free_pages_check(struct page *page) in free_pages_check() argument
679 if (unlikely(page_mapcount(page))) in free_pages_check()
681 if (unlikely(page->mapping != NULL)) in free_pages_check()
683 if (unlikely(atomic_read(&page->_count) != 0)) in free_pages_check()
685 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) { in free_pages_check()
690 if (unlikely(page->mem_cgroup)) in free_pages_check()
694 bad_page(page, bad_reason, bad_flags); in free_pages_check()
697 page_cpupid_reset_last(page); in free_pages_check()
698 if (page->flags & PAGE_FLAGS_CHECK_AT_PREP) in free_pages_check()
699 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_check()
728 struct page *page; in free_pcppages_bulk() local
752 page = list_entry(list->prev, struct page, lru); in free_pcppages_bulk()
754 list_del(&page->lru); in free_pcppages_bulk()
755 mt = get_freepage_migratetype(page); in free_pcppages_bulk()
757 mt = get_pageblock_migratetype(page); in free_pcppages_bulk()
760 __free_one_page(page, page_to_pfn(page), zone, 0, mt); in free_pcppages_bulk()
761 trace_mm_page_pcpu_drain(page, 0, mt); in free_pcppages_bulk()
768 struct page *page, unsigned long pfn, in free_one_page() argument
780 migratetype = get_pfnblock_migratetype(page, pfn); in free_one_page()
782 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
786 static int free_tail_pages_check(struct page *head_page, struct page *page) in free_tail_pages_check() argument
790 if (unlikely(!PageTail(page))) { in free_tail_pages_check()
791 bad_page(page, "PageTail not set", 0); in free_tail_pages_check()
794 if (unlikely(page->first_page != head_page)) { in free_tail_pages_check()
795 bad_page(page, "first_page not consistent", 0); in free_tail_pages_check()
801 static bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
803 bool compound = PageCompound(page); in free_pages_prepare()
806 VM_BUG_ON_PAGE(PageTail(page), page); in free_pages_prepare()
807 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
809 trace_mm_page_free(page, order); in free_pages_prepare()
810 kmemcheck_free_shadow(page, order); in free_pages_prepare()
811 kasan_free_pages(page, order); in free_pages_prepare()
813 if (PageAnon(page)) in free_pages_prepare()
814 page->mapping = NULL; in free_pages_prepare()
815 bad += free_pages_check(page); in free_pages_prepare()
818 bad += free_tail_pages_check(page, page + i); in free_pages_prepare()
819 bad += free_pages_check(page + i); in free_pages_prepare()
824 reset_page_owner(page, order); in free_pages_prepare()
826 if (!PageHighMem(page)) { in free_pages_prepare()
827 debug_check_no_locks_freed(page_address(page), in free_pages_prepare()
829 debug_check_no_obj_freed(page_address(page), in free_pages_prepare()
832 arch_free_page(page, order); in free_pages_prepare()
833 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
838 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
842 unsigned long pfn = page_to_pfn(page); in __free_pages_ok()
844 if (!free_pages_prepare(page, order)) in __free_pages_ok()
847 migratetype = get_pfnblock_migratetype(page, pfn); in __free_pages_ok()
850 set_freepage_migratetype(page, migratetype); in __free_pages_ok()
851 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
855 void __init __free_pages_bootmem(struct page *page, unsigned long pfn, in __free_pages_bootmem() argument
859 struct page *p = page; in __free_pages_bootmem()
871 page_zone(page)->managed_pages += nr_pages; in __free_pages_bootmem()
872 set_page_refcounted(page); in __free_pages_bootmem()
873 __free_pages(page, order); in __free_pages_bootmem()
878 void __init init_cma_reserved_pageblock(struct page *page) in init_cma_reserved_pageblock() argument
881 struct page *p = page; in init_cma_reserved_pageblock()
888 set_pageblock_migratetype(page, MIGRATE_CMA); in init_cma_reserved_pageblock()
892 p = page; in init_cma_reserved_pageblock()
899 set_page_refcounted(page); in init_cma_reserved_pageblock()
900 __free_pages(page, pageblock_order); in init_cma_reserved_pageblock()
903 adjust_managed_page_count(page, pageblock_nr_pages); in init_cma_reserved_pageblock()
921 static inline void expand(struct zone *zone, struct page *page, in expand() argument
931 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
942 set_page_guard(zone, &page[size], high, migratetype); in expand()
945 list_add(&page[size].lru, &area->free_list[migratetype]); in expand()
947 set_page_order(&page[size], high); in expand()
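expand() is the inverse of merging: a block of order 'high' was removed from the free lists to satisfy an order-'low' request, and each halving returns the upper half to the free list one order down (with the set_page_guard()/set_page_order() calls seen above). A user-space sketch of the split sequence; the printed ranges are page indexes relative to the block:

/* User-space sketch of expand(): halve the block until it fits the
 * request, freeing the upper half at each step. */
#include <stdio.h>

static void model_expand(unsigned long page_idx, unsigned int low,
                         unsigned int high)
{
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                /* the upper half at page_idx + size goes on free_list[high] */
                printf("freeing [%lu..%lu] at order %u\n",
                       page_idx + size, page_idx + 2 * size - 1, high);
        }
        /* [page_idx..page_idx + (1 << low) - 1] is handed to the caller */
}

int main(void)
{
        model_expand(0, 0, 3);   /* order-3 block split down to order 0 */
        return 0;
}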
954 static inline int check_new_page(struct page *page) in check_new_page() argument
959 if (unlikely(page_mapcount(page))) in check_new_page()
961 if (unlikely(page->mapping != NULL)) in check_new_page()
963 if (unlikely(atomic_read(&page->_count) != 0)) in check_new_page()
965 if (unlikely(page->flags & __PG_HWPOISON)) { in check_new_page()
969 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { in check_new_page()
974 if (unlikely(page->mem_cgroup)) in check_new_page()
978 bad_page(page, bad_reason, bad_flags); in check_new_page()
984 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
990 struct page *p = page + i; in prep_new_page()
995 set_page_private(page, 0); in prep_new_page()
996 set_page_refcounted(page); in prep_new_page()
998 arch_alloc_page(page, order); in prep_new_page()
999 kernel_map_pages(page, 1 << order, 1); in prep_new_page()
1000 kasan_alloc_pages(page, order); in prep_new_page()
1003 prep_zero_page(page, order, gfp_flags); in prep_new_page()
1006 prep_compound_page(page, order); in prep_new_page()
1008 set_page_owner(page, order, gfp_flags); in prep_new_page()
1017 set_page_pfmemalloc(page); in prep_new_page()
1019 clear_page_pfmemalloc(page); in prep_new_page()
1029 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest()
1034 struct page *page; in __rmqueue_smallest() local
1042 page = list_entry(area->free_list[migratetype].next, in __rmqueue_smallest()
1043 struct page, lru); in __rmqueue_smallest()
1044 list_del(&page->lru); in __rmqueue_smallest()
1045 rmv_page_order(page); in __rmqueue_smallest()
1047 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
1048 set_freepage_migratetype(page, migratetype); in __rmqueue_smallest()
1049 return page; in __rmqueue_smallest()
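__rmqueue_smallest() scans from the requested order upward until some free_area has a block, then lets expand() split off the excess. A toy model of just the search, where nr_free[] mimics zone->free_area[].nr_free:

/* Toy model of the order scan in __rmqueue_smallest(). */
#include <stdio.h>

#define MAX_ORDER 11

static int nr_free[MAX_ORDER];   /* stand-in for zone->free_area[].nr_free */

static int model_rmqueue_smallest(unsigned int order)
{
        unsigned int current_order;

        for (current_order = order; current_order < MAX_ORDER; current_order++) {
                if (nr_free[current_order] == 0)
                        continue;            /* list empty, try a bigger order */
                nr_free[current_order]--;    /* take the first block off the list */
                return current_order;        /* caller splits the remainder */
        }
        return -1;                           /* nothing free: fall back / fail */
}

int main(void)
{
        nr_free[5] = 1;
        printf("order-2 request served from order %d\n",
               model_rmqueue_smallest(2));
        return 0;
}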
1074 static struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1080 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1090 struct page *start_page, struct page *end_page, in move_freepages()
1093 struct page *page; in move_freepages() local
1108 for (page = start_page; page <= end_page;) { in move_freepages()
1110 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
1112 if (!pfn_valid_within(page_to_pfn(page))) { in move_freepages()
1113 page++; in move_freepages()
1117 if (!PageBuddy(page)) { in move_freepages()
1118 page++; in move_freepages()
1122 order = page_order(page); in move_freepages()
1123 list_move(&page->lru, in move_freepages()
1125 set_freepage_migratetype(page, migratetype); in move_freepages()
1126 page += 1 << order; in move_freepages()
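The scan in move_freepages() above steps one page at a time over busy pages but, on hitting the head of a free buddy block, moves the whole block to the destination migratetype list and jumps 1 << order pages ahead. A self-contained model of that walk; order_at[] is an invented encoding (the order of a free block at its head page, -1 elsewhere):

/* User-space model of the move_freepages() walk. */
#include <stdio.h>

#define BLOCK_PAGES 16

static int order_at[BLOCK_PAGES] = { 2, -1, -1, -1, -1, 0, -1, 1, -1, -1,
                                     -1, -1, 2, -1, -1, -1 };

int main(void)
{
        int page = 0, moved = 0;

        while (page < BLOCK_PAGES) {
                if (order_at[page] < 0) {
                        page++;                       /* not a free head: step */
                        continue;
                }
                moved += 1 << order_at[page];         /* list_move() the block */
                page  += 1 << order_at[page];         /* skip the whole block  */
        }
        printf("moved %d free pages to the new migratetype list\n", moved);
        return 0;
}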
1133 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
1137 struct page *start_page, *end_page; in move_freepages_block()
1139 start_pfn = page_to_pfn(page); in move_freepages_block()
1147 start_page = page; in move_freepages_block()
1154 static void change_pageblock_range(struct page *pageblock_page, in change_pageblock_range()
1205 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
1208 unsigned int current_order = page_order(page); in steal_suitable_fallback()
1213 change_pageblock_range(page, current_order, start_type); in steal_suitable_fallback()
1217 pages = move_freepages_block(zone, page, start_type); in steal_suitable_fallback()
1222 set_pageblock_migratetype(page, start_type); in steal_suitable_fallback()
1263 static inline struct page *
1268 struct page *page; in __rmqueue_fallback() local
1282 page = list_entry(area->free_list[fallback_mt].next, in __rmqueue_fallback()
1283 struct page, lru); in __rmqueue_fallback()
1285 steal_suitable_fallback(zone, page, start_migratetype); in __rmqueue_fallback()
1289 list_del(&page->lru); in __rmqueue_fallback()
1290 rmv_page_order(page); in __rmqueue_fallback()
1292 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1302 set_freepage_migratetype(page, start_migratetype); in __rmqueue_fallback()
1304 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
1307 return page; in __rmqueue_fallback()
1317 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue()
1320 struct page *page; in __rmqueue() local
1323 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1325 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { in __rmqueue()
1327 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
1329 if (!page) in __rmqueue()
1330 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1337 if (!page) { in __rmqueue()
1343 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
1344 return page; in __rmqueue()
1360 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk() local
1361 if (unlikely(page == NULL)) in rmqueue_bulk()
1374 list_add(&page->lru, list); in rmqueue_bulk()
1376 list_add_tail(&page->lru, list); in rmqueue_bulk()
1377 list = &page->lru; in rmqueue_bulk()
1378 if (is_migrate_cma(get_freepage_migratetype(page))) in rmqueue_bulk()
1540 struct page *page = pfn_to_page(pfn); in mark_free_pages() local
1542 if (!swsusp_page_is_forbidden(page)) in mark_free_pages()
1543 swsusp_unset_page_free(page); in mark_free_pages()
1550 pfn = page_to_pfn(list_entry(curr, struct page, lru)); in mark_free_pages()
1563 void free_hot_cold_page(struct page *page, bool cold) in free_hot_cold_page() argument
1565 struct zone *zone = page_zone(page); in free_hot_cold_page()
1568 unsigned long pfn = page_to_pfn(page); in free_hot_cold_page()
1571 if (!free_pages_prepare(page, 0)) in free_hot_cold_page()
1574 migratetype = get_pfnblock_migratetype(page, pfn); in free_hot_cold_page()
1575 set_freepage_migratetype(page, migratetype); in free_hot_cold_page()
1588 free_one_page(zone, page, pfn, 0, migratetype); in free_hot_cold_page()
1596 list_add(&page->lru, &pcp->lists[migratetype]); in free_hot_cold_page()
1598 list_add_tail(&page->lru, &pcp->lists[migratetype]); in free_hot_cold_page()
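free_hot_cold_page() is the per-CPU fast path for order-0 frees: cache-hot pages are pushed on the head of the per-CPU list, cold pages on the tail, and once the list exceeds pcp->high a batch is drained to the buddy lists via free_pcppages_bulk(). A loose user-space model of that behaviour; the array-based list and the constants are invented (the real code keeps linked lists per migratetype):

/* Toy model of the per-CPU page-free caching above. */
#include <stdio.h>

#define PCP_HIGH  6
#define PCP_BATCH 3

static unsigned long pcp_list[PCP_HIGH + 1];
static int pcp_count;

static void model_free_hot_cold(unsigned long pfn, int cold)
{
        int i;

        if (cold) {
                pcp_list[pcp_count++] = pfn;          /* tail: reused last  */
        } else {
                for (i = pcp_count; i > 0; i--)       /* head: reused first */
                        pcp_list[i] = pcp_list[i - 1];
                pcp_list[0] = pfn;
                pcp_count++;
        }
        if (pcp_count > PCP_HIGH) {
                printf("draining %d pages to the buddy lists\n", PCP_BATCH);
                pcp_count -= PCP_BATCH;               /* free_pcppages_bulk() */
        }
}

int main(void)
{
        unsigned long pfn;

        for (pfn = 100; pfn < 108; pfn++)
                model_free_hot_cold(pfn, pfn & 1);
        printf("%d pages still cached per-CPU\n", pcp_count);
        return 0;
}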
1615 struct page *page, *next; in free_hot_cold_page_list() local
1617 list_for_each_entry_safe(page, next, list, lru) { in free_hot_cold_page_list()
1618 trace_mm_page_free_batched(page, cold); in free_hot_cold_page_list()
1619 free_hot_cold_page(page, cold); in free_hot_cold_page_list()
1631 void split_page(struct page *page, unsigned int order) in split_page() argument
1635 VM_BUG_ON_PAGE(PageCompound(page), page); in split_page()
1636 VM_BUG_ON_PAGE(!page_count(page), page); in split_page()
1643 if (kmemcheck_page_is_tracked(page)) in split_page()
1644 split_page(virt_to_page(page[0].shadow), order); in split_page()
1647 set_page_owner(page, 0, 0); in split_page()
1649 set_page_refcounted(page + i); in split_page()
1650 set_page_owner(page + i, 0, 0); in split_page()
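split_page() turns one non-compound order-N allocation into 2^N independently refcounted order-0 pages, which is why the lines above reset ownership and refcounts per page. A kernel-style usage sketch, written as a trivial module so it is self-contained (assumes a 4.x-era tree; split_page is exported GPL-only):

/* Kernel-module sketch: split an order-2 allocation so each of the four
 * pages can be freed independently. */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int __init split_demo_init(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 2);
        int i;

        if (!page)
                return -ENOMEM;
        split_page(page, 2);              /* now 4 independent order-0 pages */
        for (i = 0; i < 4; i++)
                __free_page(page + i);    /* each may be freed on its own */
        return 0;
}

static void __exit split_demo_exit(void) { }

module_init(split_demo_init);
module_exit(split_demo_exit);
MODULE_LICENSE("GPL");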
1655 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
1661 BUG_ON(!PageBuddy(page)); in __isolate_free_page()
1663 zone = page_zone(page); in __isolate_free_page()
1664 mt = get_pageblock_migratetype(page); in __isolate_free_page()
1676 list_del(&page->lru); in __isolate_free_page()
1678 rmv_page_order(page); in __isolate_free_page()
1682 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
1683 for (; page < endpage; page += pageblock_nr_pages) { in __isolate_free_page()
1684 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
1686 set_pageblock_migratetype(page, in __isolate_free_page()
1691 set_page_owner(page, order, 0); in __isolate_free_page()
1705 int split_free_page(struct page *page) in split_free_page() argument
1710 order = page_order(page); in split_free_page()
1712 nr_pages = __isolate_free_page(page, order); in split_free_page()
1717 set_page_refcounted(page); in split_free_page()
1718 split_page(page, order); in split_free_page()
1726 struct page *buffered_rmqueue(struct zone *preferred_zone, in buffered_rmqueue()
1731 struct page *page; in buffered_rmqueue() local
1750 page = list_entry(list->prev, struct page, lru); in buffered_rmqueue()
1752 page = list_entry(list->next, struct page, lru); in buffered_rmqueue()
1754 list_del(&page->lru); in buffered_rmqueue()
1771 page = __rmqueue(zone, order, migratetype); in buffered_rmqueue()
1773 if (!page) in buffered_rmqueue()
1776 get_freepage_migratetype(page)); in buffered_rmqueue()
1788 VM_BUG_ON_PAGE(bad_range(zone, page), page); in buffered_rmqueue()
1789 return page; in buffered_rmqueue()
2108 static struct page *
2114 struct page *page = NULL; in get_page_from_freelist() local
2251 page = buffered_rmqueue(ac->preferred_zone, zone, order, in get_page_from_freelist()
2253 if (page) { in get_page_from_freelist()
2254 if (prep_new_page(page, order, gfp_mask, alloc_flags)) in get_page_from_freelist()
2256 return page; in get_page_from_freelist()
2395 static inline struct page *
2399 struct page *page; in __alloc_pages_may_oom() local
2418 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, in __alloc_pages_may_oom()
2420 if (page) in __alloc_pages_may_oom()
2453 return page; in __alloc_pages_may_oom()
2458 static struct page *
2465 struct page *page; in __alloc_pages_direct_compact() local
2491 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_compact()
2494 if (page) { in __alloc_pages_direct_compact()
2495 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact()
2500 return page; in __alloc_pages_direct_compact()
2514 static inline struct page *
2554 static inline struct page *
2559 struct page *page = NULL; in __alloc_pages_direct_reclaim() local
2571 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_reclaim()
2578 if (!page && !drained) { in __alloc_pages_direct_reclaim()
2584 return page; in __alloc_pages_direct_reclaim()
2591 static inline struct page *
2595 struct page *page; in __alloc_pages_high_priority() local
2598 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_high_priority()
2601 if (!page && gfp_mask & __GFP_NOFAIL) in __alloc_pages_high_priority()
2604 } while (!page && (gfp_mask & __GFP_NOFAIL)); in __alloc_pages_high_priority()
2606 return page; in __alloc_pages_high_priority()
2673 static inline struct page *
2678 struct page *page = NULL; in __alloc_pages_slowpath() local
2728 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_slowpath()
2730 if (page) in __alloc_pages_slowpath()
2742 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
2744 if (page) { in __alloc_pages_slowpath()
2772 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
2776 if (page) in __alloc_pages_slowpath()
2820 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
2822 if (page) in __alloc_pages_slowpath()
2835 page = __alloc_pages_may_oom(gfp_mask, order, ac, in __alloc_pages_slowpath()
2837 if (page) in __alloc_pages_slowpath()
2851 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
2855 if (page) in __alloc_pages_slowpath()
2862 return page; in __alloc_pages_slowpath()
2868 struct page *
2873 struct page *page = NULL; in __alloc_pages_nodemask() local
2918 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
2919 if (unlikely(!page)) { in __alloc_pages_nodemask()
2927 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
2930 if (kmemcheck_enabled && page) in __alloc_pages_nodemask()
2931 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_nodemask()
2933 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
2942 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) in __alloc_pages_nodemask()
2945 return page; in __alloc_pages_nodemask()
2954 struct page *page; in __get_free_pages() local
2962 page = alloc_pages(gfp_mask, order); in __get_free_pages()
2963 if (!page) in __get_free_pages()
2965 return (unsigned long) page_address(page); in __get_free_pages()
2975 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
2977 if (put_page_testzero(page)) { in __free_pages()
2979 free_hot_cold_page(page, false); in __free_pages()
2981 __free_pages_ok(page, order); in __free_pages()
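__free_pages() is the release half of the allocator's public pairing: when the refcount drops to zero, order-0 frees take the per-CPU path (free_hot_cold_page) and anything larger goes straight to __free_pages_ok(). A kernel-context sketch of the usual pairing with alloc_pages(); the helper names here are invented:

/* Kernel-context sketch of the alloc/free pairing around
 * __alloc_pages_nodemask(). */
#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_two_pages(struct page **pagep)
{
        struct page *page = alloc_pages(GFP_KERNEL, 1);  /* order-1 pair */

        if (!page)
                return NULL;
        *pagep = page;
        return page_address(page);   /* GFP_KERNEL memory is always mapped */
}

static void drop_two_pages(struct page *page)
{
        __free_pages(page, 1);       /* order must match the allocation */
}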
3004 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages()
3006 struct page *page; in alloc_kmem_pages() local
3011 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
3012 memcg_kmem_commit_charge(page, memcg, order); in alloc_kmem_pages()
3013 return page; in alloc_kmem_pages()
3016 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node()
3018 struct page *page; in alloc_kmem_pages_node() local
3023 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
3024 memcg_kmem_commit_charge(page, memcg, order); in alloc_kmem_pages_node()
3025 return page; in alloc_kmem_pages_node()
3032 void __free_kmem_pages(struct page *page, unsigned int order) in __free_kmem_pages() argument
3034 memcg_kmem_uncharge_pages(page, order); in __free_kmem_pages()
3035 __free_pages(page, order); in __free_kmem_pages()
3100 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
4105 struct page *page; in setup_zone_migrate_reserve() local
4140 page = pfn_to_page(pfn); in setup_zone_migrate_reserve()
4143 if (page_to_nid(page) != zone_to_nid(zone)) in setup_zone_migrate_reserve()
4146 block_migratetype = get_pageblock_migratetype(page); in setup_zone_migrate_reserve()
4166 set_pageblock_migratetype(page, in setup_zone_migrate_reserve()
4168 move_freepages_block(zone, page, in setup_zone_migrate_reserve()
4186 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in setup_zone_migrate_reserve()
4187 move_freepages_block(zone, page, MIGRATE_MOVABLE); in setup_zone_migrate_reserve()
4200 struct page *page; in memmap_init_zone() local
4221 page = pfn_to_page(pfn); in memmap_init_zone()
4222 set_page_links(page, zone, nid, pfn); in memmap_init_zone()
4223 mminit_verify_page_links(page, zone, nid, pfn); in memmap_init_zone()
4224 init_page_count(page); in memmap_init_zone()
4225 page_mapcount_reset(page); in memmap_init_zone()
4226 page_cpupid_reset_last(page); in memmap_init_zone()
4227 SetPageReserved(page); in memmap_init_zone()
4245 set_pageblock_migratetype(page, MIGRATE_MOVABLE); in memmap_init_zone()
4247 INIT_LIST_HEAD(&page->lru); in memmap_init_zone()
4251 set_page_address(page, __va(pfn << PAGE_SHIFT)); in memmap_init_zone()
4915 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; in calc_memmap_size()
5037 struct page *map; in alloc_node_mem_map()
5047 size = (end - start) * sizeof(struct page); in alloc_node_mem_map()
5531 void adjust_managed_page_count(struct page *page, long count) in adjust_managed_page_count() argument
5534 page_zone(page)->managed_pages += count; in adjust_managed_page_count()
5537 if (PageHighMem(page)) in adjust_managed_page_count()
5566 void free_highmem_page(struct page *page) in free_highmem_page() argument
5568 __free_reserved_page(page); in free_highmem_page()
5570 page_zone(page)->managed_pages++; in free_highmem_page()
6188 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, in get_pfnblock_flags_mask() argument
6197 zone = page_zone(page); in get_pfnblock_flags_mask()
6216 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, in set_pfnblock_flags_mask() argument
6228 zone = page_zone(page); in set_pfnblock_flags_mask()
6234 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); in set_pfnblock_flags_mask()
6257 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, in has_unmovable_pages() argument
6269 mt = get_pageblock_migratetype(page); in has_unmovable_pages()
6273 pfn = page_to_pfn(page); in has_unmovable_pages()
6280 page = pfn_to_page(check); in has_unmovable_pages()
6287 if (PageHuge(page)) { in has_unmovable_pages()
6288 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; in has_unmovable_pages()
6298 if (!atomic_read(&page->_count)) { in has_unmovable_pages()
6299 if (PageBuddy(page)) in has_unmovable_pages()
6300 iter += (1 << page_order(page)) - 1; in has_unmovable_pages()
6308 if (skip_hwpoisoned_pages && PageHWPoison(page)) in has_unmovable_pages()
6311 if (!PageLRU(page)) in has_unmovable_pages()
6332 bool is_pageblock_removable_nolock(struct page *page) in is_pageblock_removable_nolock() argument
6344 if (!node_online(page_to_nid(page))) in is_pageblock_removable_nolock()
6347 zone = page_zone(page); in is_pageblock_removable_nolock()
6348 pfn = page_to_pfn(page); in is_pageblock_removable_nolock()
6352 return !has_unmovable_pages(zone, page, 0, true); in is_pageblock_removable_nolock()
6546 struct page *page = pfn_to_page(pfn); in free_contig_range() local
6548 count += page_count(page) != 1; in free_contig_range()
6549 __free_page(page); in free_contig_range()
6597 struct page *page; in __offline_isolated_pages() local
6616 page = pfn_to_page(pfn); in __offline_isolated_pages()
6621 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { in __offline_isolated_pages()
6623 SetPageReserved(page); in __offline_isolated_pages()
6627 BUG_ON(page_count(page)); in __offline_isolated_pages()
6628 BUG_ON(!PageBuddy(page)); in __offline_isolated_pages()
6629 order = page_order(page); in __offline_isolated_pages()
6634 list_del(&page->lru); in __offline_isolated_pages()
6635 rmv_page_order(page); in __offline_isolated_pages()
6638 SetPageReserved((page+i)); in __offline_isolated_pages()
6646 bool is_free_buddy_page(struct page *page) in is_free_buddy_page() argument
6648 struct zone *zone = page_zone(page); in is_free_buddy_page()
6649 unsigned long pfn = page_to_pfn(page); in is_free_buddy_page()
6655 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
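is_free_buddy_page() asks whether 'page' sits inside some free block: for each order it derives the candidate block head by clearing the low 'order' bits of the pfn (which is what page - (pfn & ((1 << order) - 1)) computes above) and then tests PageBuddy and page_order on that head. A runnable demo of just the head computation:

/* Self-contained demo of the candidate-head arithmetic above. */
#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
        unsigned long pfn = 0x12345;
        unsigned int order;

        for (order = 0; order < MAX_ORDER; order++) {
                unsigned long head_pfn = pfn & ~((1UL << order) - 1);

                /* the kernel checks PageBuddy(head) && page_order(head) == order */
                printf("order %2u: candidate head pfn 0x%lx\n", order, head_pfn);
        }
        return 0;
}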