Lines matching refs: order (uses of the allocation order throughout the buddy/page allocator)

187 static void __free_pages_ok(struct page *page, unsigned int order);
465 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
468 int nr_pages = 1 << order; in prep_compound_page()
471 set_compound_order(page, order); in prep_compound_page()
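
Throughout the allocator, order encodes block size as a power of two: an order-n request covers 2^n contiguous pages, which is why prep_compound_page() above computes nr_pages = 1 << order. A minimal user-space sketch of that arithmetic (PAGE_SIZE is hard-coded to 4096 here purely for illustration; the real value is architecture and configuration dependent):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL   /* assumed for the example; not the kernel's definition */

    int main(void)
    {
            for (unsigned int order = 0; order < 4; order++) {
                    unsigned long nr_pages = 1UL << order;        /* pages in the block */
                    unsigned long bytes    = PAGE_SIZE << order;  /* size of the block  */
                    printf("order %u -> %lu pages, %lu bytes\n", order, nr_pages, bytes);
            }
            return 0;
    }
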
534 unsigned int order, int migratetype) in set_page_guard() argument
545 set_page_private(page, order); in set_page_guard()
547 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
551 unsigned int order, int migratetype) in clear_page_guard() argument
563 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
568 unsigned int order, int migratetype) {} in set_page_guard() argument
570 unsigned int order, int migratetype) {} in clear_page_guard() argument
573 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
575 set_page_private(page, order); in set_page_order()
601 unsigned int order) in page_is_buddy() argument
606 if (page_is_guard(buddy) && page_order(buddy) == order) { in page_is_buddy()
615 if (PageBuddy(buddy) && page_order(buddy) == order) { in page_is_buddy()
658 struct zone *zone, unsigned int order, in __free_one_page() argument
674 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
678 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); in __free_one_page()
682 while (order < max_order - 1) { in __free_one_page()
683 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
685 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
692 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
695 zone->free_area[order].nr_free--; in __free_one_page()
701 order++; in __free_one_page()
715 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
729 set_page_order(page, order); in __free_one_page()
739 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { in __free_one_page()
743 buddy_idx = __find_buddy_index(combined_idx, order + 1); in __free_one_page()
745 if (page_is_buddy(higher_page, higher_buddy, order + 1)) { in __free_one_page()
747 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
752 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
754 zone->free_area[order].nr_free++; in __free_one_page()
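
The merge loop in __free_one_page() relies on the buddy layout: the buddy of a free block of 2^order pages differs from it only in bit 'order' of its page index (which is what __find_buddy_index() computes), and the merged block starts at the lower of the two indices. A rough user-space sketch of that index arithmetic, assuming the XOR/AND form used by this kernel version:

    #include <stdio.h>

    /* Mirror of __find_buddy_index(): the buddy of a block of 2^order pages
     * differs from it only in bit 'order' of the page index. */
    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
            return page_idx ^ (1UL << order);
    }

    int main(void)
    {
            unsigned long page_idx = 12;    /* hypothetical order-2 block at index 12 */
            unsigned int order = 2;

            unsigned long buddy_idx    = find_buddy_index(page_idx, order);  /* 8 */
            unsigned long combined_idx = buddy_idx & page_idx;               /* 8: head of the merged order-3 block */

            printf("buddy of %lu at order %u is %lu, merged block starts at %lu\n",
                   page_idx, order, buddy_idx, combined_idx);
            return 0;
    }
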
855 unsigned int order, in free_one_page() argument
868 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
973 static bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
979 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
981 trace_mm_page_free(page, order); in free_pages_prepare()
982 kmemcheck_free_shadow(page, order); in free_pages_prepare()
983 kasan_free_pages(page, order); in free_pages_prepare()
988 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
996 reset_page_owner(page, order); in free_pages_prepare()
1000 PAGE_SIZE << order); in free_pages_prepare()
1002 PAGE_SIZE << order); in free_pages_prepare()
1004 arch_free_page(page, order); in free_pages_prepare()
1005 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
1010 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
1016 if (!free_pages_prepare(page, order)) in __free_pages_ok()
1021 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1022 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
1027 unsigned long pfn, unsigned int order) in __free_pages_boot_core() argument
1029 unsigned int nr_pages = 1 << order; in __free_pages_boot_core()
1044 __free_pages(page, order); in __free_pages_boot_core()
1100 unsigned int order) in __free_pages_bootmem() argument
1104 return __free_pages_boot_core(page, pfn, order); in __free_pages_bootmem()
1384 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1389 for (i = 0; i < (1 << order); i++) { in prep_new_page()
1398 arch_alloc_page(page, order); in prep_new_page()
1399 kernel_map_pages(page, 1 << order, 1); in prep_new_page()
1400 kasan_alloc_pages(page, order); in prep_new_page()
1403 for (i = 0; i < (1 << order); i++) in prep_new_page()
1406 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1407 prep_compound_page(page, order); in prep_new_page()
1409 set_page_owner(page, order, gfp_flags); in prep_new_page()
1430 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1438 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
1448 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
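
__rmqueue_smallest() scans the free lists from the requested order upward and, when it has to take a larger block, expand() splits the surplus halves back onto the lower free lists. A toy model of that search-and-split behaviour (the free-list contents are invented for the example):

    #include <stdio.h>

    #define MAX_ORDER 11

    /* nr_free[o] plays the role of zone->free_area[o].nr_free in this toy model. */
    static unsigned long nr_free[MAX_ORDER] = { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0 };

    /* Sketch of the __rmqueue_smallest()/expand() idea: take the smallest free
     * block of order >= 'order', then return the unused halves to the lower
     * free lists. Returns the requested order on success, or -1 if nothing fits. */
    static int rmqueue_smallest(unsigned int order)
    {
            for (unsigned int current_order = order; current_order < MAX_ORDER; current_order++) {
                    if (!nr_free[current_order])
                            continue;
                    nr_free[current_order]--;
                    /* expand(): halve the block until it matches the requested order */
                    while (current_order > order) {
                            current_order--;
                            nr_free[current_order]++;   /* unused buddy goes back on a free list */
                    }
                    return order;
            }
            return -1;
    }

    int main(void)
    {
            rmqueue_smallest(2);   /* takes the order-5 block, leaves free blocks at orders 2, 3, 4 */
            for (unsigned int o = 0; o < MAX_ORDER; o++)
                    if (nr_free[o])
                            printf("order %u: %lu free\n", o, nr_free[o]);
            return 0;
    }
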
1475 unsigned int order) in __rmqueue_cma_fallback() argument
1477 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1481 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
1494 unsigned int order; in move_freepages() local
1522 order = page_order(page); in move_freepages()
1524 &zone->free_area[order].free_list[migratetype]); in move_freepages()
1525 page += 1 << order; in move_freepages()
1526 pages_moved += 1 << order; in move_freepages()
1576 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
1585 if (order >= pageblock_order) in can_steal_fallback()
1588 if (order >= pageblock_order / 2 || in can_steal_fallback()
1630 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
1648 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
1711 int order; in unreserve_highatomic_pageblock() local
1720 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock()
1721 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
1758 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) in __rmqueue_fallback() argument
1768 current_order >= order && current_order <= MAX_ORDER-1; in __rmqueue_fallback()
1786 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1797 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
1810 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
1815 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1818 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
1821 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1824 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
1833 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
1841 struct page *page = __rmqueue(zone, order, migratetype, 0); in rmqueue_bulk()
1861 -(1 << order)); in rmqueue_bulk()
1863 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2010 unsigned int order, t; in mark_free_pages() local
2027 for_each_migratetype_order(order, t) { in mark_free_pages()
2028 list_for_each(curr, &zone->free_area[order].free_list[t]) { in mark_free_pages()
2032 for (i = 0; i < (1UL << order); i++) in mark_free_pages()
2112 void split_page(struct page *page, unsigned int order) in split_page() argument
2126 split_page(virt_to_page(page[0].shadow), order); in split_page()
2131 for (i = 1; i < (1 << order); i++) { in split_page()
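
split_page() converts one order-n allocation into 2^n independent order-0 pages so that each can be freed on its own. A hedged sketch of a typical caller (kernel context, error handling trimmed; GFP_KERNEL chosen purely for illustration):

    /* Allocate an order-2 block, split it, then free the four pages independently. */
    struct page *page = alloc_pages(GFP_KERNEL, 2);
    if (page) {
            split_page(page, 2);                 /* now 4 separate order-0 pages */
            for (int i = 0; i < 4; i++)
                    __free_pages(page + i, 0);   /* each page can be freed on its own */
    }
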
2138 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2151 watermark = low_wmark_pages(zone) + (1 << order); in __isolate_free_page()
2155 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2160 zone->free_area[order].nr_free--; in __isolate_free_page()
2163 set_page_owner(page, order, __GFP_MOVABLE); in __isolate_free_page()
2166 if (order >= pageblock_order - 1) { in __isolate_free_page()
2167 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2177 return 1UL << order; in __isolate_free_page()
2192 unsigned int order; in split_free_page() local
2195 order = page_order(page); in split_free_page()
2197 nr_pages = __isolate_free_page(page, order); in split_free_page()
2203 split_page(page, order); in split_free_page()
2212 struct zone *zone, unsigned int order, in buffered_rmqueue() argument
2219 if (likely(order == 0)) { in buffered_rmqueue()
2253 WARN_ON_ONCE(order > 1); in buffered_rmqueue()
2259 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in buffered_rmqueue()
2261 trace_mm_page_alloc_zone_locked(page, order, migratetype); in buffered_rmqueue()
2264 page = __rmqueue(zone, order, migratetype, gfp_flags); in buffered_rmqueue()
2268 __mod_zone_freepage_state(zone, -(1 << order), in buffered_rmqueue()
2272 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); in buffered_rmqueue()
2277 __count_zone_vm_events(PGALLOC, zone, 1 << order); in buffered_rmqueue()
2310 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
2312 if (order < fail_page_alloc.min_order) in should_fail_alloc_page()
2322 return should_fail(&fail_page_alloc.attr, 1 << order); in should_fail_alloc_page()
2360 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
2373 static bool __zone_watermark_ok(struct zone *z, unsigned int order, in __zone_watermark_ok() argument
2382 free_pages -= (1 << order) - 1; in __zone_watermark_ok()
2412 if (!order) in __zone_watermark_ok()
2416 for (o = order; o < MAX_ORDER; o++) { in __zone_watermark_ok()
2441 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
2444 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
2448 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
2456 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, in zone_watermark_ok_safe()
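
__zone_watermark_ok() first discounts all pages of the request beyond the first (free_pages -= (1 << order) - 1), requires the remainder to stay above the watermark plus the lowmem reserve, and for order > 0 additionally walks free_area[order..MAX_ORDER-1] to confirm that a block of at least the requested order actually exists. A toy version of the order-0 part of that check (the numbers are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the scalar part of __zone_watermark_ok(): the request fails
     * unless, after discounting the pages of this allocation beyond the first,
     * the zone would still sit above 'mark' plus the lowmem reserve. */
    static bool watermark_ok(unsigned long free_pages, unsigned int order,
                             unsigned long mark, unsigned long lowmem_reserve)
    {
            free_pages -= (1UL << order) - 1;
            return free_pages > mark + lowmem_reserve;
    }

    int main(void)
    {
            printf("%d\n", watermark_ok(960, 0, 900, 50));  /* 1: 960 > 950        */
            printf("%d\n", watermark_ok(960, 4, 900, 50));  /* 0: 945 is not > 950 */
            return 0;
    }
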
2500 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
2569 if (!zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2582 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2592 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2601 page = buffered_rmqueue(ac->preferred_zone, zone, order, in get_page_from_freelist()
2604 if (prep_new_page(page, order, gfp_mask, alloc_flags)) in get_page_from_freelist()
2611 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) in get_page_from_freelist()
2612 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
2660 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) in warn_alloc_failed() argument
2695 current->comm, order, gfp_mask); in warn_alloc_failed()
2703 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
2710 .order = order, in __alloc_pages_may_oom()
2731 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, in __alloc_pages_may_oom()
2741 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
2773 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2781 if (!order) in __alloc_pages_direct_compact()
2785 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
2805 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_compact()
2812 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
2829 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2840 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
2855 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
2869 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
2876 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
2881 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_reclaim()
2904 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, in __alloc_pages_high_priority() argument
2910 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_high_priority()
2921 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) in wake_all_kswapds() argument
2928 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); in wake_all_kswapds()
2990 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
3008 if (order >= MAX_ORDER) { in __alloc_pages_slowpath()
3031 wake_all_kswapds(order, ac); in __alloc_pages_slowpath()
3052 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_slowpath()
3066 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
3096 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3143 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3154 if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) || in __alloc_pages_slowpath()
3155 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) { in __alloc_pages_slowpath()
3162 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
3176 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, in __alloc_pages_slowpath()
3183 warn_alloc_failed(gfp_mask, order, NULL); in __alloc_pages_slowpath()
3192 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask() argument
3212 if (should_fail_alloc_page(gfp_mask, order)) in __alloc_pages_nodemask()
3245 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
3255 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
3259 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_nodemask()
3261 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
3280 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
3290 page = alloc_pages(gfp_mask, order); in __get_free_pages()
3303 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
3306 if (order == 0) in __free_pages()
3309 __free_pages_ok(page, order); in __free_pages()
3315 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
3319 __free_pages(virt_to_page((void *)addr), order); in free_pages()
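
From a caller's point of view, the order is simply the second argument of the allocation and free routines, and it must match on both sides. A short usage sketch of the public interface listed above (kernel context):

    /* Allocate 2^3 = 8 contiguous pages, use them, then release them
     * with the matching order. */
    unsigned long addr = __get_free_pages(GFP_KERNEL, 3);
    if (addr) {
            /* ... use the 8-page region at 'addr' ... */
            free_pages(addr, 3);   /* order must match the allocation */
    }
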
3430 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages() argument
3434 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
3435 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { in alloc_kmem_pages()
3436 __free_pages(page, order); in alloc_kmem_pages()
3442 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node() argument
3446 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
3447 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { in alloc_kmem_pages_node()
3448 __free_pages(page, order); in alloc_kmem_pages_node()
3458 void __free_kmem_pages(struct page *page, unsigned int order) in __free_kmem_pages() argument
3460 memcg_kmem_uncharge(page, order); in __free_kmem_pages()
3461 __free_pages(page, order); in __free_kmem_pages()
3464 void free_kmem_pages(unsigned long addr, unsigned int order) in free_kmem_pages() argument
3468 __free_kmem_pages(virt_to_page((void *)addr), order); in free_kmem_pages()
3472 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
3476 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
3479 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
3503 unsigned int order = get_order(size); in alloc_pages_exact() local
3506 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
3507 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
3523 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
3524 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
3527 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
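
alloc_pages_exact() rounds the size up to get_order(size), allocates a block of that order, and make_alloc_exact() then splits it and frees the pages beyond the requested size. A hedged usage sketch; free_pages_exact() is assumed here as the matching release routine (it does not appear in this listing because it takes a size rather than an order):

    /* A 5-page request would cost an order-3 (8-page) block from alloc_pages(),
     * but here the 3 surplus pages are split off and returned to the allocator. */
    void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
    if (buf) {
            /* ... use the 5 pages ... */
            free_pages_exact(buf, 5 * PAGE_SIZE);  /* takes the size, not an order */
    }
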
3825 unsigned int order; in show_free_areas() local
3835 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
3836 struct free_area *area = &zone->free_area[order]; in show_free_areas()
3839 nr[order] = area->nr_free; in show_free_areas()
3840 total += nr[order] << order; in show_free_areas()
3842 types[order] = 0; in show_free_areas()
3845 types[order] |= 1 << type; in show_free_areas()
3849 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
3850 printk("%lu*%lukB ", nr[order], K(1UL) << order); in show_free_areas()
3851 if (nr[order]) in show_free_areas()
3852 show_migration_types(types[order]); in show_free_areas()
4175 unsigned int order = current_zonelist_order; in build_zonelists() local
4205 if (order == ZONELIST_ORDER_NODE) in build_zonelists()
4211 if (order == ZONELIST_ORDER_ZONE) { in build_zonelists()
4542 unsigned int order, t; in zone_init_free_lists() local
4543 for_each_migratetype_order(order, t) { in zone_init_free_lists()
4544 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
4545 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
5136 unsigned int order; in set_pageblock_order() local
5143 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
5145 order = MAX_ORDER - 1; in set_pageblock_order()
5152 pageblock_order = order; in set_pageblock_order()
6705 unsigned int order; in alloc_contig_range() local
6710 .order = -1, in alloc_contig_range()
6771 order = 0; in alloc_contig_range()
6774 if (++order >= MAX_ORDER) { in alloc_contig_range()
6778 outer_start &= ~0UL << order; in alloc_contig_range()
6866 unsigned int order, i; in __offline_isolated_pages() local
6896 order = page_order(page); in __offline_isolated_pages()
6899 pfn, 1 << order, end_pfn); in __offline_isolated_pages()
6903 zone->free_area[order].nr_free--; in __offline_isolated_pages()
6904 for (i = 0; i < (1 << order); i++) in __offline_isolated_pages()
6906 pfn += (1 << order); in __offline_isolated_pages()
6918 unsigned int order; in is_free_buddy_page() local
6921 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
6922 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
6924 if (PageBuddy(page_head) && page_order(page_head) >= order) in is_free_buddy_page()
6929 return order < MAX_ORDER; in is_free_buddy_page()
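
is_free_buddy_page() walks upward through the orders, locating the candidate block head at each order by clearing the low 'order' bits of the pfn (page - (pfn & ((1 << order) - 1)) above) and testing PageBuddy()/page_order() on it. A small user-space sketch of that masking:

    #include <stdio.h>

    int main(void)
    {
            unsigned long pfn = 1234;   /* hypothetical page frame number */

            /* The head of the order-n block containing a pfn is the pfn with
             * its low n bits cleared. */
            for (unsigned int order = 0; order <= 4; order++)
                    printf("order %u block head: pfn %lu\n",
                           order, pfn & ~((1UL << order) - 1));
            return 0;
    }
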