Lines Matching refs:order
168 static void __free_pages_ok(struct page *page, unsigned int order);
365 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
368 int nr_pages = 1 << order; in prep_compound_page()
371 set_compound_order(page, order); in prep_compound_page()
383 static inline void prep_zero_page(struct page *page, unsigned int order, in prep_zero_page() argument
393 for (i = 0; i < (1 << order); i++) in prep_zero_page()
451 unsigned int order, int migratetype) in set_page_guard() argument
462 set_page_private(page, order); in set_page_guard()
464 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
468 unsigned int order, int migratetype) in clear_page_guard() argument
480 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
485 unsigned int order, int migratetype) {} in set_page_guard() argument
487 unsigned int order, int migratetype) {} in clear_page_guard() argument
490 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
492 set_page_private(page, order); in set_page_order()
518 unsigned int order) in page_is_buddy() argument
523 if (page_is_guard(buddy) && page_order(buddy) == order) { in page_is_buddy()
532 if (PageBuddy(buddy) && page_order(buddy) == order) { in page_is_buddy()
575 struct zone *zone, unsigned int order, in __free_one_page() argument
591 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
595 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); in __free_one_page()
599 while (order < max_order - 1) { in __free_one_page()
600 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
602 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
609 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
612 zone->free_area[order].nr_free--; in __free_one_page()
618 order++; in __free_one_page()
632 buddy_idx = __find_buddy_index(page_idx, order); in __free_one_page()
646 set_page_order(page, order); in __free_one_page()
656 if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { in __free_one_page()
660 buddy_idx = __find_buddy_index(combined_idx, order + 1); in __free_one_page()
662 if (page_is_buddy(higher_page, higher_buddy, order + 1)) { in __free_one_page()
664 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
669 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
671 zone->free_area[order].nr_free++; in __free_one_page()
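The merge loop in __free_one_page() (lines 599-646 above) relies on the central buddy invariant: a free block of 2^order pages has exactly one buddy of the same size, found by flipping bit 'order' of the block's page index, and a merged pair starts at the bitwise AND of the two indices. A minimal userspace sketch of that index arithmetic (find_buddy_index() here is an illustrative reimplementation, not the kernel symbol):

```c
#include <stdio.h>

/* Buddy of a 2^order-page block: flip bit 'order' of the block's page
 * index. This mirrors what __find_buddy_index(page_idx, order) returns. */
static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);
}

int main(void)
{
	/* A block freed at page index 8, order 1 (2 pages): its buddy sits
	 * at index 10. If that buddy is also free, the pair merges into an
	 * order-2 block starting at the lower index (combined_idx), and the
	 * search repeats one order higher, as the
	 * while (order < max_order - 1) loop does. */
	unsigned long page_idx = 8;
	unsigned int order = 1;

	unsigned long buddy_idx = find_buddy_index(page_idx, order);
	unsigned long combined_idx = buddy_idx & page_idx;

	printf("order %u: block %lu, buddy %lu, merged block starts at %lu\n",
	       order, page_idx, buddy_idx, combined_idx);
	return 0;
}
```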
769 unsigned int order, in free_one_page() argument
782 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
801 static bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
807 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
809 trace_mm_page_free(page, order); in free_pages_prepare()
810 kmemcheck_free_shadow(page, order); in free_pages_prepare()
811 kasan_free_pages(page, order); in free_pages_prepare()
816 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
824 reset_page_owner(page, order); in free_pages_prepare()
828 PAGE_SIZE << order); in free_pages_prepare()
830 PAGE_SIZE << order); in free_pages_prepare()
832 arch_free_page(page, order); in free_pages_prepare()
833 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
838 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
844 if (!free_pages_prepare(page, order)) in __free_pages_ok()
849 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
851 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
856 unsigned int order) in __free_pages_bootmem() argument
858 unsigned int nr_pages = 1 << order; in __free_pages_bootmem()
873 __free_pages(page, order); in __free_pages_bootmem()
984 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
989 for (i = 0; i < (1 << order); i++) { in prep_new_page()
998 arch_alloc_page(page, order); in prep_new_page()
999 kernel_map_pages(page, 1 << order, 1); in prep_new_page()
1000 kasan_alloc_pages(page, order); in prep_new_page()
1003 prep_zero_page(page, order, gfp_flags); in prep_new_page()
1005 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1006 prep_compound_page(page, order); in prep_new_page()
1008 set_page_owner(page, order, gfp_flags); in prep_new_page()
1029 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1037 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
1047 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
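__rmqueue_smallest() (line 1037) scans the free lists upward from the requested order, and when it has to take a larger block, expand() returns the unused halves to the lower-order lists. A toy model of that bookkeeping with plain counters in place of the kernel's list heads (illustrative names, not the kernel API):

```c
#include <stdio.h>

#define MAX_ORDER 11

/* nr_free[o] models zone->free_area[o].nr_free for one migratetype. */
static unsigned long nr_free[MAX_ORDER];

/* Take one block of 2^order pages: find the smallest populated order at or
 * above the request; if the block found is larger, file the unused halves
 * back on the lower-order lists on the way down (what expand() does). */
static int rmqueue_smallest(unsigned int order)
{
	unsigned int current_order;

	for (current_order = order; current_order < MAX_ORDER; current_order++) {
		if (!nr_free[current_order])
			continue;

		nr_free[current_order]--;
		while (current_order > order) {
			current_order--;
			nr_free[current_order]++;	/* upper half of each split */
		}
		return 0;	/* the remaining 2^order pages are the allocation */
	}
	return -1;		/* no block large enough is free */
}

int main(void)
{
	nr_free[3] = 1;			/* one free 8-page block */
	rmqueue_smallest(0);		/* ask for a single page */
	/* The order-3 block is split: one free block is left at each of
	 * orders 2, 1 and 0 (7 pages), and the eighth page is handed to
	 * the caller. */
	for (unsigned int o = 0; o < 4; o++)
		printf("order %u: %lu free\n", o, nr_free[o]);
	return 0;
}
```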
1075 unsigned int order) in __rmqueue_cma_fallback() argument
1077 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1081 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
1094 unsigned int order; in move_freepages() local
1122 order = page_order(page); in move_freepages()
1124 &zone->free_area[order].free_list[migratetype]); in move_freepages()
1126 page += 1 << order; in move_freepages()
1127 pages_moved += 1 << order; in move_freepages()
1177 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
1186 if (order >= pageblock_order) in can_steal_fallback()
1189 if (order >= pageblock_order / 2 || in can_steal_fallback()
1231 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
1249 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
1264 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) in __rmqueue_fallback() argument
1274 current_order >= order && current_order <= MAX_ORDER-1; in __rmqueue_fallback()
1292 expand(zone, page, order, current_order, area, in __rmqueue_fallback()
1304 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
1317 static struct page *__rmqueue(struct zone *zone, unsigned int order, in __rmqueue() argument
1323 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
1327 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
1330 page = __rmqueue_fallback(zone, order, migratetype); in __rmqueue()
1343 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
1352 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
1360 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
1380 -(1 << order)); in rmqueue_bulk()
1382 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
1529 unsigned int order, t; in mark_free_pages() local
1546 for_each_migratetype_order(order, t) { in mark_free_pages()
1547 list_for_each(curr, &zone->free_area[order].free_list[t]) { in mark_free_pages()
1551 for (i = 0; i < (1UL << order); i++) in mark_free_pages()
1631 void split_page(struct page *page, unsigned int order) in split_page() argument
1644 split_page(virt_to_page(page[0].shadow), order); in split_page()
1648 for (i = 1; i < (1 << order); i++) { in split_page()
1655 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
1668 watermark = low_wmark_pages(zone) + (1 << order); in __isolate_free_page()
1672 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
1677 zone->free_area[order].nr_free--; in __isolate_free_page()
1681 if (order >= pageblock_order - 1) { in __isolate_free_page()
1682 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
1691 set_page_owner(page, order, 0); in __isolate_free_page()
1692 return 1UL << order; in __isolate_free_page()
1707 unsigned int order; in split_free_page() local
1710 order = page_order(page); in split_free_page()
1712 nr_pages = __isolate_free_page(page, order); in split_free_page()
1718 split_page(page, order); in split_free_page()
1727 struct zone *zone, unsigned int order, in buffered_rmqueue() argument
1734 if (likely(order == 0)) { in buffered_rmqueue()
1768 WARN_ON_ONCE(order > 1); in buffered_rmqueue()
1771 page = __rmqueue(zone, order, migratetype); in buffered_rmqueue()
1775 __mod_zone_freepage_state(zone, -(1 << order), in buffered_rmqueue()
1779 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); in buffered_rmqueue()
1784 __count_zone_vm_events(PGALLOC, zone, 1 << order); in buffered_rmqueue()
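buffered_rmqueue() takes two paths: order-0 requests (the likely(order == 0) branch at line 1734) are served from a per-CPU list of pages that rmqueue_bulk() refills in batches, so the zone lock is taken once per batch rather than once per page, while higher orders go straight to __rmqueue() under the lock. A loose userspace model of that per-CPU cache with bulk refill (names, the batch size, and the page-id counter are all stand-ins):

```c
#include <stdio.h>

#define PCP_BATCH 8	/* pages pulled from the buddy lists per refill */

struct pcp_cache {
	unsigned long pages[PCP_BATCH * 4];
	int count;
};

static unsigned long next_buddy_page = 1000;	/* stands in for __rmqueue() */

/* Model of rmqueue_bulk(): grab a batch of order-0 pages in one go. */
static void refill(struct pcp_cache *pcp)
{
	for (int i = 0; i < PCP_BATCH; i++)
		pcp->pages[pcp->count++] = next_buddy_page++;
}

/* Model of the order-0 fast path in buffered_rmqueue(). */
static unsigned long alloc_order0(struct pcp_cache *pcp)
{
	if (pcp->count == 0)
		refill(pcp);	/* one "zone->lock" round trip per batch */
	return pcp->pages[--pcp->count];
}

int main(void)
{
	struct pcp_cache pcp = { .count = 0 };

	for (int i = 0; i < 10; i++)
		printf("got page %lu (pcp count now %d)\n",
		       alloc_order0(&pcp), pcp.count);
	return 0;
}
```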
1817 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1819 if (order < fail_page_alloc.min_order) in should_fail_alloc_page()
1828 return should_fail(&fail_page_alloc.attr, 1 << order); in should_fail_alloc_page()
1866 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1877 static bool __zone_watermark_ok(struct zone *z, unsigned int order, in __zone_watermark_ok() argument
1886 free_pages -= (1 << order) - 1; in __zone_watermark_ok()
1899 for (o = 0; o < order; o++) { in __zone_watermark_ok()
1912 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
1915 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
1919 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
1927 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok_safe()
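__zone_watermark_ok() (line 1877) does more than compare the raw free-page count against the mark: lines 1886 and 1899 show it discounting the pages needed for the request itself and then, order by order, subtracting free pages that sit in blocks too small to satisfy the request while halving the required mark. A standalone model of that loop (nr_free[] stands in for zone->free_area[].nr_free; the lowmem_reserve and ALLOC_* adjustments are omitted):

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

static bool watermark_ok(const unsigned long nr_free[MAX_ORDER],
			 unsigned int order, unsigned long mark,
			 unsigned long free_pages)
{
	long remaining = (long)free_pages - ((1L << order) - 1);
	long min = (long)mark;

	if (remaining <= min)
		return false;

	for (unsigned int o = 0; o < order; o++) {
		/* Blocks smaller than the request cannot satisfy it. */
		remaining -= (long)(nr_free[o] << o);
		min >>= 1;		/* demand less headroom at higher orders */
		if (remaining <= min)
			return false;
	}
	return true;
}

int main(void)
{
	/* Mostly order-0 fragments plus one order-2 and one order-3 block:
	 * 512 pages free in total. */
	unsigned long nr_free[MAX_ORDER] = { [0] = 500, [2] = 1, [3] = 1 };
	unsigned long total = 500 + 4 + 8;

	printf("order-0 request ok: %d\n", watermark_ok(nr_free, 0, 128, total));
	printf("order-3 request ok: %d\n", watermark_ok(nr_free, 3, 128, total));
	return 0;
}
```

Even though 512 pages are nominally free, the order-3 check fails here because nearly all of them are unusable fragments; the per-order loop is what makes the watermark reflect fragmentation rather than just totals.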
2109 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
2186 if (!zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2219 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2229 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
2251 page = buffered_rmqueue(ac->preferred_zone, zone, order, in get_page_from_freelist()
2254 if (prep_new_page(page, order, gfp_mask, alloc_flags)) in get_page_from_freelist()
2311 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) in warn_alloc_failed() argument
2346 current->comm, order, gfp_mask); in warn_alloc_failed()
2354 should_alloc_retry(gfp_t gfp_mask, unsigned int order, in should_alloc_retry() argument
2379 if (order <= PAGE_ALLOC_COSTLY_ORDER) in should_alloc_retry()
2389 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) in should_alloc_retry()
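should_alloc_retry() (line 2354) encodes the slowpath's retry policy: __GFP_NORETRY never loops, __GFP_NOFAIL always does, cheap requests (order <= PAGE_ALLOC_COSTLY_ORDER, which is 3) are retried implicitly, and costly requests keep retrying only while __GFP_REPEAT is set and fewer than 1 << order pages have been reclaimed so far (line 2389). A standalone model of that decision ladder (the flag bits below are arbitrary stand-ins, not the real __GFP_* values, and the suspend special case is omitted):

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag bits; the real __GFP_* values live in gfp.h. */
#define GFP_NORETRY	0x1u
#define GFP_NOFAIL	0x2u
#define GFP_REPEAT	0x4u

#define PAGE_ALLOC_COSTLY_ORDER 3

static bool should_retry(unsigned int gfp, unsigned int order,
			 unsigned long pages_reclaimed)
{
	if (gfp & GFP_NORETRY)
		return false;		/* caller opted out of looping */
	if (gfp & GFP_NOFAIL)
		return true;		/* caller must not see failure */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;		/* cheap orders are retried implicitly */
	/* Costly orders: only while progress still looks plausible. */
	return (gfp & GFP_REPEAT) && pages_reclaimed < (1UL << order);
}

int main(void)
{
	printf("order 2, no flags:           %d\n", should_retry(0, 2, 0));
	printf("order 9, no flags:           %d\n", should_retry(0, 9, 0));
	printf("order 9, REPEAT, 100 recl.:  %d\n",
	       should_retry(GFP_REPEAT, 9, 100));
	printf("order 9, REPEAT, 600 recl.:  %d\n",
	       should_retry(GFP_REPEAT, 9, 600));
	return 0;
}
```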
2396 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
2418 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, in __alloc_pages_may_oom()
2428 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
2448 if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) in __alloc_pages_may_oom()
2459 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2467 if (!order) in __alloc_pages_direct_compact()
2471 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
2491 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_compact()
2498 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
2515 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2526 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
2541 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
2555 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
2562 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
2571 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_reclaim()
2592 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, in __alloc_pages_high_priority() argument
2598 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_high_priority()
2609 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) in wake_all_kswapds() argument
2616 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); in wake_all_kswapds()
2674 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
2692 if (order >= MAX_ORDER) { in __alloc_pages_slowpath()
2707 wake_all_kswapds(order, ac); in __alloc_pages_slowpath()
2728 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_slowpath()
2742 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
2772 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
2820 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
2827 if (should_alloc_retry(gfp_mask, order, did_some_progress, in __alloc_pages_slowpath()
2835 page = __alloc_pages_may_oom(gfp_mask, order, ac, in __alloc_pages_slowpath()
2851 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
2860 warn_alloc_failed(gfp_mask, order, NULL); in __alloc_pages_slowpath()
2869 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask() argument
2889 if (should_fail_alloc_page(gfp_mask, order)) in __alloc_pages_nodemask()
2918 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
2927 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
2931 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_nodemask()
2933 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
2952 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
2962 page = alloc_pages(gfp_mask, order); in __get_free_pages()
2975 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
2978 if (order == 0) in __free_pages()
2981 __free_pages_ok(page, order); in __free_pages()
2987 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
2991 __free_pages(virt_to_page((void *)addr), order); in free_pages()
3004 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages() argument
3009 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) in alloc_kmem_pages()
3011 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
3012 memcg_kmem_commit_charge(page, memcg, order); in alloc_kmem_pages()
3016 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node() argument
3021 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) in alloc_kmem_pages_node()
3023 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
3024 memcg_kmem_commit_charge(page, memcg, order); in alloc_kmem_pages_node()
3032 void __free_kmem_pages(struct page *page, unsigned int order) in __free_kmem_pages() argument
3034 memcg_kmem_uncharge_pages(page, order); in __free_kmem_pages()
3035 __free_pages(page, order); in __free_kmem_pages()
3038 void free_kmem_pages(unsigned long addr, unsigned int order) in free_kmem_pages() argument
3042 __free_kmem_pages(virt_to_page((void *)addr), order); in free_kmem_pages()
3046 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
3050 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
3053 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
3077 unsigned int order = get_order(size); in alloc_pages_exact() local
3080 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
3081 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
3099 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
3100 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
3103 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
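alloc_pages_exact() (line 3077) rounds the request up to a power-of-two order with get_order(), and make_alloc_exact() (line 3046) then splits that block into order-0 pages and frees everything past the requested size. A small sketch of the size arithmetic involved (get_order() here is a plain reimplementation for illustration, valid for size > 0):

```c
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

/* Smallest order whose block (PAGE_SIZE << order) covers 'size'. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE + 100;	/* just over 5 pages */
	unsigned int order = get_order(size);
	unsigned long block_pages = 1UL << order;
	unsigned long used_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* make_alloc_exact() splits the order-'order' block into order-0
	 * pages and frees every page from the first one past 'size' to the
	 * end of the block. */
	printf("order %u (%lu pages allocated), %lu kept, %lu freed back\n",
	       order, block_pages, used_pages, block_pages - used_pages);
	return 0;
}
```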
3401 unsigned int order; in show_free_areas() local
3411 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
3412 struct free_area *area = &zone->free_area[order]; in show_free_areas()
3415 nr[order] = area->nr_free; in show_free_areas()
3416 total += nr[order] << order; in show_free_areas()
3418 types[order] = 0; in show_free_areas()
3421 types[order] |= 1 << type; in show_free_areas()
3425 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
3426 printk("%lu*%lukB ", nr[order], K(1UL) << order); in show_free_areas()
3427 if (nr[order]) in show_free_areas()
3428 show_migration_types(types[order]); in show_free_areas()
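show_free_areas() (lines 3411-3428) builds the familiar per-zone buddy summary: nr[order] counts free blocks at each order, the grand total in pages is the sum of nr[order] << order, and each term prints as "count*sizekB". A few lines modeling that accounting (the K() shift assumes 4 KiB pages, matching the kernel's K() macro of PAGE_SHIFT - 10):

```c
#include <stdio.h>

#define MAX_ORDER 11
#define K(x) ((x) << 2)		/* pages -> kB for 4 KiB pages */

int main(void)
{
	/* nr[order] models zone->free_area[order].nr_free. */
	unsigned long nr[MAX_ORDER] = { 120, 34, 7, 2, 1 };
	unsigned long total = 0;

	for (unsigned int order = 0; order < MAX_ORDER; order++)
		total += nr[order] << order;	/* blocks * pages per block */

	for (unsigned int order = 0; order < MAX_ORDER; order++)
		printf("%lu*%lukB ", nr[order], K(1UL) << order);
	printf("= %lukB\n", K(total));
	return 0;
}
```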
3751 unsigned int order = current_zonelist_order; in build_zonelists() local
3781 if (order == ZONELIST_ORDER_NODE) in build_zonelists()
3787 if (order == ZONELIST_ORDER_ZONE) { in build_zonelists()
4258 unsigned int order, t; in zone_init_free_lists() local
4259 for_each_migratetype_order(order, t) { in zone_init_free_lists()
4260 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
4261 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
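zone_init_free_lists() (lines 4259-4261) initializes the core buddy data structure: each zone carries MAX_ORDER free areas, and each free area keeps one free list per migratetype plus a count of free blocks; for_each_migratetype_order() is simply the nested loop over both indices. A stripped-down model of that layout (type and field names mirror the kernel's, but MIGRATE_TYPES and the list handling are simplified here):

```c
#define MAX_ORDER     11	/* orders 0 .. MAX_ORDER-1 */
#define MIGRATE_TYPES  6	/* UNMOVABLE, RECLAIMABLE, MOVABLE, ... (config dependent) */

struct list_head {
	struct list_head *next, *prev;
};

/* One free area per order: a free list per migratetype plus a counter. */
struct free_area {
	struct list_head free_list[MIGRATE_TYPES];
	unsigned long nr_free;
};

struct zone {
	struct free_area free_area[MAX_ORDER];
};

/* What zone_init_free_lists() does: empty every list, zero every count.
 * for_each_migratetype_order(order, t) expands to this nested loop. */
static void zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;

	for (order = 0; order < MAX_ORDER; order++) {
		for (t = 0; t < MIGRATE_TYPES; t++) {
			struct list_head *head = &zone->free_area[order].free_list[t];

			head->next = head->prev = head;	/* INIT_LIST_HEAD */
		}
		zone->free_area[order].nr_free = 0;
	}
}

int main(void)
{
	static struct zone z;

	zone_init_free_lists(&z);
	return 0;
}
```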
4866 unsigned int order; in set_pageblock_order() local
4873 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
4875 order = MAX_ORDER - 1; in set_pageblock_order()
4882 pageblock_order = order; in set_pageblock_order()
6438 unsigned int order; in alloc_contig_range() local
6443 .order = -1, in alloc_contig_range()
6504 order = 0; in alloc_contig_range()
6507 if (++order >= MAX_ORDER) { in alloc_contig_range()
6511 outer_start &= ~0UL << order; in alloc_contig_range()
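The fragment at lines 6504-6511 is alloc_contig_range() searching for the free buddy block that contains the start of the target range: it tries successively larger orders and aligns outer_start down to each order's block boundary with a mask. The alignment step in isolation (illustrative values):

```c
#include <stdio.h>

int main(void)
{
	/* Align a pfn down to the start of an order-aligned block, as
	 * 'outer_start &= ~0UL << order' does in alloc_contig_range(). */
	unsigned long pfn = 0x12345;

	for (unsigned int order = 0; order <= 4; order++)
		printf("order %u: block start 0x%lx\n",
		       order, pfn & (~0UL << order));
	return 0;
}
```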
6599 unsigned int order, i; in __offline_isolated_pages() local
6629 order = page_order(page); in __offline_isolated_pages()
6632 pfn, 1 << order, end_pfn); in __offline_isolated_pages()
6636 zone->free_area[order].nr_free--; in __offline_isolated_pages()
6637 for (i = 0; i < (1 << order); i++) in __offline_isolated_pages()
6639 pfn += (1 << order); in __offline_isolated_pages()
6651 unsigned int order; in is_free_buddy_page() local
6654 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
6655 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
6657 if (PageBuddy(page_head) && page_order(page_head) >= order) in is_free_buddy_page()
6662 return order < MAX_ORDER; in is_free_buddy_page()
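is_free_buddy_page() (lines 6654-6662) works backwards from a pfn: for each order it clears the low bits to locate the would-be head of the order-sized block containing the page, then checks whether that head really is a free buddy of at least that order; the page is free iff some order below MAX_ORDER passes. The index arithmetic in isolation (illustrative):

```c
#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
	/* For a page at this pfn, the candidate head of the order-sized
	 * block containing it is found by clearing the low 'order' bits,
	 * mirroring 'page - (pfn & ((1 << order) - 1))'. */
	unsigned long pfn = 0x1003;

	for (unsigned int order = 0; order < MAX_ORDER; order++) {
		unsigned long head_pfn = pfn - (pfn & ((1UL << order) - 1));

		/* The kernel then tests PageBuddy(head) &&
		 * page_order(head) >= order on the head page. */
		printf("order %2u: candidate head pfn 0x%lx\n", order, head_pfn);
	}
	return 0;
}
```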