Lines matching refs: ac  (references to the allocation context, struct alloc_context, in mm/page_alloc.c)

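Every hit below touches the allocation context that the page allocator threads through its call chain, from __alloc_pages_nodemask() down to the reclaim and compaction helpers; the "argument" / "local" tags mark where ac enters each function's scope. For orientation, in the kernel revision this listing appears to match (circa v4.4/v4.5, judging by spread_dirty_pages and the highatomic reserve both being present), struct alloc_context is defined in mm/internal.h roughly as sketched here; the field names mirror the uses in the listing:

        struct alloc_context {
                struct zonelist *zonelist;      /* zones to try, in preference order */
                nodemask_t *nodemask;           /* allowed nodes, or NULL for the cpuset default */
                struct zone *preferred_zone;    /* first eligible zone in the zonelist */
                int classzone_idx;              /* index of preferred_zone, for watermark checks */
                int migratetype;                /* derived from the gfp flags */
                enum zone_type high_zoneidx;    /* highest zone the gfp mask permits */
                bool spread_dirty_pages;        /* __GFP_WRITE: honour per-zone dirty limits */
        };
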
1704 static void unreserve_highatomic_pageblock(const struct alloc_context *ac)  in unreserve_highatomic_pageblock()  argument
1706 struct zonelist *zonelist = ac->zonelist; in unreserve_highatomic_pageblock()
1713 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in unreserve_highatomic_pageblock()
1714 ac->nodemask) { in unreserve_highatomic_pageblock()
1747 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
1748 move_freepages_block(zone, page, ac->migratetype); in unreserve_highatomic_pageblock()
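Lines 1704-1748 are the highatomic unreserve path: when direct reclaim fails to produce a page, the allocator walks every zone eligible for this allocation and converts one reserved MIGRATE_HIGHATOMIC pageblock back to the caller's migratetype. A condensed sketch of how the matched fragments fit together, assuming the v4.4-era code (the per-order free-list scan and reserve accounting are trimmed to a comment):

        static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
        {
                struct zonelist *zonelist = ac->zonelist;
                unsigned long flags;
                struct zoneref *z;
                struct zone *zone;
                struct page *page;

                for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {
                        /* preserve at least one pageblock of reserve */
                        if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
                                continue;

                        spin_lock_irqsave(&zone->lock, flags);

                        /* scan zone->free_area[] for a free MIGRATE_HIGHATOMIC
                         * page, set 'page' and reduce nr_reserved_highatomic;
                         * if none is found, unlock and try the next zone ... */

                        /* hand the whole pageblock back to the caller's type */
                        set_pageblock_migratetype(page, ac->migratetype);
                        move_freepages_block(zone, page, ac->migratetype);
                        spin_unlock_irqrestore(&zone->lock, flags);
                        return;
                }
        }
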
2501 const struct alloc_context *ac) in get_page_from_freelist() argument
2503 struct zonelist *zonelist = ac->zonelist; in get_page_from_freelist()
2517 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, in get_page_from_freelist()
2518 ac->nodemask) { in get_page_from_freelist()
2532 if (!zone_local(ac->preferred_zone, zone)) in get_page_from_freelist()
2565 if (ac->spread_dirty_pages && !zone_dirty_ok(zone)) in get_page_from_freelist()
2570 ac->classzone_idx, alloc_flags)) { in get_page_from_freelist()
2579 !zone_allows_reclaim(ac->preferred_zone, zone)) in get_page_from_freelist()
2593 ac->classzone_idx, alloc_flags)) in get_page_from_freelist()
2601 page = buffered_rmqueue(ac->preferred_zone, zone, order, in get_page_from_freelist()
2602 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
2630 reset_alloc_batches(ac->preferred_zone); in get_page_from_freelist()
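Lines 2501-2630 are the fast path and the heaviest consumer of ac. A heavily condensed sketch of the zonelist walk these fragments belong to, assuming the v4.4-era logic (vmstat accounting, fair-zone batch resets and the zone_reclaim() retry are trimmed):

        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                        ac->nodemask) {
                unsigned long mark;

                /* fair-zone allocation round-robins local zones only */
                if ((alloc_flags & ALLOC_FAIR) &&
                    !zone_local(ac->preferred_zone, zone))
                        break;

                /* for __GFP_WRITE, skip zones over their dirty limit */
                if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
                        continue;

                mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                if (!zone_watermark_ok(zone, order, mark,
                                       ac->classzone_idx, alloc_flags)) {
                        /* below watermark: zone_reclaim() may help, but not
                         * on zones the preferred zone may not reclaim from */
                        if (!zone_allows_reclaim(ac->preferred_zone, zone))
                                continue;
                        /* ... zone_reclaim() and watermark re-check ... */
                }

                page = buffered_rmqueue(ac->preferred_zone, zone, order,
                                        gfp_mask, alloc_flags, ac->migratetype);
                if (page)
                        return page;
        }
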
2704 const struct alloc_context *ac, unsigned long *did_some_progress) in __alloc_pages_may_oom() argument
2707 .zonelist = ac->zonelist, in __alloc_pages_may_oom()
2708 .nodemask = ac->nodemask, in __alloc_pages_may_oom()
2732 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); in __alloc_pages_may_oom()
2744 if (ac->high_zoneidx < ZONE_NORMAL) in __alloc_pages_may_oom()
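Lines 2704-2744: the OOM path forwards ac's placement constraints to the OOM killer. It first retries the freelists once with ALLOC_WMARK_HIGH|ALLOC_CPUSET (2732) so a racing free is caught before anything is killed, and the check at 2744 bails out for lowmem-constrained allocations (high_zoneidx below ZONE_NORMAL) rather than kill tasks that may not free the right memory. The designated initializer at 2707-2708 populates an oom_control; in the v4.4-era code it looks roughly like:

        struct oom_control oc = {
                .zonelist = ac->zonelist,       /* where the allocation may land */
                .nodemask = ac->nodemask,       /* which nodes it may use */
                .gfp_mask = gfp_mask,
                .order = order,
        };
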
2774 int alloc_flags, const struct alloc_context *ac, in __alloc_pages_direct_compact() argument
2785 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
2806 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); in __alloc_pages_direct_compact()
2830 int alloc_flags, const struct alloc_context *ac, in __alloc_pages_direct_compact() argument
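The second __alloc_pages_direct_compact prototype at 2830 is not a stray duplicate: it is the !CONFIG_COMPACTION stub, which keeps the same signature but ignores ac and always fails. Roughly as below; the trailing parameters are reconstructed from the v4.4-era code and should be treated as approximate:

        #else /* !CONFIG_COMPACTION */
        static inline struct page *
        __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                        int alloc_flags, const struct alloc_context *ac,
                        enum migrate_mode mode, int *contended_compaction,
                        bool *deferred_compaction)
        {
                return NULL;    /* no compaction support built in */
        }
        #endif /* CONFIG_COMPACTION */
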
2841 const struct alloc_context *ac) in __perform_reclaim() argument
2855 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
2856 ac->nodemask); in __perform_reclaim()
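Lines 2841-2856: direct reclaim proper needs only ac->zonelist and ac->nodemask. A trimmed sketch, assuming the v4.4-era code (the cpuset retry and the PF_MEMALLOC/lockdep bookkeeping around the call are elided to comments):

        static int
        __perform_reclaim(gfp_t gfp_mask, unsigned int order,
                          const struct alloc_context *ac)
        {
                int progress;

                /* ... enter reclaim state: PF_MEMALLOC, lockdep, noio ... */

                progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
                                             ac->nodemask);

                /* ... leave reclaim state ... */

                return progress;
        }
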
2870 int alloc_flags, const struct alloc_context *ac, in __alloc_pages_direct_reclaim() argument
2876 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
2882 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); in __alloc_pages_direct_reclaim()
2890 unreserve_highatomic_pageblock(ac); in __alloc_pages_direct_reclaim()
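Lines 2870-2890 wrap __perform_reclaim() and retry the freelists. The call at 2890 is the only caller of unreserve_highatomic_pageblock() in this listing: if the post-reclaim allocation still fails, the highatomic reserve is released and the attempt repeated exactly once. A sketch, assuming v4.4-era behaviour:

        static struct page *
        __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
                        int alloc_flags, const struct alloc_context *ac,
                        unsigned long *did_some_progress)
        {
                struct page *page = NULL;
                bool drained = false;

                *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
                if (unlikely(!(*did_some_progress)))
                        return NULL;

        retry:
                page = get_page_from_freelist(gfp_mask, order,
                                alloc_flags & ~ALLOC_NO_WATERMARKS, ac);

                /*
                 * Reclaim made progress but the allocation still failed:
                 * pages may be pinned on per-cpu lists or held in the
                 * highatomic reserve. Release the reserve, drain the pcp
                 * lists, and try once more.
                 */
                if (!page && !drained) {
                        unreserve_highatomic_pageblock(ac);
                        drain_all_pages(NULL);
                        drained = true;
                        goto retry;
                }

                return page;
        }
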
2905 const struct alloc_context *ac) in __alloc_pages_high_priority() argument
2911 ALLOC_NO_WATERMARKS, ac); in __alloc_pages_high_priority()
2914 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, in __alloc_pages_high_priority()
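Lines 2905-2914: allocations allowed to dip below all watermarks loop on the freelists, throttling on ac->preferred_zone's congestion state between attempts when __GFP_NOFAIL forbids giving up. A sketch, assuming v4.4-era code:

        static struct page *
        __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
                                        const struct alloc_context *ac)
        {
                struct page *page;

                do {
                        page = get_page_from_freelist(gfp_mask, order,
                                                        ALLOC_NO_WATERMARKS, ac);

                        /* __GFP_NOFAIL must not busy-spin: back off briefly */
                        if (!page && gfp_mask & __GFP_NOFAIL)
                                wait_iff_congested(ac->preferred_zone,
                                                        BLK_RW_ASYNC, HZ/50);
                } while (!page && (gfp_mask & __GFP_NOFAIL));

                return page;
        }
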
2921 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) in wake_all_kswapds() argument
2926 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in wake_all_kswapds()
2927 ac->high_zoneidx, ac->nodemask) in wake_all_kswapds()
2928 wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone)); in wake_all_kswapds()
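Lines 2921-2928 are the whole function: a one-statement zonelist walk that wakes kswapd on every node usable for this allocation, passing the preferred zone's index as the reclaim target. Reconstructed from the fragments above; this should be essentially the complete v4.4-era body:

        static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
        {
                struct zoneref *z;
                struct zone *zone;

                for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
                                                ac->high_zoneidx, ac->nodemask)
                        wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
        }
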
2991 struct alloc_context *ac) in __alloc_pages_slowpath() argument
3031 wake_all_kswapds(order, ac); in __alloc_pages_slowpath()
3044 if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) { in __alloc_pages_slowpath()
3046 preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
3047 ac->high_zoneidx, NULL, &ac->preferred_zone); in __alloc_pages_slowpath()
3048 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref); in __alloc_pages_slowpath()
3053 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); in __alloc_pages_slowpath()
3064 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); in __alloc_pages_slowpath()
3066 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
3096 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3143 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3157 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50); in __alloc_pages_slowpath()
3162 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
3177 ac, migration_mode, in __alloc_pages_slowpath()
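Lines 2991-3177: unlike every helper above, __alloc_pages_slowpath() takes a non-const ac (2991), because it mutates the context: it rewrites ac->zonelist at 3064 when an ALLOC_NO_WATERMARKS allocation ignores mempolicies, and it recomputes the preferred zone at 3044-3048 once cpuset enforcement is dropped and no explicit nodemask constrains placement. A sketch of that reset fragment, assuming v4.4-era code:

        /*
         * Recompute placement over the whole zonelist when the allocation
         * is no longer cpuset-constrained and the caller gave no nodemask.
         */
        if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
                struct zoneref *preferred_zoneref;

                preferred_zoneref = first_zones_zonelist(ac->zonelist,
                                ac->high_zoneidx, NULL, &ac->preferred_zone);
                ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
        }
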
3200 struct alloc_context ac = { in __alloc_pages_nodemask() local
3223 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) in __alloc_pages_nodemask()
3230 ac.zonelist = zonelist; in __alloc_pages_nodemask()
3233 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); in __alloc_pages_nodemask()
3236 preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx, in __alloc_pages_nodemask()
3237 ac.nodemask ? : &cpuset_current_mems_allowed, in __alloc_pages_nodemask()
3238 &ac.preferred_zone); in __alloc_pages_nodemask()
3239 if (!ac.preferred_zone) in __alloc_pages_nodemask()
3241 ac.classzone_idx = zonelist_zone_idx(preferred_zoneref); in __alloc_pages_nodemask()
3245 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
3253 ac.spread_dirty_pages = false; in __alloc_pages_nodemask()
3255 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
3261 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
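Lines 3200-3261 are the entry point that builds ac. The local at 3200 is initialized from the gfp mask and the caller's nodemask; the zonelist, preferred zone and classzone_idx are filled in at 3230-3241; spread_dirty_pages is set from __GFP_WRITE at 3233 but cleared at 3253 before entering the slowpath, since dirty-zone balancing is a fast-path-only optimisation. The initializer, assuming v4.4-era code, is roughly:

        struct alloc_context ac = {
                .high_zoneidx = gfp_zone(gfp_mask),     /* highest zone the flags allow */
                .nodemask = nodemask,                   /* caller's node constraint, may be NULL */
                .migratetype = gfpflags_to_migratetype(gfp_mask),
        };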