Lines matching refs: gfp_mask (every reference in mm/page_alloc.c of a __GFP_WAIT-era kernel, circa v4.0, with matches grouped by function below)
1817 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
1821 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
1823 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
1825 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) in should_fail_alloc_page()
1866 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
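
Condensed, the fault-injection filter traced by the first matches reads as below: a sketch reassembled from the lines above, where fail_page_alloc and should_fail() belong to the CONFIG_FAIL_PAGE_ALLOC fault-injection framework, and the second signature at 1866 is the stub used when that framework is compiled out (it simply returns false).

    static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    {
            if (gfp_mask & __GFP_NOFAIL)
                    return false;   /* never inject a failure the caller cannot handle */
            if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
                    return false;   /* debugfs knob: exempt highmem requests */
            if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
                    return false;   /* debugfs knob: exempt sleepable requests */
            return should_fail(&fail_page_alloc.attr, 1 << order);
    }
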
2109 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
2120 (gfp_mask & __GFP_WRITE); in get_page_from_freelist()
2140 !cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
2219 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2252 gfp_mask, ac->migratetype); in get_page_from_freelist()
2254 if (prep_new_page(page, order, gfp_mask, alloc_flags)) in get_page_from_freelist()
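
gfp_mask drives three decisions inside get_page_from_freelist(): spreading __GFP_WRITE allocations away from zones over their dirty limit (the consider_zone_dirty local computed at 2120), cpuset admission per zone, and whether zone_reclaim() gets a chance before the walk falls back to a remoter zone. A heavily condensed sketch of the walk, with fairness batching, statistics and error paths omitted:

    for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
                                    ac->high_zoneidx, ac->nodemask) {
            if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
                !cpuset_zone_allowed(zone, gfp_mask))
                    continue;               /* outside this task's cpuset */
            if (consider_zone_dirty && !zone_dirty_ok(zone))
                    continue;               /* __GFP_WRITE: zone over its dirty share */
            if (!zone_watermark_ok(zone, order, mark,
                                   ac->classzone_idx, alloc_flags)) {
                    /* below watermark; perhaps zone_reclaim(zone, gfp_mask,
                     * order) before moving on to the next zone */
                    continue;
            }
            page = buffered_rmqueue(ac->preferred_zone, zone, order,
                                    gfp_mask, ac->migratetype);
            if (page && !prep_new_page(page, order, gfp_mask, alloc_flags))
                    return page;    /* nonzero prep_new_page() means a bad page */
    }
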
2311 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) in warn_alloc_failed() argument
2315 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || in warn_alloc_failed()
2324 if (!(gfp_mask & __GFP_NOMEMALLOC)) in warn_alloc_failed()
2328 if (in_interrupt() || !(gfp_mask & __GFP_WAIT)) in warn_alloc_failed()
2346 current->comm, order, gfp_mask); in warn_alloc_failed()
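
The __GFP_NOWARN test at 2315 makes the failure report purely opt-out, so callers with a fallback path routinely silence it. A hypothetical driver fragment (names illustrative):

    /* Try an order-4 contiguous buffer; quietly settle for a single
     * page if fragmentation defeats the first attempt. */
    buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 4);
    if (!buf)
            buf = (void *)__get_free_pages(GFP_KERNEL, 0);
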
2354 should_alloc_retry(gfp_t gfp_mask, unsigned int order, in should_alloc_retry() argument
2359 if (gfp_mask & __GFP_NORETRY) in should_alloc_retry()
2363 if (gfp_mask & __GFP_NOFAIL) in should_alloc_retry()
2389 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) in should_alloc_retry()
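
The retry policy these tests implement, as a condensed sketch (the real function also bails out when storage is suspended): __GFP_NORETRY always loses, __GFP_NOFAIL always wins, orders up to PAGE_ALLOC_COSTLY_ORDER keep retrying, and costlier orders retry only with __GFP_REPEAT, until roughly 1 << order pages have been reclaimed.

    static int should_alloc_retry(gfp_t gfp_mask, unsigned int order,
                                  unsigned long did_some_progress,
                                  unsigned long pages_reclaimed)
    {
            if (gfp_mask & __GFP_NORETRY)
                    return 0;               /* caller opted out of looping */
            if (gfp_mask & __GFP_NOFAIL)
                    return 1;               /* caller cannot tolerate failure */
            if (order <= PAGE_ALLOC_COSTLY_ORDER)
                    return 1;               /* cheap orders: keep trying */
            if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
                    return 1;               /* costly, but the caller insisted */
            return 0;
    }
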
2396 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
2407 if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) { in __alloc_pages_may_oom()
2418 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, in __alloc_pages_may_oom()
2423 if (!(gfp_mask & __GFP_NOFAIL)) { in __alloc_pages_may_oom()
2434 if (!(gfp_mask & __GFP_FS)) { in __alloc_pages_may_oom()
2444 if (gfp_mask & __GFP_THISNODE) in __alloc_pages_may_oom()
2448 if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false) in __alloc_pages_may_oom()
2449 || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) in __alloc_pages_may_oom()
2452 oom_zonelist_unlock(ac->zonelist, gfp_mask); in __alloc_pages_may_oom()
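
Before the OOM killer is invoked, one last high-watermark attempt is made with __GFP_HARDWALL forced on, and several flags veto the kill outright unless __GFP_NOFAIL overrides them. A sketch of the gate (further vetoes, such as coredumping tasks and lowmem-only requests, omitted):

    page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
                                  ALLOC_WMARK_HIGH | ALLOC_CPUSET, ac);
    if (page)
            goto out;                       /* last-ditch attempt succeeded */
    if (!(gfp_mask & __GFP_NOFAIL)) {
            if (order > PAGE_ALLOC_COSTLY_ORDER)
                    goto out;               /* costly order: fail, don't kill */
            if (!(gfp_mask & __GFP_FS))
                    goto out;               /* IO-less reclaim was crippled anyway */
            if (gfp_mask & __GFP_THISNODE)
                    goto out;               /* node-bound: killing tasks won't help */
    }
    if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
        || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
            *did_some_progress = 1;
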
2459 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2471 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
2491 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_compact()
2515 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
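
Compaction consults gfp_mask twice: try_to_compact_pages() needs it to scope the work, and success is followed by an ordinary freelist retry with ALLOC_NO_WATERMARKS stripped so reserves are not raided on the back of compaction. The second signature at 2515 is the !CONFIG_COMPACTION stub, which returns NULL. Sketched from the matches:

    compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
                                          mode, contended_compaction);
    if (compact_result == COMPACT_DEFERRED || compact_result == COMPACT_SKIPPED)
            return NULL;            /* nothing compacted; let reclaim try */
    page = get_page_from_freelist(gfp_mask, order,
                                  alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
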
2526 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
2537 lockdep_set_current_reclaim_state(gfp_mask); in __perform_reclaim()
2541 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
2555 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
2562 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
2571 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_reclaim()
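
Direct reclaim is a thin two-step. __perform_reclaim() brackets try_to_free_pages() with PF_MEMALLOC and the lockdep reclaim-state annotation from 2537, so reclaim itself cannot recurse into the allocator; __alloc_pages_direct_reclaim() then retries the freelist only if something was actually freed. Condensed:

    /* __perform_reclaim(), condensed */
    current->flags |= PF_MEMALLOC;
    lockdep_set_current_reclaim_state(gfp_mask);
    progress = try_to_free_pages(ac->zonelist, order, gfp_mask, ac->nodemask);
    lockdep_clear_current_reclaim_state();
    current->flags &= ~PF_MEMALLOC;

    /* __alloc_pages_direct_reclaim(), condensed */
    *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
    if (!*did_some_progress)
            return NULL;            /* reclaim freed nothing; don't bother retrying */
    page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
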
2592 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, in __alloc_pages_high_priority() argument
2598 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_high_priority()
2601 if (!page && gfp_mask & __GFP_NOFAIL) in __alloc_pages_high_priority()
2604 } while (!page && (gfp_mask & __GFP_NOFAIL)); in __alloc_pages_high_priority()
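
This is the one place a __GFP_NOFAIL allocation can spin indefinitely: watermarks are ignored, and the only concession between attempts is throttling against a congested block device. The loop, near verbatim from the matches:

    do {
            page = get_page_from_freelist(gfp_mask, order,
                                          ALLOC_NO_WATERMARKS, ac);
            if (!page && gfp_mask & __GFP_NOFAIL)
                    wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
                                       HZ / 50);
    } while (!page && (gfp_mask & __GFP_NOFAIL));
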
2620 gfp_to_alloc_flags(gfp_t gfp_mask) in gfp_to_alloc_flags() argument
2623 const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD)); in gfp_to_alloc_flags()
2634 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); in gfp_to_alloc_flags()
2641 if (!(gfp_mask & __GFP_NOMEMALLOC)) in gfp_to_alloc_flags()
2651 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { in gfp_to_alloc_flags()
2652 if (gfp_mask & __GFP_MEMALLOC) in gfp_to_alloc_flags()
2662 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags()
2668 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed() argument
2670 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); in gfp_pfmemalloc_allowed()
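
The mapping these matches trace, roughly: __GFP_HIGH becomes ALLOC_HIGH; an atomic caller (neither __GFP_WAIT nor __GFP_NO_KSWAPD) gets ALLOC_HARDER unless it set __GFP_NOMEMALLOC; __GFP_MEMALLOC (or a PF_MEMALLOC / TIF_MEMDIE task) earns ALLOC_NO_WATERMARKS, again unless __GFP_NOMEMALLOC forbids it; movable allocations may add ALLOC_CMA. Since gfp_pfmemalloc_allowed() reduces to that one ALLOC_NO_WATERMARKS bit, a hypothetical probe behaves like this:

    if (gfp_pfmemalloc_allowed(GFP_ATOMIC | __GFP_MEMALLOC))
            pr_debug("reserves may be used\n");     /* true: __GFP_MEMALLOC set */
    if (!gfp_pfmemalloc_allowed(GFP_KERNEL | __GFP_NOMEMALLOC))
            pr_debug("reserves are off-limits\n");  /* __GFP_NOMEMALLOC vetoes */
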
2674 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
2677 const gfp_t wait = gfp_mask & __GFP_WAIT; in __alloc_pages_slowpath()
2693 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); in __alloc_pages_slowpath()
2702 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !wait) in __alloc_pages_slowpath()
2706 if (!(gfp_mask & __GFP_NO_KSWAPD)) in __alloc_pages_slowpath()
2714 alloc_flags = gfp_to_alloc_flags(gfp_mask); in __alloc_pages_slowpath()
2728 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_slowpath()
2740 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); in __alloc_pages_slowpath()
2742 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
2756 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); in __alloc_pages_slowpath()
2765 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) in __alloc_pages_slowpath()
2772 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
2780 if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) { in __alloc_pages_slowpath()
2815 if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE || in __alloc_pages_slowpath()
2820 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
2827 if (should_alloc_retry(gfp_mask, order, did_some_progress, in __alloc_pages_slowpath()
2835 page = __alloc_pages_may_oom(gfp_mask, order, ac, in __alloc_pages_slowpath()
2851 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
2860 warn_alloc_failed(gfp_mask, order, NULL); in __alloc_pages_slowpath()
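
Read top to bottom, the slowpath matches trace the allocator's escalation ladder; as a comment sketch:

    /*
     * __alloc_pages_slowpath(), per the lines above:
     *   1. bail early for order >= MAX_ORDER (warn unless __GFP_NOWARN)
     *      and for __GFP_THISNODE without __GFP_WAIT (no fallback wanted)
     *   2. wake kswapd unless __GFP_NO_KSWAPD
     *   3. retry the freelist with gfp_to_alloc_flags(gfp_mask)
     *   4. ALLOC_NO_WATERMARKS -> __alloc_pages_high_priority()
     *   5. atomic (!__GFP_WAIT) callers give up here
     *   6. async compaction (GFP_TRANSHUGE is special-cased to stay cheap)
     *   7. direct reclaim; should_alloc_retry() then decides between
     *      looping (with __alloc_pages_may_oom() once progress stops)
     *      and one final synchronous compaction pass
     *   8. warn_alloc_failed(gfp_mask, order, NULL) on defeat
     */
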
2869 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask() argument
2878 .high_zoneidx = gfp_zone(gfp_mask), in __alloc_pages_nodemask()
2880 .migratetype = gfpflags_to_migratetype(gfp_mask), in __alloc_pages_nodemask()
2883 gfp_mask &= gfp_allowed_mask; in __alloc_pages_nodemask()
2885 lockdep_trace_alloc(gfp_mask); in __alloc_pages_nodemask()
2887 might_sleep_if(gfp_mask & __GFP_WAIT); in __alloc_pages_nodemask()
2889 if (should_fail_alloc_page(gfp_mask, order)) in __alloc_pages_nodemask()
2917 alloc_mask = gfp_mask|__GFP_HARDWALL; in __alloc_pages_nodemask()
2925 alloc_mask = memalloc_noio_flags(gfp_mask); in __alloc_pages_nodemask()
2931 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_nodemask()
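
The entry point assembles all of the above. Condensed sketch (fair-zone and cpuset retry bookkeeping, the kmemcheck hook and trace points omitted):

    struct alloc_context ac = {
            .high_zoneidx = gfp_zone(gfp_mask),
            .nodemask = nodemask,
            .migratetype = gfpflags_to_migratetype(gfp_mask),
    };

    gfp_mask &= gfp_allowed_mask;           /* e.g. no __GFP_WAIT during early boot */
    lockdep_trace_alloc(gfp_mask);
    might_sleep_if(gfp_mask & __GFP_WAIT);
    if (should_fail_alloc_page(gfp_mask, order))
            return NULL;

    alloc_mask = gfp_mask | __GFP_HARDWALL; /* fast path stays inside cpuset walls */
    page = get_page_from_freelist(alloc_mask, order,
                                  ALLOC_WMARK_LOW | ALLOC_CPUSET | ALLOC_FAIR, &ac);
    if (!page) {
            /* PF_MEMALLOC_NOIO tasks get the I/O-capable bits stripped here */
            alloc_mask = memalloc_noio_flags(gfp_mask);
            page = __alloc_pages_slowpath(alloc_mask, order, &ac);
    }
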
2952 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
2960 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); in __get_free_pages()
2962 page = alloc_pages(gfp_mask, order); in __get_free_pages()
2969 unsigned long get_zeroed_page(gfp_t gfp_mask) in get_zeroed_page() argument
2971 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); in get_zeroed_page()
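
The VM_BUG_ON at 2960 exists because __get_free_pages() returns a kernel virtual address, and a __GFP_HIGHMEM page may have no permanent kernel mapping to hand back. Hypothetical usage of the pair:

    unsigned long buf = __get_free_pages(GFP_KERNEL, 1);    /* 2^1 = 2 pages */
    unsigned long zed = get_zeroed_page(GFP_KERNEL);        /* 1 zero-filled page */

    if (buf)
            free_pages(buf, 1);
    if (zed)
            free_page(zed);
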
3004 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages() argument
3009 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) in alloc_kmem_pages()
3011 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
3016 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node() argument
3021 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) in alloc_kmem_pages_node()
3023 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
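
alloc_kmem_pages() and its _node variant differ from plain alloc_pages() only in charging the allocation against the current memory cgroup's kmem limit first. Condensed from the matches (memcg_kmem_commit_charge() is the second half of that charge protocol):

    struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
    {
            struct page *page;
            struct mem_cgroup *memcg = NULL;

            if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
                    return NULL;            /* over the memcg kmem limit */
            page = alloc_pages(gfp_mask, order);
            memcg_kmem_commit_charge(page, memcg, order);
            return page;
    }
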
3075 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) in alloc_pages_exact() argument
3080 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
3097 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid() argument
3100 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
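
alloc_pages_exact() rounds the request up to a whole order internally but returns the unused tail pages, so the caller pays for the size asked rather than the next power of two. Hypothetical usage:

    /* 24 KB contiguous: an order-3 (32 KB) block is allocated and the
     * trailing 8 KB handed straight back. */
    void *buf = alloc_pages_exact(24 * 1024, GFP_KERNEL);
    if (buf)
            free_pages_exact(buf, 24 * 1024);
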