Lines matching references to gfp_mask in mm/page_alloc.c (circa Linux v4.4). Each entry gives the source line number, the matching line, the enclosing function, and whether gfp_mask is a local variable or a function argument there.
2115 gfp_t gfp_mask; in split_page() local
2129 gfp_mask = get_page_owner_gfp(page); in split_page()
2130 set_page_owner(page, 0, gfp_mask); in split_page()
2133 set_page_owner(page + i, 0, gfp_mask); in split_page()
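The split_page() hits show why the allocating gfp_mask is recorded at all: when a high-order page is split into order-0 pages, set_page_owner() re-stamps every sub-page with the mask recovered by get_page_owner_gfp(), so page_owner reporting stays accurate. A minimal caller-side sketch, assuming a kernel-module context (the function name is illustrative):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Illustrative only: allocate an order-2 block, split it into four
     * independently freeable order-0 pages; split_page() re-stamps the
     * page owner of each sub-page with the original gfp_mask. */
    static int demo_split(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 2);
            int i;

            if (!page)
                    return -ENOMEM;
            split_page(page, 2);
            for (i = 0; i < 4; i++)
                    __free_page(page + i);
            return 0;
    }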
2310 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
2314 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
2316 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
2319 (gfp_mask & __GFP_DIRECT_RECLAIM)) in should_fail_alloc_page()
2360 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
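should_fail_alloc_page() is the fault-injection hook (CONFIG_FAIL_PAGE_ALLOC); the second hit at 2360 is the inline stub used when the feature is compiled out. Three gfp_mask filters are visible above: __GFP_NOFAIL allocations are never failed artificially, and the ignore_gfp_* knobs can spare highmem and direct-reclaim-capable allocations as well. The same logic as a hedged standalone sketch (the helper name is mine, not the kernel's):

    /* Sketch of the filter logic, assuming kernel context: never inject
     * failures into __GFP_NOFAIL allocations, and optionally spare
     * highmem or direct-reclaim-capable ones. */
    static bool may_inject(gfp_t gfp_mask, bool ignore_highmem,
                           bool ignore_reclaim)
    {
            if (gfp_mask & __GFP_NOFAIL)
                    return false;
            if (ignore_highmem && (gfp_mask & __GFP_HIGHMEM))
                    return false;
            if (ignore_reclaim && (gfp_mask & __GFP_DIRECT_RECLAIM))
                    return false;
            return true;
    }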
2500 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
2523 !cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
2582 ret = zone_reclaim(zone, gfp_mask, order); in get_page_from_freelist()
2602 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
2604 if (prep_new_page(page, order, gfp_mask, alloc_flags)) in get_page_from_freelist()
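get_page_from_freelist() is the zonelist walk that every allocation attempt, fast path or slow, goes through. gfp_mask appears three ways: cpuset_zone_allowed() consults it to decide whether a zone outside the caller's cpuset may be used, zone_reclaim() receives it when a candidate zone is below its watermark, and a successful page is initialized by prep_new_page() under the same mask, with ac->migratetype (itself derived from the mask) selecting the free list at 2602. Placement constraints ride in on the mask too; a hedged sketch:

    /* Sketch: __GFP_THISNODE confines the zonelist walk to a single
     * node's zones; without it the walk may fall back across nodes. */
    static struct page *node_strict(int nid)
    {
            return alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 0);
    }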
2660 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) in warn_alloc_failed() argument
2664 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || in warn_alloc_failed()
2673 if (!(gfp_mask & __GFP_NOMEMALLOC)) in warn_alloc_failed()
2677 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) in warn_alloc_failed()
2695 current->comm, order, gfp_mask); in warn_alloc_failed()
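warn_alloc_failed() prints the rate-limited "page allocation failure" report, naming the task, order, and gfp_mask at 2695. __GFP_NOWARN suppresses the report entirely; the __GFP_NOMEMALLOC and __GFP_DIRECT_RECLAIM checks above only adjust which nodes the accompanying show_mem() dump covers. A common caller idiom, sketched under kernel assumptions:

    /* Sketch: try a large contiguous block quietly, then fall back to a
     * single page, where a failure warning is actually interesting. */
    static struct page *try_big_then_small(unsigned int order)
    {
            struct page *page;

            page = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                               order);
            if (!page)
                    page = alloc_pages(GFP_KERNEL, 0);
            return page;
    }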
2703 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
2709 .gfp_mask = gfp_mask, in __alloc_pages_may_oom()
2731 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, in __alloc_pages_may_oom()
2736 if (!(gfp_mask & __GFP_NOFAIL)) { in __alloc_pages_may_oom()
2747 if (!(gfp_mask & __GFP_FS)) { in __alloc_pages_may_oom()
2759 if (gfp_mask & __GFP_THISNODE) in __alloc_pages_may_oom()
2763 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) in __alloc_pages_may_oom()
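__alloc_pages_may_oom() decides whether an out-of-memory kill is justified. It first takes one more pass over the freelists with __GFP_HARDWALL ORed in, then bails for callers that may simply fail (!__GFP_NOFAIL), fakes progress for callers that cannot help reclaim (!__GFP_FS, which keeps the slowpath looping instead of killing), and refuses to kill on behalf of node-bound __GFP_THISNODE requests; a __GFP_NOFAIL allocation reaching an OOM-kill failure triggers the WARN_ON_ONCE at 2763. In caller terms, sketched:

    /* Sketch: GFP_NOFS (no __GFP_FS) keeps filesystem-held allocations
     * from recursing into the OOM killer; GFP_KERNEL permits the kill. */
    static struct page *fs_safe_page(void)
    {
            return alloc_pages(GFP_NOFS, 0);
    }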
2773 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2785 compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
2805 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_compact()
2829 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
2840 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
2851 lockdep_set_current_reclaim_state(gfp_mask); in __perform_reclaim()
2855 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
2869 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
2876 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
2881 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_direct_reclaim()
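__alloc_pages_direct_compact() and __alloc_pages_direct_reclaim() are the "do the work synchronously" fallbacks; the second __alloc_pages_direct_compact() at 2829 is the stub for kernels without CONFIG_COMPACTION. Both pass the caller's gfp_mask through to try_to_compact_pages() / try_to_free_pages() and then retry get_page_from_freelist(), and __perform_reclaim() also registers the mask with lockdep (line 2851) so reclaim-related lock dependencies are tracked. Only __GFP_DIRECT_RECLAIM callers ever reach these paths; a hedged sketch:

    /* Sketch: whether these paths run at all is a caller decision.
     * GFP_NOWAIT (kswapd wakeup only) never reclaims synchronously;
     * GFP_KERNEL may; __GFP_NORETRY limits the effort to one pass. */
    static void reclaim_opt_in(void)
    {
            struct page *nowait = alloc_pages(GFP_NOWAIT, 0);
            struct page *effort = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 2);

            if (nowait)
                    __free_page(nowait);
            if (effort)
                    __free_pages(effort, 2);
    }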
2904 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, in __alloc_pages_high_priority() argument
2910 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_high_priority()
2913 if (!page && gfp_mask & __GFP_NOFAIL) in __alloc_pages_high_priority()
2916 } while (!page && (gfp_mask & __GFP_NOFAIL)); in __alloc_pages_high_priority()
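__alloc_pages_high_priority() retries the freelists while ignoring watermarks and, when __GFP_NOFAIL is set, loops until it succeeds (the hits at 2913 and 2916 are the retry condition). The caller-side contract, sketched:

    /* Sketch: __GFP_NOFAIL turns allocation failure into indefinite
     * retry inside the allocator; the call below may sleep a long time
     * but never returns NULL. Kernel docs discourage new uses of it. */
    static struct page *must_succeed_page(void)
    {
            return alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);
    }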
2932 gfp_to_alloc_flags(gfp_t gfp_mask) in gfp_to_alloc_flags() argument
2945 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); in gfp_to_alloc_flags()
2947 if (gfp_mask & __GFP_ATOMIC) { in gfp_to_alloc_flags()
2952 if (!(gfp_mask & __GFP_NOMEMALLOC)) in gfp_to_alloc_flags()
2962 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { in gfp_to_alloc_flags()
2963 if (gfp_mask & __GFP_MEMALLOC) in gfp_to_alloc_flags()
2973 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags()
2979 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed() argument
2981 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); in gfp_pfmemalloc_allowed()
2984 static inline bool is_thp_gfp_mask(gfp_t gfp_mask) in is_thp_gfp_mask() argument
2986 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE; in is_thp_gfp_mask()
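gfp_to_alloc_flags() translates the caller-visible gfp_mask into internal ALLOC_* flags: __GFP_HIGH maps directly, while __GFP_ATOMIC and __GFP_MEMALLOC can relax watermarks entirely unless __GFP_NOMEMALLOC vetoes it; gfp_pfmemalloc_allowed() simply asks whether that translation yields ALLOC_NO_WATERMARKS. is_thp_gfp_mask() uses a compact bit test: every bit of GFP_TRANSHUGE must be set while __GFP_KSWAPD_RECLAIM must be clear, all in one compare. The pattern generalizes; a hedged sketch with an illustrative macro name of my own:

    /* Sketch: test that all WANT bits are set and all FORBID bits are
     * clear in a single compare, as is_thp_gfp_mask() does above. */
    #define GFP_MATCHES(mask, want, forbid) \
            (((mask) & ((want) | (forbid))) == (want))

    /* e.g. GFP_MATCHES(gfp_mask, GFP_TRANSHUGE, __GFP_KSWAPD_RECLAIM) */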
2990 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
2993 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; in __alloc_pages_slowpath()
3009 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); in __alloc_pages_slowpath()
3017 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == in __alloc_pages_slowpath()
3019 gfp_mask &= ~__GFP_ATOMIC; in __alloc_pages_slowpath()
3026 if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim) in __alloc_pages_slowpath()
3030 if (gfp_mask & __GFP_KSWAPD_RECLAIM) in __alloc_pages_slowpath()
3038 alloc_flags = gfp_to_alloc_flags(gfp_mask); in __alloc_pages_slowpath()
3052 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_slowpath()
3064 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); in __alloc_pages_slowpath()
3066 page = __alloc_pages_high_priority(gfp_mask, order, ac); in __alloc_pages_slowpath()
3080 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); in __alloc_pages_slowpath()
3089 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) in __alloc_pages_slowpath()
3096 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3104 if (is_thp_gfp_mask(gfp_mask)) { in __alloc_pages_slowpath()
3139 if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD)) in __alloc_pages_slowpath()
3143 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
3149 if (gfp_mask & __GFP_NORETRY) in __alloc_pages_slowpath()
3155 ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) { in __alloc_pages_slowpath()
3162 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
3176 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, in __alloc_pages_slowpath()
3183 warn_alloc_failed(gfp_mask, order, NULL); in __alloc_pages_slowpath()
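__alloc_pages_slowpath() stitches the pieces together, and nearly every branch keys off gfp_mask: wake kswapd for __GFP_KSWAPD_RECLAIM, strip a bogus __GFP_ATOMIC combined with __GFP_DIRECT_RECLAIM, punt early for __GFP_THISNODE without reclaim, retry with boosted alloc_flags, try the no-watermark path, then compaction and direct reclaim, give up for __GFP_NORETRY, keep retrying small orders and __GFP_REPEAT requests, and finally consider the OOM killer before warn_alloc_failed() reports defeat. From a caller's perspective the mask selects how much of this machinery may run; a hedged sketch:

    /* Sketch: three callers, three very different slowpaths.
     * GFP_ATOMIC: never sleeps, may dip below watermarks, no reclaim/OOM.
     * GFP_KERNEL: may compact, reclaim, retry, and OOM-kill.
     * GFP_KERNEL | __GFP_NORETRY: one reclaim/compaction pass, then NULL. */
    static void slowpath_flavors(void)
    {
            struct page *atomic_pg = alloc_pages(GFP_ATOMIC, 0);
            struct page *kernel_pg = alloc_pages(GFP_KERNEL, 0);
            struct page *oneshot_pg = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 3);

            if (atomic_pg)
                    __free_page(atomic_pg);
            if (kernel_pg)
                    __free_page(kernel_pg);
            if (oneshot_pg)
                    __free_pages(oneshot_pg, 3);
    }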
3192 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, in __alloc_pages_nodemask() argument
3201 .high_zoneidx = gfp_zone(gfp_mask), in __alloc_pages_nodemask()
3203 .migratetype = gfpflags_to_migratetype(gfp_mask), in __alloc_pages_nodemask()
3206 gfp_mask &= gfp_allowed_mask; in __alloc_pages_nodemask()
3208 lockdep_trace_alloc(gfp_mask); in __alloc_pages_nodemask()
3210 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in __alloc_pages_nodemask()
3212 if (should_fail_alloc_page(gfp_mask, order)) in __alloc_pages_nodemask()
3233 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); in __alloc_pages_nodemask()
3244 alloc_mask = gfp_mask|__GFP_HARDWALL; in __alloc_pages_nodemask()
3252 alloc_mask = memalloc_noio_flags(gfp_mask); in __alloc_pages_nodemask()
3259 kmemcheck_pagealloc_alloc(page, order, gfp_mask); in __alloc_pages_nodemask()
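__alloc_pages_nodemask() is the public entry point (its comment calls it the "heart" of the zoned buddy allocator). gfp_mask is first clamped by gfp_allowed_mask, drives the lockdep, might_sleep, and fault-injection checks, and seeds the alloc_context via gfp_zone(), gfpflags_to_migratetype(), and the __GFP_WRITE dirty-spreading hint; the fast path adds __GFP_HARDWALL, while the retry path strips I/O flags through memalloc_noio_flags() for tasks in NOIO scope. Callers rarely invoke it directly; the usual wrappers, sketched:

    /* Sketch: most code reaches __alloc_pages_nodemask() through these
     * wrappers, which supply the node and zonelist. */
    static struct page *wrappers_demo(int nid)
    {
            struct page *local = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
            struct page *onnode = alloc_pages_node(nid, GFP_KERNEL, 0);

            if (local)
                    __free_page(local);
            return onnode;
    }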
3280 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
3288 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); in __get_free_pages()
3290 page = alloc_pages(gfp_mask, order); in __get_free_pages()
3297 unsigned long get_zeroed_page(gfp_t gfp_mask) in get_zeroed_page() argument
3299 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); in get_zeroed_page()
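__get_free_pages() returns a kernel virtual address rather than a struct page, which is exactly why the VM_BUG_ON at 3288 rejects __GFP_HIGHMEM: a highmem page may have no permanent kernel mapping to return. get_zeroed_page() is the same call with __GFP_ZERO ORed in. Usage, sketched:

    /* Sketch: address-based allocation; pair with free_pages()/free_page().
     * The order-1 call returns two physically contiguous pages. */
    static int addr_api_demo(void)
    {
            unsigned long buf = __get_free_pages(GFP_KERNEL, 1);
            unsigned long zed = get_zeroed_page(GFP_KERNEL);

            if (!buf || !zed)
                    goto fail;
            /* ... use the buffers ... */
            free_pages(buf, 1);
            free_page(zed);
            return 0;
    fail:
            if (buf)
                    free_pages(buf, 1);
            if (zed)
                    free_page(zed);
            return -ENOMEM;
    }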
3337 gfp_t gfp_mask) in __page_frag_refill() argument
3340 gfp_t gfp = gfp_mask; in __page_frag_refill()
3343 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | in __page_frag_refill()
3345 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_refill()
3358 unsigned int fragsz, gfp_t gfp_mask) in __alloc_page_frag() argument
3366 page = __page_frag_refill(nc, gfp_mask); in __alloc_page_frag()
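__page_frag_refill() backs the page-fragment allocator: it first tries a larger compound page, ORing __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY into the caller's mask (the line at 3343 continues past what the match shows), and falls back to order 0; __alloc_page_frag() then carves fixed-size fragments out of the refilled page. A hedged consumer-side sketch (v4.4-era naming; __alloc_page_frag() was later renamed page_frag_alloc()):

    #include <linux/skbuff.h>

    /* Sketch: the network stack consumes this allocator through
     * netdev_alloc_frag()/napi_alloc_frag(). */
    static void frag_demo(void)
    {
            void *data = netdev_alloc_frag(2048);

            if (data)
                    skb_free_frag(data);   /* drops the backing page ref */
    }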
3430 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages() argument
3434 page = alloc_pages(gfp_mask, order); in alloc_kmem_pages()
3435 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { in alloc_kmem_pages()
3442 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in alloc_kmem_pages_node() argument
3446 page = alloc_pages_node(nid, gfp_mask, order); in alloc_kmem_pages_node()
3447 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { in alloc_kmem_pages_node()
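alloc_kmem_pages() and its _node variant (a v4.4-era API, later folded into the ordinary allocator behind __GFP_ACCOUNT) wrap alloc_pages() with memcg_kmem_charge(), so the pages are billed to the current task's memory cgroup; if the charge fails, the freshly allocated page is returned to the buddy allocator and the caller gets NULL. Sketched under those assumptions:

    /* Sketch: memcg-accounted page allocation, v4.4-era API. The
     * matching __free_kmem_pages() uncharges before freeing. */
    static void accounted_demo(void)
    {
            struct page *page = alloc_kmem_pages(GFP_KERNEL, 0);

            if (page)
                    __free_kmem_pages(page, 0);
    }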
3501 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) in alloc_pages_exact() argument
3506 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
3521 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid() argument
3524 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
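alloc_pages_exact() serves byte-sized requests: it rounds the size up to a power-of-two order internally but frees the unused tail pages, so a 5-page request costs 5 pages rather than the 8 that an order-3 __get_free_pages() would pin. alloc_pages_exact_nid() does the same on a specific node. Sketch:

    /* Sketch: exact-size allocation; must be paired with
     * free_pages_exact() using the same size. */
    static int exact_demo(void)
    {
            void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

            if (!buf)
                    return -ENOMEM;
            /* ... use the 5-page buffer ... */
            free_pages_exact(buf, 5 * PAGE_SIZE);
            return 0;
    }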