Lines in include/linux/gfp.h matching references to gfp_t

49 #define __GFP_DMA	((__force gfp_t)___GFP_DMA)
50 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
51 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
52 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
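
The four flags above are zone modifiers: they steer the allocation toward a particular physical zone (ZONE_DMA, ZONE_HIGHMEM, ZONE_DMA32, ZONE_MOVABLE) rather than changing allocator behavior. A minimal sketch of their use, assuming a driver that needs low memory for a legacy device; the helper name grab_dma_page is invented for illustration:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: one order-0 page guaranteed to lie in ZONE_DMA,
 * e.g. for a legacy device that can only address the low 16 MB. */
static struct page *grab_dma_page(void)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_DMA, 0);

        if (!page)
                return NULL;    /* any page allocation can fail */
        return page;            /* caller frees with __free_pages(page, 0) */
}
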
71 #define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
72 #define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
73 #define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
74 #define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
75 #define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
76 #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
77 #define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* Try hard, but the attempt may still fail */
78 #define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* Retry indefinitely; caller cannot handle failure */
79 #define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* Fail rather than retry after reclaim fails */
80 #define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
81 #define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
82 #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
83 #define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves; takes precedence over __GFP_MEMALLOC if both are set */
88 #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
89 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
90 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
91 #define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
92 #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
94 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
95 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
96 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
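
The flags from line 71 onward are action modifiers: they say how the allocator may behave, not where the page comes from. In this kernel generation the common masks are literal compositions of these bits (GFP_KERNEL is __GFP_WAIT | __GFP_IO | __GFP_FS; GFP_ATOMIC is __GFP_HIGH). A sketch of an opportunistic high-order allocation; the helper name try_big_buffer is invented for illustration:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: ask for 2^order zeroed pages, but give up fast
 * (__GFP_NORETRY) and stay quiet on failure (__GFP_NOWARN), returning
 * NULL so the caller can fall back to a smaller buffer. */
static void *try_big_buffer(unsigned int order)
{
        struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | __GFP_NORETRY, order);

        return page ? page_address(page) : NULL;
}
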
105 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
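
__GFP_BITS_MASK covers every defined gfp bit, so it is the natural way to strip stray bits from caller-supplied flags. A one-line sketch; sanitize_gfp is an invented name:

#include <linux/gfp.h>

/* Hypothetical helper: drop anything outside the defined GFP bit range,
 * mirroring how the allocator masks requests against gfp_allowed_mask. */
static inline gfp_t sanitize_gfp(gfp_t flags)
{
        return flags & __GFP_BITS_MASK;
}
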
150 static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
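
gfpflags_to_migratetype() maps the __GFP_MOVABLE and __GFP_RECLAIMABLE bits to the migratetype of the free list the page will be taken from. A sketch; is_movable_alloc is an invented name:

#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Hypothetical helper: GFP_HIGHUSER_MOVABLE carries __GFP_MOVABLE and
 * yields MIGRATE_MOVABLE; plain GFP_KERNEL yields MIGRATE_UNMOVABLE. */
static bool is_movable_alloc(gfp_t gfp)
{
        return gfpflags_to_migratetype(gfp) == MIGRATE_MOVABLE;
}
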
245 static inline enum zone_type gfp_zone(gfp_t flags)
263 static inline int gfp_zonelist(gfp_t flags)
280 static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
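
These three helpers turn a gfp mask into a placement decision: gfp_zone() picks the highest zone the zone modifiers allow, and node_zonelist() picks the per-node zonelist whose fallback order the allocator will walk. A sketch of the lookup; show_placement is an invented name:

#include <linux/gfp.h>
#include <linux/mmzone.h>
#include <linux/printk.h>

/* Hypothetical helper: report where an allocation with these flags
 * would be placed, without actually allocating anything. */
static void show_placement(int nid, gfp_t flags)
{
        enum zone_type zone = gfp_zone(flags);          /* e.g. ZONE_NORMAL */
        struct zonelist *zl = node_zonelist(nid, flags);

        pr_info("node %d: highest usable zone %d, zonelist %p\n",
                nid, zone, zl);
}
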
293 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
297 __alloc_pages(gfp_t gfp_mask, unsigned int order,
303 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
313 static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
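
This is the core entry chain: alloc_pages_node() and alloc_pages_exact_node() resolve a node id to a zonelist and hand off to __alloc_pages(), which in turn wraps __alloc_pages_nodemask(). A sketch of node-local allocation; grab_local_pages is an invented name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: four contiguous pages (order 2) from node nid;
 * __GFP_THISNODE forbids falling back to other nodes under pressure. */
static struct page *grab_local_pages(int nid)
{
        return alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE, 2);
}
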
322 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
325 alloc_pages(gfp_t gfp_mask, unsigned int order)
329 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
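
alloc_pages() is the everyday front end; on NUMA it routes through alloc_pages_current(), which applies the calling task's mempolicy, while alloc_pages_vma() additionally honors per-VMA policy. A minimal allocate/use/free sketch; demo_alloc_pages is an invented name:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: an order-1 block is two contiguous pages; the
 * order passed to __free_pages() must match the allocation. */
static int demo_alloc_pages(void)
{
        struct page *page = alloc_pages(GFP_KERNEL, 1);

        if (!page)
                return -ENOMEM;
        /* ... use page_address(page) for the mapped address ... */
        __free_pages(page, 1);
        return 0;
}
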
348 extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
349 extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
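
The alloc_kmem_pages() pair behaves like alloc_pages() but charges the pages to the kernel memory cgroup; in this kernel generation they are paired with __free_kmem_pages()/free_kmem_pages() so the charge is dropped again. A sketch; grab_accounted_page is an invented name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: one kmemcg-accounted page; release it with
 * __free_kmem_pages(page, 0), not plain __free_pages(). */
static struct page *grab_accounted_page(void)
{
        return alloc_kmem_pages(GFP_KERNEL, 0);
}
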
352 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
353 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
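
__get_free_pages() and get_zeroed_page() return a kernel virtual address rather than a struct page, which is why __GFP_HIGHMEM must not be passed to them: a highmem page has no permanent mapping to hand back. A sketch; demo_free_pages is an invented name:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: 2^2 = 4 contiguous pages by virtual address;
 * get_zeroed_page(GFP_KERNEL) is the order-0 variant that also clears
 * the page before returning it. */
static int demo_free_pages(void)
{
        unsigned long addr = __get_free_pages(GFP_KERNEL, 2);

        if (!addr)
                return -ENOMEM;
        free_pages(addr, 2);    /* paired free, by address and order */
        return 0;
}
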
355 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
358 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
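
alloc_pages_exact() takes a byte count instead of an order: it rounds the request up to whole pages, allocates the covering power-of-two block, and frees the unused tail, so a 9 KB request costs 3 pages rather than the 4 an order-2 allocation would keep. A sketch; grab_exact is an invented name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: 'bytes' of physically contiguous, zeroed memory;
 * must be released with free_pages_exact(buf, bytes). */
static void *grab_exact(size_t bytes)
{
        return alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO);
}
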
389 extern gfp_t gfp_allowed_mask;
392 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
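
The last two symbols are policy hooks on the allocator's hot path: every request is clamped against gfp_allowed_mask (during early boot it strips __GFP_WAIT, __GFP_IO and __GFP_FS because reclaim is not ready yet), and gfp_pfmemalloc_allowed() then decides whether this caller may dip into the pfmemalloc reserves. A sketch of that ordering; may_use_reserves is an invented name:

#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical helper: mirror the allocator's sequencing, masking the
 * request first and only then asking about the emergency reserves. */
static bool may_use_reserves(gfp_t gfp)
{
        gfp &= gfp_allowed_mask;
        return gfp_pfmemalloc_allowed(gfp);
}
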