gfp_mask           40 arch/arm/kernel/module.c 	gfp_t gfp_mask = GFP_KERNEL;
gfp_mask           45 arch/arm/kernel/module.c 		gfp_mask |= __GFP_NOWARN;
gfp_mask           48 arch/arm/kernel/module.c 				gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
gfp_mask           25 arch/arm64/kernel/module.c 	gfp_t gfp_mask = GFP_KERNEL;
gfp_mask           30 arch/arm64/kernel/module.c 		gfp_mask |= __GFP_NOWARN;
gfp_mask           37 arch/arm64/kernel/module.c 				module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
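
The arm and arm64 module_alloc() hits above show a common first-try-quietly idiom: the initial, range-restricted attempt adds __GFP_NOWARN so its expected failure stays silent, and only the fallback attempt is allowed to warn. A minimal sketch of the same idiom, as a hypothetical helper rather than the kernel's actual module_alloc():

	/* Hypothetical helper illustrating the quiet-first-attempt idiom. */
	static void *alloc_quiet_then_loud(size_t size)
	{
		void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (!p)
			p = kmalloc(size, GFP_KERNEL);	/* may warn on failure */
		return p;
	}
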
gfp_mask           87 arch/s390/pci/pci_clp.c static void *clp_alloc_block(gfp_t gfp_mask)
gfp_mask           89 arch/s390/pci/pci_clp.c 	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
gfp_mask          381 arch/um/drivers/net_kern.c 			  struct transport *transport, gfp_t gfp_mask)
gfp_mask          390 arch/um/drivers/net_kern.c 	device = kzalloc(sizeof(*device), gfp_mask);
gfp_mask          560 arch/um/drivers/net_kern.c 			   void **init_out, char **mac_out, gfp_t gfp_mask)
gfp_mask          574 arch/um/drivers/net_kern.c 	*init_out = kmalloc(transport->setup_size, gfp_mask);
gfp_mask          212 arch/x86/platform/efi/efi_64.c 	gfp_t gfp_mask;
gfp_mask          217 arch/x86/platform/efi/efi_64.c 	gfp_mask = GFP_KERNEL | __GFP_ZERO;
gfp_mask          218 arch/x86/platform/efi/efi_64.c 	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
gfp_mask           38 block/bio-integrity.c 						  gfp_t gfp_mask,
gfp_mask           46 block/bio-integrity.c 		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
gfp_mask           49 block/bio-integrity.c 		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
gfp_mask           61 block/bio-integrity.c 		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
gfp_mask          407 block/bio-integrity.c 			gfp_t gfp_mask)
gfp_mask          414 block/bio-integrity.c 	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
gfp_mask          169 block/bio.c    struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
gfp_mask          206 block/bio.c    		bvl = mempool_alloc(pool, gfp_mask);
gfp_mask          209 block/bio.c    		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
gfp_mask          223 block/bio.c    		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
gfp_mask          429 block/bio.c    struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
gfp_mask          432 block/bio.c    	gfp_t saved_gfp = gfp_mask;
gfp_mask          445 block/bio.c    			    gfp_mask);
gfp_mask          478 block/bio.c    			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
gfp_mask          480 block/bio.c    		p = mempool_alloc(&bs->bio_pool, gfp_mask);
gfp_mask          481 block/bio.c    		if (!p && gfp_mask != saved_gfp) {
gfp_mask          483 block/bio.c    			gfp_mask = saved_gfp;
gfp_mask          484 block/bio.c    			p = mempool_alloc(&bs->bio_pool, gfp_mask);
gfp_mask          500 block/bio.c    		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
gfp_mask          501 block/bio.c    		if (!bvl && gfp_mask != saved_gfp) {
gfp_mask          503 block/bio.c    			gfp_mask = saved_gfp;
gfp_mask          504 block/bio.c    			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
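
bio_alloc_bioset() above saves the caller's mask, strips __GFP_DIRECT_RECLAIM for the first mempool attempt (so it cannot block while bios sit queued on current->bio_list), punts those bios to the rescuer workqueue, and only then retries with the original mask. A sketch of that save-and-retry idiom, assuming an already-initialized mempool_t:

	/* Sketch: non-blocking first attempt, full mask only on retry. */
	static void *alloc_nowait_then_block(mempool_t *pool, gfp_t gfp_mask)
	{
		gfp_t saved_gfp = gfp_mask;
		void *p;

		gfp_mask &= ~__GFP_DIRECT_RECLAIM;	/* first try: no reclaim */
		p = mempool_alloc(pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp)
			p = mempool_alloc(pool, saved_gfp);	/* now may block */
		return p;
	}
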
gfp_mask          657 block/bio.c    struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
gfp_mask          661 block/bio.c    	b = bio_alloc_bioset(gfp_mask, 0, bs);
gfp_mask          670 block/bio.c    		ret = bio_integrity_clone(b, bio, gfp_mask);
gfp_mask         1145 block/bio.c    					       gfp_t gfp_mask)
gfp_mask         1151 block/bio.c    	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
gfp_mask         1276 block/bio.c    			      gfp_t gfp_mask)
gfp_mask         1286 block/bio.c    	bmd = bio_alloc_map_data(iter, gfp_mask);
gfp_mask         1302 block/bio.c    	bio = bio_kmalloc(gfp_mask, nr_pages);
gfp_mask         1331 block/bio.c    			page = alloc_page(q->bounce_gfp | gfp_mask);
gfp_mask         1392 block/bio.c    			     gfp_t gfp_mask)
gfp_mask         1401 block/bio.c    	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
gfp_mask         1518 block/bio.c    			 gfp_t gfp_mask)
gfp_mask         1529 block/bio.c    	bio = bio_kmalloc(gfp_mask, nr_pages);
gfp_mask         1600 block/bio.c    			  gfp_t gfp_mask, int reading)
gfp_mask         1616 block/bio.c    	bio = bio_kmalloc(gfp_mask, nr_pages);
gfp_mask         1627 block/bio.c    		page = alloc_page(q->bounce_gfp | gfp_mask);
gfp_mask          146 block/blk-cgroup.c 				   gfp_t gfp_mask)
gfp_mask          152 block/blk-cgroup.c 	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
gfp_mask          156 block/blk-cgroup.c 	if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
gfp_mask          159 block/blk-cgroup.c 	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
gfp_mask          160 block/blk-cgroup.c 	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
gfp_mask          178 block/blk-cgroup.c 		pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
gfp_mask          389 block/blk-core.c struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
gfp_mask          391 block/blk-core.c 	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
gfp_mask          476 block/blk-core.c struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
gfp_mask          482 block/blk-core.c 				gfp_mask | __GFP_ZERO, node_id);
gfp_mask          488 block/blk-core.c 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
gfp_mask          496 block/blk-core.c 	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
gfp_mask         1617 block/blk-core.c 		      struct bio_set *bs, gfp_t gfp_mask,
gfp_mask         1627 block/blk-core.c 		bio = bio_clone_fast(bio_src, gfp_mask, bs);
gfp_mask          433 block/blk-flush.c int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
gfp_mask          456 block/blk-flush.c 	bio = bio_alloc(gfp_mask, 0);
gfp_mask          373 block/blk-ioc.c 			     gfp_t gfp_mask)
gfp_mask          379 block/blk-ioc.c 	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
gfp_mask          384 block/blk-ioc.c 	if (radix_tree_maybe_preload(gfp_mask) < 0) {
gfp_mask           26 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, int flags,
gfp_mask           63 block/blk-lib.c 		bio = blk_next_bio(bio, 0, gfp_mask);
gfp_mask           98 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
gfp_mask          105 block/blk-lib.c 	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
gfp_mask          132 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
gfp_mask          157 block/blk-lib.c 		bio = blk_next_bio(bio, 1, gfp_mask);
gfp_mask          193 block/blk-lib.c 				sector_t nr_sects, gfp_t gfp_mask,
gfp_mask          201 block/blk-lib.c 	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
gfp_mask          213 block/blk-lib.c 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
gfp_mask          233 block/blk-lib.c 		bio = blk_next_bio(bio, 0, gfp_mask);
gfp_mask          269 block/blk-lib.c 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
gfp_mask          285 block/blk-lib.c 				   gfp_mask);
gfp_mask          325 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
gfp_mask          335 block/blk-lib.c 	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
gfp_mask          340 block/blk-lib.c 	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
gfp_mask          359 block/blk-lib.c 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
gfp_mask          376 block/blk-lib.c 						  gfp_mask, &bio, flags);
gfp_mask          379 block/blk-lib.c 						gfp_mask, &bio);
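
All of the blk-lib helpers above thread the caller's gfp_mask into every bio of the chain they build; from ordinary process context GFP_KERNEL is the usual choice. A usage sketch:

	/* Usage sketch: discard a sector range from process context. */
	static int demo_discard(struct block_device *bdev, sector_t sector,
				sector_t nr_sects)
	{
		return blkdev_issue_discard(bdev, sector, nr_sects,
					    GFP_KERNEL, 0);
	}
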
gfp_mask           66 block/blk-map.c 		gfp_t gfp_mask, bool copy)
gfp_mask           73 block/blk-map.c 		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
gfp_mask           75 block/blk-map.c 		bio = bio_map_user_iov(q, iter, gfp_mask);
gfp_mask          122 block/blk-map.c 			const struct iov_iter *iter, gfp_t gfp_mask)
gfp_mask          142 block/blk-map.c 		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
gfp_mask          163 block/blk-map.c 		    unsigned long len, gfp_t gfp_mask)
gfp_mask          172 block/blk-map.c 	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
gfp_mask          222 block/blk-map.c 		    unsigned int len, gfp_t gfp_mask)
gfp_mask          237 block/blk-map.c 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
gfp_mask          239 block/blk-map.c 		bio = bio_map_kern(q, kbuf, len, gfp_mask);
gfp_mask          209 block/blk-zoned.c static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
gfp_mask          211 block/blk-zoned.c 	struct bio *bio = bio_alloc(gfp_mask, 0);
gfp_mask          255 block/blk-zoned.c 		       gfp_t gfp_mask)
gfp_mask          275 block/blk-zoned.c 		return __blkdev_reset_all_zones(bdev, gfp_mask);
gfp_mask          289 block/blk-zoned.c 		bio = blk_next_bio(bio, 0, gfp_mask);
gfp_mask          283 block/blk.h    			     gfp_t gfp_mask);
gfp_mask          286 block/blk.h    int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
gfp_mask          300 block/blk.h    static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
gfp_mask          304 block/blk.h    		create_task_io_context(current, gfp_mask, node);
gfp_mask           95 block/bounce.c static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
gfp_mask           97 block/bounce.c 	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
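
mempool_alloc_pages_isa() above is a mempool allocator callback that ORs a zone modifier into whatever mask the pool passes down. A sketch of the same shape, with pool_data carrying the page order as in mempool_alloc_pages():

	/* Sketch of a mempool page allocator forcing a DMA-capable zone. */
	static void *demo_alloc_pages_dma(gfp_t gfp_mask, void *pool_data)
	{
		int order = (int)(long)pool_data;

		return alloc_pages(gfp_mask | GFP_DMA, order);
	}
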
gfp_mask          217 block/bounce.c static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
gfp_mask          246 block/bounce.c 	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
gfp_mask          273 block/bounce.c 		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
gfp_mask         1012 drivers/base/devres.c 				  gfp_t gfp_mask, unsigned int order)
gfp_mask         1017 drivers/base/devres.c 	addr = __get_free_pages(gfp_mask, order);
gfp_mask         1427 drivers/block/drbd/drbd_int.h extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
gfp_mask          140 drivers/block/drbd/drbd_main.c struct bio *bio_alloc_drbd(gfp_t gfp_mask)
gfp_mask          145 drivers/block/drbd/drbd_main.c 		return bio_alloc(gfp_mask, 1);
gfp_mask          147 drivers/block/drbd/drbd_main.c 	bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
gfp_mask          359 drivers/block/drbd/drbd_receiver.c 		    unsigned int request_size, unsigned int payload_size, gfp_t gfp_mask) __must_hold(local)
gfp_mask          369 drivers/block/drbd/drbd_receiver.c 	peer_req = mempool_alloc(&drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
gfp_mask          371 drivers/block/drbd/drbd_receiver.c 		if (!(gfp_mask & __GFP_NOWARN))
gfp_mask          378 drivers/block/drbd/drbd_receiver.c 					gfpflags_allow_blocking(gfp_mask));
gfp_mask           62 drivers/connector/connector.c 	gfp_t gfp_mask)
gfp_mask           96 drivers/connector/connector.c 	skb = nlmsg_new(size, gfp_mask);
gfp_mask          114 drivers/connector/connector.c 					 gfp_mask);
gfp_mask          116 drivers/connector/connector.c 			!gfpflags_allow_blocking(gfp_mask));
gfp_mask          122 drivers/connector/connector.c 	gfp_t gfp_mask)
gfp_mask          124 drivers/connector/connector.c 	return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask);
gfp_mask          474 drivers/firewire/core-cdev.c 			       struct client_resource *resource, gfp_t gfp_mask)
gfp_mask          476 drivers/firewire/core-cdev.c 	bool preload = gfpflags_allow_blocking(gfp_mask);
gfp_mask          481 drivers/firewire/core-cdev.c 		idr_preload(gfp_mask);
gfp_mask          710 drivers/gpu/drm/i915/gvt/gtt.c static void *alloc_spt(gfp_t gfp_mask)
gfp_mask          714 drivers/gpu/drm/i915/gvt/gtt.c 	spt = kzalloc(sizeof(*spt), gfp_mask);
gfp_mask          718 drivers/gpu/drm/i915/gvt/gtt.c 	spt->shadow_page.page = alloc_page(gfp_mask);
gfp_mask          362 drivers/gpu/drm/ttm/ttm_tt.c 		gfp_t gfp_mask = mapping_gfp_mask(swap_space);
gfp_mask          364 drivers/gpu/drm/ttm/ttm_tt.c 		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
gfp_mask          365 drivers/gpu/drm/ttm/ttm_tt.c 		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
gfp_mask          416 drivers/gpu/drm/ttm/ttm_tt.c 		gfp_t gfp_mask = mapping_gfp_mask(swap_space);
gfp_mask          418 drivers/gpu/drm/ttm/ttm_tt.c 		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
gfp_mask          424 drivers/gpu/drm/ttm/ttm_tt.c 		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
gfp_mask          308 drivers/greybus/es2.c static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
gfp_mask          335 drivers/greybus/es2.c 	return usb_alloc_urb(0, gfp_mask);
gfp_mask          391 drivers/greybus/es2.c 			struct gb_message *message, gfp_t gfp_mask)
gfp_mask          411 drivers/greybus/es2.c 	urb = next_free_urb(es2, gfp_mask);
gfp_mask          433 drivers/greybus/es2.c 	retval = usb_submit_urb(urb, gfp_mask);
gfp_mask         1005 drivers/infiniband/core/mad.c 				size_t mad_size, gfp_t gfp_mask)
gfp_mask         1019 drivers/infiniband/core/mad.c 		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
gfp_mask         1053 drivers/infiniband/core/mad.c 					    gfp_t gfp_mask,
gfp_mask         1084 drivers/infiniband/core/mad.c 	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
gfp_mask         1120 drivers/infiniband/core/mad.c 		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
gfp_mask          558 drivers/infiniband/core/multicast.c 					 union ib_gid *mgid, gfp_t gfp_mask)
gfp_mask          573 drivers/infiniband/core/multicast.c 	group = kzalloc(sizeof *group, gfp_mask);
gfp_mask          610 drivers/infiniband/core/multicast.c 		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
gfp_mask          624 drivers/infiniband/core/multicast.c 	member = kmalloc(sizeof *member, gfp_mask);
gfp_mask          639 drivers/infiniband/core/multicast.c 				      &rec->mgid, gfp_mask);
gfp_mask           55 drivers/infiniband/core/sa.h 			     unsigned long timeout_ms, gfp_t gfp_mask,
gfp_mask          832 drivers/infiniband/core/sa_query.c static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
gfp_mask          845 drivers/infiniband/core/sa_query.c 	skb = nlmsg_new(len, gfp_mask);
gfp_mask          863 drivers/infiniband/core/sa_query.c 	return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
gfp_mask          866 drivers/infiniband/core/sa_query.c static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
gfp_mask          885 drivers/infiniband/core/sa_query.c 	ret = ib_nl_send_msg(query, gfp_mask);
gfp_mask         1295 drivers/infiniband/core/sa_query.c static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
gfp_mask         1321 drivers/infiniband/core/sa_query.c 					    gfp_mask,
gfp_mask         1363 drivers/infiniband/core/sa_query.c 		    gfp_t gfp_mask)
gfp_mask         1369 drivers/infiniband/core/sa_query.c 	ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
gfp_mask         1381 drivers/infiniband/core/sa_query.c 			if (!ib_nl_make_request(query, gfp_mask))
gfp_mask         1546 drivers/infiniband/core/sa_query.c 		       unsigned long timeout_ms, gfp_t gfp_mask,
gfp_mask         1571 drivers/infiniband/core/sa_query.c 	query = kzalloc(sizeof(*query), gfp_mask);
gfp_mask         1585 drivers/infiniband/core/sa_query.c 				kmalloc(sizeof(*query->conv_pr), gfp_mask);
gfp_mask         1593 drivers/infiniband/core/sa_query.c 	ret = alloc_mad(&query->sa_query, gfp_mask);
gfp_mask         1629 drivers/infiniband/core/sa_query.c 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
gfp_mask         1700 drivers/infiniband/core/sa_query.c 			    unsigned long timeout_ms, gfp_t gfp_mask,
gfp_mask         1725 drivers/infiniband/core/sa_query.c 	query = kzalloc(sizeof(*query), gfp_mask);
gfp_mask         1730 drivers/infiniband/core/sa_query.c 	ret = alloc_mad(&query->sa_query, gfp_mask);
gfp_mask         1753 drivers/infiniband/core/sa_query.c 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
gfp_mask         1797 drivers/infiniband/core/sa_query.c 			     unsigned long timeout_ms, gfp_t gfp_mask,
gfp_mask         1817 drivers/infiniband/core/sa_query.c 	query = kzalloc(sizeof(*query), gfp_mask);
gfp_mask         1822 drivers/infiniband/core/sa_query.c 	ret = alloc_mad(&query->sa_query, gfp_mask);
gfp_mask         1845 drivers/infiniband/core/sa_query.c 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
gfp_mask         1888 drivers/infiniband/core/sa_query.c 			      unsigned long timeout_ms, gfp_t gfp_mask,
gfp_mask         1914 drivers/infiniband/core/sa_query.c 	query = kzalloc(sizeof(*query), gfp_mask);
gfp_mask         1919 drivers/infiniband/core/sa_query.c 	ret = alloc_mad(&query->sa_query, gfp_mask);
gfp_mask         1943 drivers/infiniband/core/sa_query.c 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
gfp_mask         2063 drivers/infiniband/core/sa_query.c 	gfp_t gfp_mask = GFP_KERNEL;
gfp_mask         2068 drivers/infiniband/core/sa_query.c 	query = kzalloc(sizeof(*query), gfp_mask);
gfp_mask         2076 drivers/infiniband/core/sa_query.c 	ret = alloc_mad(&query->sa_query, gfp_mask);
gfp_mask         2093 drivers/infiniband/core/sa_query.c 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
gfp_mask          909 drivers/infiniband/core/uverbs_main.c 			alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
gfp_mask          280 drivers/infiniband/hw/hns/hns_roce_hem.c 					       gfp_t gfp_mask)
gfp_mask          288 drivers/infiniband/hw/hns/hns_roce_hem.c 	WARN_ON(gfp_mask & __GFP_HIGHMEM);
gfp_mask          291 drivers/infiniband/hw/hns/hns_roce_hem.c 		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
gfp_mask          303 drivers/infiniband/hw/hns/hns_roce_hem.c 				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
gfp_mask          323 drivers/infiniband/hw/hns/hns_roce_hem.c 				&sg_dma_address(mem), gfp_mask);
gfp_mask          610 drivers/infiniband/hw/mthca/mthca_cmd.c 					  gfp_t gfp_mask)
gfp_mask          614 drivers/infiniband/hw/mthca/mthca_cmd.c 	mailbox = kmalloc(sizeof *mailbox, gfp_mask);
gfp_mask          618 drivers/infiniband/hw/mthca/mthca_cmd.c 	mailbox->buf = dma_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
gfp_mask          252 drivers/infiniband/hw/mthca/mthca_cmd.h 					  gfp_t gfp_mask);
gfp_mask          107 drivers/infiniband/hw/mthca/mthca_memfree.c static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
gfp_mask          115 drivers/infiniband/hw/mthca/mthca_memfree.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
gfp_mask          124 drivers/infiniband/hw/mthca/mthca_memfree.c 				    int order, gfp_t gfp_mask)
gfp_mask          127 drivers/infiniband/hw/mthca/mthca_memfree.c 				       gfp_mask);
gfp_mask          138 drivers/infiniband/hw/mthca/mthca_memfree.c 				  gfp_t gfp_mask, int coherent)
gfp_mask          146 drivers/infiniband/hw/mthca/mthca_memfree.c 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
gfp_mask          148 drivers/infiniband/hw/mthca/mthca_memfree.c 	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
gfp_mask          160 drivers/infiniband/hw/mthca/mthca_memfree.c 					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
gfp_mask          176 drivers/infiniband/hw/mthca/mthca_memfree.c 						       cur_order, gfp_mask);
gfp_mask          179 drivers/infiniband/hw/mthca/mthca_memfree.c 						    cur_order, gfp_mask);
gfp_mask           83 drivers/infiniband/hw/mthca/mthca_memfree.h 				  gfp_t gfp_mask, int coherent);
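
The mthca ICM allocator above (like hns_roce before it and mlx4 after it) strips __GFP_HIGHMEM and __GFP_NOWARN from the caller's mask before kmalloc'ing metadata: highmem may be fine for the data pages, but anything accessed through its kernel mapping must not be highmem. A sketch; 'struct demo_icm' is hypothetical:

	struct demo_icm {
		struct list_head chunk_list;	/* hypothetical metadata */
		int refcount;
	};

	/* Metadata needs a kernel mapping: sanitize the caller's mask. */
	static struct demo_icm *demo_icm_alloc(gfp_t gfp_mask)
	{
		return kmalloc(sizeof(struct demo_icm),
			       gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	}
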
gfp_mask          232 drivers/infiniband/ulp/srp/ib_srp.c 				   gfp_t gfp_mask,
gfp_mask          237 drivers/infiniband/ulp/srp/ib_srp.c 	iu = kmalloc(sizeof *iu, gfp_mask);
gfp_mask          241 drivers/infiniband/ulp/srp/ib_srp.c 	iu->buf = kzalloc(size, gfp_mask);
gfp_mask           35 drivers/isdn/mISDN/socket.c _l2_alloc_skb(unsigned int len, gfp_t gfp_mask)
gfp_mask           39 drivers/isdn/mISDN/socket.c 	skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
gfp_mask         1866 drivers/lightnvm/pblk-core.c 		      void (*work)(struct work_struct *), gfp_t gfp_mask,
gfp_mask         1871 drivers/lightnvm/pblk-core.c 	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
gfp_mask          801 drivers/lightnvm/pblk.h 		     void (*work)(struct work_struct *), gfp_t gfp_mask,
gfp_mask          713 drivers/md/bcache/btree.c 	if (sc->gfp_mask & __GFP_IO)
gfp_mask         1921 drivers/md/bcache/super.c 			sc.gfp_mask = GFP_KERNEL;
gfp_mask          832 drivers/md/bcache/sysfs.c 		sc.gfp_mask = GFP_KERNEL;
gfp_mask          268 drivers/md/bcache/util.c int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
gfp_mask          278 drivers/md/bcache/util.c 		bv->bv_page = alloc_page(gfp_mask);
gfp_mask          587 drivers/md/bcache/util.h int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
gfp_mask          375 drivers/md/dm-bufio.c static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
gfp_mask          380 drivers/md/dm-bufio.c 		return kmem_cache_alloc(c->slab_cache, gfp_mask);
gfp_mask          384 drivers/md/dm-bufio.c 	    gfp_mask & __GFP_NORETRY) {
gfp_mask          386 drivers/md/dm-bufio.c 		return (void *)__get_free_pages(gfp_mask,
gfp_mask          401 drivers/md/dm-bufio.c 	if (gfp_mask & __GFP_NORETRY) {
gfp_mask          403 drivers/md/dm-bufio.c 		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
gfp_mask          409 drivers/md/dm-bufio.c 	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
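
alloc_buffer_data() above works through a fallback chain gated on __GFP_NORETRY: slab, then the page allocator, then vmalloc. A condensed sketch, assuming a virtually contiguous buffer is acceptable, as it is for dm-bufio's data blocks:

	/* Condensed sketch of the __GFP_NORETRY fallback chain. */
	static void *demo_alloc_block(size_t size, gfp_t gfp_mask)
	{
		void *ptr = kmalloc(size,
				    gfp_mask | __GFP_NORETRY | __GFP_NOWARN);

		if (ptr)
			return ptr;
		return __vmalloc(size, gfp_mask, PAGE_KERNEL);	/* last resort */
	}
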
gfp_mask          442 drivers/md/dm-bufio.c static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
gfp_mask          444 drivers/md/dm-bufio.c 	struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
gfp_mask          451 drivers/md/dm-bufio.c 	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
gfp_mask         1553 drivers/md/dm-bufio.c 			    gfp_t gfp_mask)
gfp_mask         1564 drivers/md/dm-bufio.c 			if (__try_evict_buffer(b, gfp_mask))
gfp_mask         1581 drivers/md/dm-bufio.c 	if (sc->gfp_mask & __GFP_FS)
gfp_mask         1586 drivers/md/dm-bufio.c 	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
gfp_mask         1301 drivers/md/dm-crypt.c 	gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
gfp_mask         1306 drivers/md/dm-crypt.c 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
gfp_mask         1318 drivers/md/dm-crypt.c 		page = mempool_alloc(&cc->page_pool, gfp_mask);
gfp_mask         1322 drivers/md/dm-crypt.c 			gfp_mask |= __GFP_DIRECT_RECLAIM;
gfp_mask         1340 drivers/md/dm-crypt.c 	if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))
gfp_mask         2090 drivers/md/dm-crypt.c static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
gfp_mask         2096 drivers/md/dm-crypt.c 	    likely(gfp_mask & __GFP_NORETRY))
gfp_mask         2099 drivers/md/dm-crypt.c 	page = alloc_page(gfp_mask);
gfp_mask          343 drivers/md/dm-rq.c 		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
gfp_mask          347 drivers/md/dm-rq.c 	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
gfp_mask          569 drivers/md/dm-verity-fec.c static void *fec_rs_alloc(gfp_t gfp_mask, void *pool_data)
gfp_mask          573 drivers/md/dm-verity-fec.c 	return init_rs_gfp(8, 0x11d, 0, 1, v->fec->roots, gfp_mask);
gfp_mask          598 drivers/md/dm.c 				      unsigned target_bio_nr, gfp_t gfp_mask)
gfp_mask          606 drivers/md/dm.c 		struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
gfp_mask          253 drivers/md/md.c struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
gfp_mask          257 drivers/md/md.c 		return bio_alloc(gfp_mask, nr_iovecs);
gfp_mask          259 drivers/md/md.c 	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
gfp_mask          734 drivers/md/md.h extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
gfp_mask          205 drivers/md/raid5-ppl.c static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
gfp_mask          210 drivers/md/raid5-ppl.c 	io = kmem_cache_alloc(kc, gfp_mask);
gfp_mask          214 drivers/md/raid5-ppl.c 	io->header_page = alloc_page(gfp_mask);
gfp_mask          547 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			      u16 index, gfp_t gfp_mask)
gfp_mask          555 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
gfp_mask          683 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
gfp_mask          687 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
gfp_mask          688 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			return (void *)__get_free_page(gfp_mask);
gfp_mask          693 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
gfp_mask          827 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			       u16 index, gfp_t gfp_mask)
gfp_mask          834 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	data = bnx2x_frag_alloc(fp, gfp_mask);
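
bnx2x_frag_alloc() above uses gfpflags_allow_blocking() to pick an allocator: a blocking caller may hit the page allocator directly, while atomic refill paths stay on an atomic-safe path. A sketch of the split; the frag-cache fallback is illustrative, not the driver's exact code:

	/* Sketch: choose the allocator by whether the mask may block. */
	static void *demo_frag_alloc(unsigned int fragsz, gfp_t gfp_mask)
	{
		if (gfpflags_allow_blocking(gfp_mask))
			return (void *)__get_free_page(gfp_mask);

		return netdev_alloc_frag(fragsz);	/* per-CPU, atomic-safe */
	}
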
gfp_mask         1824 drivers/net/ethernet/marvell/mvneta.c 			    gfp_t gfp_mask)
gfp_mask         1829 drivers/net/ethernet/marvell/mvneta.c 	page = __dev_alloc_page(gfp_mask);
gfp_mask          634 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c 			     gfp_t gfp_mask)
gfp_mask           99 drivers/net/ethernet/mellanox/mlx4/icm.c 				gfp_t gfp_mask, int node)
gfp_mask          103 drivers/net/ethernet/mellanox/mlx4/icm.c 	page = alloc_pages_node(node, gfp_mask, order);
gfp_mask          105 drivers/net/ethernet/mellanox/mlx4/icm.c 		page = alloc_pages(gfp_mask, order);
gfp_mask          115 drivers/net/ethernet/mellanox/mlx4/icm.c 				   int order, gfp_t gfp_mask)
gfp_mask          118 drivers/net/ethernet/mellanox/mlx4/icm.c 				       &buf->dma_addr, gfp_mask);
gfp_mask          133 drivers/net/ethernet/mellanox/mlx4/icm.c 				gfp_t gfp_mask, int coherent)
gfp_mask          142 drivers/net/ethernet/mellanox/mlx4/icm.c 	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
gfp_mask          145 drivers/net/ethernet/mellanox/mlx4/icm.c 			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
gfp_mask          149 drivers/net/ethernet/mellanox/mlx4/icm.c 			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
gfp_mask          162 drivers/net/ethernet/mellanox/mlx4/icm.c 					     gfp_mask & ~(__GFP_HIGHMEM |
gfp_mask          167 drivers/net/ethernet/mellanox/mlx4/icm.c 						gfp_mask & ~(__GFP_HIGHMEM |
gfp_mask          182 drivers/net/ethernet/mellanox/mlx4/icm.c 		mask = gfp_mask;
gfp_mask           81 drivers/net/ethernet/mellanox/mlx4/icm.h 				gfp_t gfp_mask, int coherent);
gfp_mask          301 drivers/net/virtio_net.c static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
gfp_mask          310 drivers/net/virtio_net.c 		p = alloc_page(gfp_mask);
gfp_mask          196 drivers/net/wireless/ath/ath.h 				gfp_t gfp_mask);
gfp_mask           31 drivers/net/wireless/ath/main.c 				gfp_t gfp_mask)
gfp_mask           49 drivers/net/wireless/ath/main.c 	skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask);
gfp_mask          178 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 						 int headroom, gfp_t gfp_mask)
gfp_mask          183 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 		      gfp_mask);
gfp_mask          193 drivers/net/wireless/intel/ipw2x00/libipw_tx.c 						    gfp_mask);
gfp_mask          988 drivers/net/wireless/intel/iwlegacy/3945-mac.c 	gfp_t gfp_mask = priority;
gfp_mask          999 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			gfp_mask |= __GFP_NOWARN;
gfp_mask         1002 drivers/net/wireless/intel/iwlegacy/3945-mac.c 			gfp_mask |= __GFP_COMP;
gfp_mask         1005 drivers/net/wireless/intel/iwlegacy/3945-mac.c 		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
gfp_mask          307 drivers/net/wireless/intel/iwlegacy/4965-mac.c 	gfp_t gfp_mask = priority;
gfp_mask          318 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			gfp_mask |= __GFP_NOWARN;
gfp_mask          321 drivers/net/wireless/intel/iwlegacy/4965-mac.c 			gfp_mask |= __GFP_COMP;
gfp_mask          324 drivers/net/wireless/intel/iwlegacy/4965-mac.c 		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
gfp_mask          423 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	gfp_t gfp_mask = priority;
gfp_mask          426 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		gfp_mask |= __GFP_COMP;
gfp_mask          429 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
gfp_mask          438 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
gfp_mask          553 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 		gfp_t gfp_mask = GFP_KERNEL;
gfp_mask          557 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			gfp_mask |= __GFP_NOWARN;
gfp_mask          575 drivers/net/wireless/intel/iwlwifi/pcie/rx.c 			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
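
The iwlegacy and iwlwifi rx paths above build their mask incrementally: __GFP_NOWARN while a fallback still exists, and __GFP_COMP whenever the rx page order is nonzero so the buffer is a proper compound page. A sketch with 'order' and 'can_sleep' as assumed inputs:

	/* Sketch of the incremental rx-page mask construction. */
	static struct page *demo_rx_alloc_page(unsigned int order,
					       bool can_sleep)
	{
		gfp_t gfp_mask = can_sleep ? GFP_KERNEL : GFP_ATOMIC;

		gfp_mask |= __GFP_NOWARN;	/* a fallback handles failure */
		if (order > 0)
			gfp_mask |= __GFP_COMP;	/* high-order: compound page */
		return alloc_pages(gfp_mask, order);
	}
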
gfp_mask          401 drivers/net/wireless/realtek/rtlwifi/usb.c 			      struct urb *urb, gfp_t gfp_mask)
gfp_mask          405 drivers/net/wireless/realtek/rtlwifi/usb.c 	buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask,
gfp_mask          517 drivers/net/wireless/zydas/zd1201.c 	gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC;
gfp_mask          524 drivers/net/wireless/zydas/zd1201.c 		request = kmalloc(16, gfp_mask);
gfp_mask          527 drivers/net/wireless/zydas/zd1201.c 		urb = usb_alloc_urb(0, gfp_mask);
gfp_mask          553 drivers/net/wireless/zydas/zd1201.c 		err = usb_submit_urb(urb, gfp_mask);
gfp_mask          558 drivers/net/wireless/zydas/zd1201.c 	request = kmalloc(16, gfp_mask);
gfp_mask          561 drivers/net/wireless/zydas/zd1201.c 	urb = usb_alloc_urb(0, gfp_mask);
gfp_mask          574 drivers/net/wireless/zydas/zd1201.c 	err = usb_submit_urb(urb, gfp_mask);
gfp_mask          363 drivers/scsi/aic94xx/aic94xx_hwi.h 				     gfp_t gfp_mask);
gfp_mask          371 drivers/scsi/hosts.c 	gfp_t gfp_mask = GFP_KERNEL;
gfp_mask          375 drivers/scsi/hosts.c 		gfp_mask |= __GFP_DMA;
gfp_mask          377 drivers/scsi/hosts.c 	shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask);
gfp_mask           76 drivers/scsi/scsi_lib.c 	gfp_t gfp_mask, int numa_node)
gfp_mask           79 drivers/scsi/scsi_lib.c 				     gfp_mask, numa_node);
gfp_mask         1877 drivers/scsi/sg.c 	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;
gfp_mask         1905 drivers/scsi/sg.c 		gfp_mask |= GFP_DMA;
gfp_mask         1917 drivers/scsi/sg.c 		schp->pages[k] = alloc_pages(gfp_mask, order);
gfp_mask          470 drivers/staging/android/ashmem.c 	if (!(sc->gfp_mask & __GFP_FS))
gfp_mask          843 drivers/staging/android/ashmem.c 				.gfp_mask = GFP_KERNEL,
gfp_mask          547 drivers/staging/android/ion/ion.c 	sc.gfp_mask = GFP_HIGHUSER;
gfp_mask          565 drivers/staging/android/ion/ion.c 	sc.gfp_mask = GFP_HIGHUSER;
gfp_mask           94 drivers/staging/android/ion/ion.h 	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
gfp_mask          283 drivers/staging/android/ion/ion.h 	gfp_t gfp_mask;
gfp_mask          288 drivers/staging/android/ion/ion.h struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
gfp_mask          300 drivers/staging/android/ion/ion.h int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
gfp_mask          273 drivers/staging/android/ion/ion_heap.c 		total += heap->ops->shrink(heap, sc->gfp_mask, 0);
gfp_mask          302 drivers/staging/android/ion/ion_heap.c 		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
gfp_mask           19 drivers/staging/android/ion/ion_page_pool.c 	return alloc_pages(pool->gfp_mask, pool->order);
gfp_mask          100 drivers/staging/android/ion/ion_page_pool.c int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
gfp_mask          109 drivers/staging/android/ion/ion_page_pool.c 		high = !!(gfp_mask & __GFP_HIGHMEM);
gfp_mask          134 drivers/staging/android/ion/ion_page_pool.c struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
gfp_mask          144 drivers/staging/android/ion/ion_page_pool.c 	pool->gfp_mask = gfp_mask | __GFP_COMP;
gfp_mask          171 drivers/staging/android/ion/ion_system_heap.c static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
gfp_mask          190 drivers/staging/android/ion/ion_system_heap.c 							 gfp_mask,
gfp_mask          195 drivers/staging/android/ion/ion_system_heap.c 							gfp_mask,
gfp_mask           59 drivers/staging/fwserial/dma_fifo.c 		   int tx_limit, int open_limit, gfp_t gfp_mask)
gfp_mask           68 drivers/staging/fwserial/dma_fifo.c 	fifo->data = kmalloc(capacity, gfp_mask);
gfp_mask           77 drivers/staging/fwserial/dma_fifo.h 		   int tx_limit, int open_limit, gfp_t gfp_mask);
gfp_mask          203 drivers/staging/rtl8192e/rtllib_tx.c 					   gfp_t gfp_mask)
gfp_mask          209 drivers/staging/rtl8192e/rtllib_tx.c 		      gfp_mask);
gfp_mask          227 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 						 gfp_t gfp_mask)
gfp_mask          233 drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c 		gfp_mask);
gfp_mask          221 drivers/staging/uwb/uwb-internal.h extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
gfp_mask          256 drivers/staging/wusbcore/wa-hc.h static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
gfp_mask          261 drivers/staging/wusbcore/wa-hc.h 	return usb_submit_urb(urb, gfp_mask);
gfp_mask          363 drivers/tty/sysrq.c 	const gfp_t gfp_mask = GFP_KERNEL;
gfp_mask          365 drivers/tty/sysrq.c 		.zonelist = node_zonelist(first_memory_node, gfp_mask),
gfp_mask          368 drivers/tty/sysrq.c 		.gfp_mask = gfp_mask,
gfp_mask          271 fs/afs/dir.c   			gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask;
gfp_mask          209 fs/afs/dir_edit.c 	gfp = vnode->vfs_inode.i_mapping->gfp_mask;
gfp_mask          241 fs/afs/dir_edit.c 			gfp = vnode->vfs_inode.i_mapping->gfp_mask;
gfp_mask          338 fs/btrfs/backref.c 			  struct share_check *sc, gfp_t gfp_mask)
gfp_mask          345 fs/btrfs/backref.c 	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
gfp_mask          391 fs/btrfs/backref.c 			  struct share_check *sc, gfp_t gfp_mask)
gfp_mask          394 fs/btrfs/backref.c 			      parent, wanted_disk_byte, count, sc, gfp_mask);
gfp_mask          402 fs/btrfs/backref.c 			    struct share_check *sc, gfp_t gfp_mask)
gfp_mask          409 fs/btrfs/backref.c 			      wanted_disk_byte, count, sc, gfp_mask);
gfp_mask           92 fs/btrfs/ulist.c struct ulist *ulist_alloc(gfp_t gfp_mask)
gfp_mask           94 fs/btrfs/ulist.c 	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
gfp_mask          186 fs/btrfs/ulist.c int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
gfp_mask          188 fs/btrfs/ulist.c 	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
gfp_mask          192 fs/btrfs/ulist.c 		    u64 *old_aux, gfp_t gfp_mask)
gfp_mask          203 fs/btrfs/ulist.c 	node = kmalloc(sizeof(*node), gfp_mask);
gfp_mask           48 fs/btrfs/ulist.h struct ulist *ulist_alloc(gfp_t gfp_mask);
gfp_mask           50 fs/btrfs/ulist.h int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
gfp_mask           52 fs/btrfs/ulist.h 		    u64 *old_aux, gfp_t gfp_mask);
gfp_mask           57 fs/btrfs/ulist.h 				      void **old_aux, gfp_t gfp_mask)
gfp_mask           61 fs/btrfs/ulist.h 	int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
gfp_mask           65 fs/btrfs/ulist.h 	return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
gfp_mask          937 fs/buffer.c    	gfp_t gfp_mask;
gfp_mask          939 fs/buffer.c    	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
gfp_mask          947 fs/buffer.c    	gfp_mask |= __GFP_NOFAIL;
gfp_mask          949 fs/buffer.c    	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
gfp_mask         1137 fs/ecryptfs/crypto.c static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
gfp_mask         1142 fs/ecryptfs/crypto.c 	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
gfp_mask          296 fs/erofs/super.c static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
gfp_mask         2156 fs/f2fs/f2fs.h 				int fgp_flags, gfp_t gfp_mask)
gfp_mask         2163 fs/f2fs/f2fs.h 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
gfp_mask          770 fs/gfs2/aops.c int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
gfp_mask         1617 fs/gfs2/glock.c 	if (!(sc->gfp_mask & __GFP_FS))
gfp_mask           15 fs/gfs2/inode.h extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
gfp_mask          169 fs/gfs2/quota.c 	if (!(sc->gfp_mask & __GFP_FS))
gfp_mask          456 fs/iomap/buffered-io.c iomap_releasepage(struct page *page, gfp_t gfp_mask)
gfp_mask          141 fs/jbd2/revoke.c 	gfp_t gfp_mask = GFP_NOFS;
gfp_mask          144 fs/jbd2/revoke.c 		gfp_mask |= __GFP_NOFAIL;
gfp_mask          145 fs/jbd2/revoke.c 	record = kmem_cache_alloc(jbd2_revoke_record_cache, gfp_mask);
gfp_mask          299 fs/jbd2/transaction.c 			     gfp_t gfp_mask)
gfp_mask          330 fs/jbd2/transaction.c 		if ((gfp_mask & __GFP_FS) == 0)
gfp_mask          331 fs/jbd2/transaction.c 			gfp_mask |= __GFP_NOFAIL;
gfp_mask          333 fs/jbd2/transaction.c 						    gfp_mask);
gfp_mask          441 fs/jbd2/transaction.c 			      gfp_t gfp_mask, unsigned int type,
gfp_mask          472 fs/jbd2/transaction.c 	err = start_this_handle(journal, handle, gfp_mask);
gfp_mask          675 fs/jbd2/transaction.c int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
gfp_mask          724 fs/jbd2/transaction.c 	ret = start_this_handle(journal, handle, gfp_mask);
gfp_mask         2073 fs/jbd2/transaction.c 				struct page *page, gfp_t gfp_mask)
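
The jbd2 hits above show GFP_NOFS being escalated to __GFP_NOFAIL once failure would leave the journal inconsistent. A sketch, with 'cannot_fail' standing in for jbd2's retry condition:

	/* Sketch: journalled metadata escalates NOFS to NOFAIL when needed. */
	static void *demo_journal_alloc(struct kmem_cache *cache,
					bool cannot_fail)
	{
		gfp_t gfp_mask = GFP_NOFS;

		if (cannot_fail)
			gfp_mask |= __GFP_NOFAIL;
		return kmem_cache_alloc(cache, gfp_mask);
	}
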
gfp_mask          173 fs/jfs/jfs_metapage.c static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
gfp_mask          175 fs/jfs/jfs_metapage.c 	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);
gfp_mask          528 fs/jfs/jfs_metapage.c static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
gfp_mask          588 fs/nfs/blocklayout/blocklayout.c 		gfp_t gfp_mask)
gfp_mask          594 fs/nfs/blocklayout/blocklayout.c 	node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
gfp_mask          613 fs/nfs/blocklayout/blocklayout.c 		gfp_t gfp_mask)
gfp_mask          632 fs/nfs/blocklayout/blocklayout.c 						lo->plh_lc_cred, gfp_mask);
gfp_mask          669 fs/nfs/blocklayout/blocklayout.c 		gfp_t gfp_mask)
gfp_mask          689 fs/nfs/blocklayout/blocklayout.c 	lseg = kzalloc(sizeof(*lseg), gfp_mask);
gfp_mask          694 fs/nfs/blocklayout/blocklayout.c 	scratch = alloc_page(gfp_mask);
gfp_mask          715 fs/nfs/blocklayout/blocklayout.c 		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
gfp_mask          176 fs/nfs/blocklayout/blocklayout.h 		struct pnfs_device *pdev, gfp_t gfp_mask);
gfp_mask          193 fs/nfs/blocklayout/blocklayout.h 		struct pnfs_block_volume *b, gfp_t gfp_mask);
gfp_mask          231 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
gfp_mask          236 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
gfp_mask          242 fs/nfs/blocklayout/dev.c 	dev = bl_resolve_deviceid(server, v, gfp_mask);
gfp_mask          353 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
gfp_mask          402 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
gfp_mask          407 fs/nfs/blocklayout/dev.c 	ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
gfp_mask          418 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
gfp_mask          431 fs/nfs/blocklayout/dev.c 				volumes, v->concat.volumes[i], gfp_mask);
gfp_mask          447 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
gfp_mask          460 fs/nfs/blocklayout/dev.c 				volumes, v->stripe.volumes[i], gfp_mask);
gfp_mask          476 fs/nfs/blocklayout/dev.c 		struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
gfp_mask          480 fs/nfs/blocklayout/dev.c 		return bl_parse_simple(server, d, volumes, idx, gfp_mask);
gfp_mask          482 fs/nfs/blocklayout/dev.c 		return bl_parse_slice(server, d, volumes, idx, gfp_mask);
gfp_mask          484 fs/nfs/blocklayout/dev.c 		return bl_parse_concat(server, d, volumes, idx, gfp_mask);
gfp_mask          486 fs/nfs/blocklayout/dev.c 		return bl_parse_stripe(server, d, volumes, idx, gfp_mask);
gfp_mask          488 fs/nfs/blocklayout/dev.c 		return bl_parse_scsi(server, d, volumes, idx, gfp_mask);
gfp_mask          497 fs/nfs/blocklayout/dev.c 		gfp_t gfp_mask)
gfp_mask          508 fs/nfs/blocklayout/dev.c 	scratch = alloc_page(gfp_mask);
gfp_mask          521 fs/nfs/blocklayout/dev.c 			  gfp_mask);
gfp_mask          531 fs/nfs/blocklayout/dev.c 	top = kzalloc(sizeof(*top), gfp_mask);
gfp_mask          535 fs/nfs/blocklayout/dev.c 	ret = bl_parse_deviceid(server, top, volumes, nr_volumes - 1, gfp_mask);
gfp_mask           54 fs/nfs/blocklayout/rpc_pipefs.c 		gfp_t gfp_mask)
gfp_mask           77 fs/nfs/blocklayout/rpc_pipefs.c 	msg->data = kzalloc(msg->len, gfp_mask);
gfp_mask         2235 fs/nfs/dir.c   	gfp_t gfp_mask = sc->gfp_mask;
gfp_mask         2237 fs/nfs/dir.c   	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
gfp_mask          299 fs/nfs/nfs4_fs.h extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
gfp_mask          495 fs/nfs/nfs4_fs.h extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
gfp_mask         1271 fs/nfs/nfs4proc.c 		gfp_t gfp_mask)
gfp_mask         1280 fs/nfs/nfs4proc.c 	p = kzalloc(sizeof(*p), gfp_mask);
gfp_mask         1284 fs/nfs/nfs4proc.c 	p->f_label = nfs4_label_alloc(server, gfp_mask);
gfp_mask         1288 fs/nfs/nfs4proc.c 	p->a_label = nfs4_label_alloc(server, gfp_mask);
gfp_mask         1293 fs/nfs/nfs4proc.c 	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
gfp_mask         3624 fs/nfs/nfs4proc.c int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
gfp_mask         3647 fs/nfs/nfs4proc.c 	calldata = kzalloc(sizeof(*calldata), gfp_mask);
gfp_mask         3658 fs/nfs/nfs4proc.c 	calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
gfp_mask         6710 fs/nfs/nfs4proc.c 		gfp_t gfp_mask)
gfp_mask         6717 fs/nfs/nfs4proc.c 	p = kzalloc(sizeof(*p), gfp_mask);
gfp_mask         6723 fs/nfs/nfs4proc.c 	p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
gfp_mask         6727 fs/nfs/nfs4proc.c 	p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
gfp_mask          105 fs/nfs/nfs4session.c 		u32 slotid, u32 seq_init, gfp_t gfp_mask)
gfp_mask          109 fs/nfs/nfs4session.c 	slot = kzalloc(sizeof(*slot), gfp_mask);
gfp_mask          121 fs/nfs/nfs4session.c 		u32 slotid, u32 seq_init, gfp_t gfp_mask)
gfp_mask          129 fs/nfs/nfs4session.c 					seq_init, gfp_mask);
gfp_mask          775 fs/nfs/nfs4state.c 		fmode_t fmode, gfp_t gfp_mask, int wait)
gfp_mask          816 fs/nfs/nfs4state.c 		nfs4_do_close(state, gfp_mask, wait);
gfp_mask         1071 fs/nfs/nfs4state.c struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
gfp_mask         1075 fs/nfs/nfs4state.c 	new = kmalloc(sizeof(*new), gfp_mask);
gfp_mask          352 fs/nfs/pnfs.h  		gfp_t gfp_mask);
gfp_mask          188 fs/nfs/pnfs_dev.c 		gfp_t gfp_mask)
gfp_mask          197 fs/nfs/pnfs_dev.c 	new = nfs4_get_device_info(server, id, cred, gfp_mask);
gfp_mask          444 fs/nilfs2/mdt.c int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
gfp_mask          456 fs/nilfs2/mdt.c 	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
gfp_mask           80 fs/nilfs2/mdt.h int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
gfp_mask           28 fs/ntfs/malloc.h static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
gfp_mask           33 fs/ntfs/malloc.h 		return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
gfp_mask           37 fs/ntfs/malloc.h 		return __vmalloc(size, gfp_mask, PAGE_KERNEL);
gfp_mask           77 fs/super.c     	if (!(sc->gfp_mask & __GFP_FS))
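
fs/super.c above, like the dm-bufio, gfs2, and ashmem shrinkers earlier in this listing, checks sc->gfp_mask before scanning: if the allocation that triggered reclaim cannot re-enter the filesystem, the shrinker bails out rather than risk recursion. A sketch with a hypothetical scan body:

	/* Sketch of the standard shrinker __GFP_FS guard. */
	static unsigned long demo_scan_objects(struct shrinker *shrink,
					       struct shrink_control *sc)
	{
		if (!(sc->gfp_mask & __GFP_FS))
			return SHRINK_STOP;	/* avoid fs recursion */

		return demo_free_objects(sc->nr_to_scan);	/* hypothetical */
	}
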
gfp_mask         1129 fs/xfs/xfs_aops.c 	gfp_t			gfp_mask)
gfp_mask         1132 fs/xfs/xfs_aops.c 	return iomap_releasepage(page, gfp_mask);
gfp_mask          344 fs/xfs/xfs_buf.c 	gfp_t			gfp_mask = xb_to_gfp(flags);
gfp_mask          355 fs/xfs/xfs_buf.c 		gfp_mask |= __GFP_ZERO;
gfp_mask          404 fs/xfs/xfs_buf.c 		page = alloc_page(gfp_mask);
gfp_mask          422 fs/xfs/xfs_buf.c 					__func__, gfp_mask);
gfp_mask         1268 fs/xfs/xfs_iops.c 	gfp_t			gfp_mask;
gfp_mask         1304 fs/xfs/xfs_iops.c 	gfp_mask = mapping_gfp_mask(inode->i_mapping);
gfp_mask         1305 fs/xfs/xfs_iops.c 	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));
gfp_mask          505 fs/xfs/xfs_qm.c 	if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
gfp_mask          134 include/linux/audit.h void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
gfp_mask          137 include/linux/audit.h extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
gfp_mask          189 include/linux/audit.h void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
gfp_mask          193 include/linux/audit.h 						   gfp_t gfp_mask, int type)
gfp_mask           39 include/linux/backing-dev.h struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
gfp_mask           40 include/linux/backing-dev.h static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
gfp_mask           42 include/linux/backing-dev.h 	return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
gfp_mask          398 include/linux/bio.h static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
gfp_mask          400 include/linux/bio.h 	return bio_alloc_bioset(gfp_mask, nr_iovecs, &fs_bio_set);
gfp_mask          403 include/linux/bio.h static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
gfp_mask          405 include/linux/bio.h 	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
gfp_mask          783 include/linux/bio.h 				      gfp_t gfp_mask)
gfp_mask          364 include/linux/blkdev.h 			      sector_t nr_sectors, gfp_t gfp_mask);
gfp_mask          858 include/linux/blkdev.h 			     struct bio_set *bs, gfp_t gfp_mask,
gfp_mask         1147 include/linux/blkdev.h struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
gfp_mask         1212 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
gfp_mask         1217 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
gfp_mask         1219 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, int flags,
gfp_mask         1226 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
gfp_mask         1229 include/linux/blkdev.h 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
gfp_mask         1232 include/linux/blkdev.h 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
gfp_mask         1239 include/linux/blkdev.h 				    gfp_mask, flags);
gfp_mask         1242 include/linux/blkdev.h 		sector_t nr_blocks, gfp_t gfp_mask)
gfp_mask         1249 include/linux/blkdev.h 				    gfp_mask, 0);
gfp_mask         1815 include/linux/blkdev.h static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
gfp_mask           49 include/linux/btree.h void *btree_alloc(gfp_t gfp_mask, void *pool_data);
gfp_mask           94 include/linux/compaction.h extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
gfp_mask           99 include/linux/connector.h int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
gfp_mask          122 include/linux/connector.h int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
gfp_mask           67 include/linux/cpuset.h extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
gfp_mask           69 include/linux/cpuset.h static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
gfp_mask           72 include/linux/cpuset.h 		return __cpuset_node_allowed(node, gfp_mask);
gfp_mask           76 include/linux/cpuset.h static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
gfp_mask           78 include/linux/cpuset.h 	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
gfp_mask           81 include/linux/cpuset.h static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
gfp_mask           84 include/linux/cpuset.h 		return __cpuset_zone_allowed(z, gfp_mask);
gfp_mask          207 include/linux/cpuset.h static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
gfp_mask          212 include/linux/cpuset.h static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
gfp_mask          217 include/linux/cpuset.h static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
gfp_mask          944 include/linux/device.h 					 gfp_t gfp_mask, unsigned int order);
gfp_mask          448 include/linux/fs.h 	gfp_t			gfp_mask;
gfp_mask          490 include/linux/gfp.h __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
gfp_mask          494 include/linux/gfp.h __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
gfp_mask          496 include/linux/gfp.h 	return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
gfp_mask          504 include/linux/gfp.h __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
gfp_mask          507 include/linux/gfp.h 	VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
gfp_mask          509 include/linux/gfp.h 	return __alloc_pages(gfp_mask, order, nid);
gfp_mask          517 include/linux/gfp.h static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
gfp_mask          523 include/linux/gfp.h 	return __alloc_pages_node(nid, gfp_mask, order);
gfp_mask          527 include/linux/gfp.h extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
gfp_mask          530 include/linux/gfp.h alloc_pages(gfp_t gfp_mask, unsigned int order)
gfp_mask          532 include/linux/gfp.h 	return alloc_pages_current(gfp_mask, order);
gfp_mask          534 include/linux/gfp.h extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
gfp_mask          537 include/linux/gfp.h #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
gfp_mask          538 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
gfp_mask          540 include/linux/gfp.h #define alloc_pages(gfp_mask, order) \
gfp_mask          541 include/linux/gfp.h 		alloc_pages_node(numa_node_id(), gfp_mask, order)
gfp_mask          542 include/linux/gfp.h #define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage)\
gfp_mask          543 include/linux/gfp.h 	alloc_pages(gfp_mask, order)
gfp_mask          544 include/linux/gfp.h #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
gfp_mask          545 include/linux/gfp.h 	alloc_pages(gfp_mask, order)
gfp_mask          547 include/linux/gfp.h #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
gfp_mask          548 include/linux/gfp.h #define alloc_page_vma(gfp_mask, vma, addr)			\
gfp_mask          549 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
gfp_mask          550 include/linux/gfp.h #define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
gfp_mask          551 include/linux/gfp.h 	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
gfp_mask          553 include/linux/gfp.h extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
gfp_mask          554 include/linux/gfp.h extern unsigned long get_zeroed_page(gfp_t gfp_mask);
gfp_mask          556 include/linux/gfp.h void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
gfp_mask          558 include/linux/gfp.h void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
gfp_mask          560 include/linux/gfp.h #define __get_free_page(gfp_mask) \
gfp_mask          561 include/linux/gfp.h 		__get_free_pages((gfp_mask), 0)
gfp_mask          563 include/linux/gfp.h #define __get_dma_pages(gfp_mask, order) \
gfp_mask          564 include/linux/gfp.h 		__get_free_pages((gfp_mask) | GFP_DMA, (order))
gfp_mask          574 include/linux/gfp.h 			     unsigned int fragsz, gfp_t gfp_mask);
gfp_mask          597 include/linux/gfp.h bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
gfp_mask          614 include/linux/gfp.h 			      unsigned migratetype, gfp_t gfp_mask);
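
A usage sketch for the page allocators declared above: one zeroed page from process context, with the matching free. alloc_page()/alloc_pages() take the same mask when a struct page is wanted instead of an address:

	/* Usage sketch: allocate and free a single zeroed page. */
	static int demo_one_page(void)
	{
		unsigned long addr = get_zeroed_page(GFP_KERNEL);

		if (!addr)
			return -ENOMEM;
		/* ... use the page via its kernel virtual address ... */
		free_page(addr);
		return 0;
	}
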
gfp_mask           36 include/linux/greybus/hd.h 			struct gb_message *message, gfp_t gfp_mask);
gfp_mask          367 include/linux/hugetlb.h struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
gfp_mask          112 include/linux/idr.h void idr_preload(gfp_t gfp_mask);
gfp_mask          160 include/linux/iomap.h int iomap_releasepage(struct page *page, gfp_t gfp_mask);
gfp_mask         1361 include/linux/jbd2.h 				     gfp_t gfp_mask, unsigned int type,
gfp_mask         1364 include/linux/jbd2.h extern int	 jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
gfp_mask          319 include/linux/kfifo.h #define kfifo_alloc(fifo, size, gfp_mask) \
gfp_mask          325 include/linux/kfifo.h 	__kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
gfp_mask          759 include/linux/kfifo.h 	size_t esize, gfp_t gfp_mask);
gfp_mask          538 include/linux/mISDNif.h mI_alloc_skb(unsigned int len, gfp_t gfp_mask)
gfp_mask          542 include/linux/mISDNif.h 	skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
gfp_mask          549 include/linux/mISDNif.h _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask)
gfp_mask          551 include/linux/mISDNif.h 	struct sk_buff	*skb = mI_alloc_skb(len, gfp_mask);
gfp_mask          566 include/linux/mISDNif.h     u_int id, u_int len, void *dp, gfp_t gfp_mask)
gfp_mask          572 include/linux/mISDNif.h 	skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
gfp_mask          376 include/linux/memcontrol.h 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
gfp_mask          379 include/linux/memcontrol.h 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
gfp_mask          747 include/linux/memcontrol.h 						gfp_t gfp_mask,
gfp_mask          863 include/linux/memcontrol.h 					gfp_t gfp_mask,
gfp_mask          873 include/linux/memcontrol.h 					      gfp_t gfp_mask,
gfp_mask         1140 include/linux/memcontrol.h 					    gfp_t gfp_mask,
gfp_mask           13 include/linux/mempool.h typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
gfp_mask           36 include/linux/mempool.h 		      gfp_t gfp_mask, int node_id);
gfp_mask           44 include/linux/mempool.h 			gfp_t gfp_mask, int nid);
gfp_mask           48 include/linux/mempool.h extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
gfp_mask           56 include/linux/mempool.h void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
gfp_mask           77 include/linux/mempool.h void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
gfp_mask           96 include/linux/mempool.h void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
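
A usage sketch for the mempool API above: a slab-backed pool with a reserve of eight objects ('demo_cache' is an assumed, already-created kmem_cache). With a mask that allows blocking, mempool_alloc() waits on the reserve rather than returning NULL:

	/* Usage sketch: create, use, and tear down a slab-backed mempool. */
	static int demo_mempool(struct kmem_cache *demo_cache)
	{
		mempool_t *pool = mempool_create(8, mempool_alloc_slab,
						 mempool_free_slab, demo_cache);
		void *obj;

		if (!pool)
			return -ENOMEM;
		obj = mempool_alloc(pool, GFP_NOIO);	/* may dip into reserve */
		mempool_free(obj, pool);
		mempool_destroy(pool);
		return 0;
	}
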
gfp_mask           37 include/linux/migrate.h 	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
gfp_mask           46 include/linux/migrate.h 		gfp_mask |= GFP_TRANSHUGE;
gfp_mask           51 include/linux/migrate.h 		gfp_mask |= __GFP_HIGHMEM;
gfp_mask           53 include/linux/migrate.h 	new_page = __alloc_pages_nodemask(gfp_mask, order,
gfp_mask          420 include/linux/mm.h 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
gfp_mask         1599 include/linux/mm.h extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
gfp_mask         2225 include/linux/mm.h void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
gfp_mask          813 include/linux/mmzone.h void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
gfp_mask          150 include/linux/netlink.h netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
gfp_mask          154 include/linux/netlink.h 	nskb = skb_clone(skb, gfp_mask);
gfp_mask           40 include/linux/oom.h 	const gfp_t gfp_mask;
gfp_mask           13 include/linux/page_owner.h 			unsigned int order, gfp_t gfp_mask);
gfp_mask           28 include/linux/page_owner.h 			unsigned int order, gfp_t gfp_mask)
gfp_mask           31 include/linux/page_owner.h 		__set_page_owner(page, order, gfp_mask);
gfp_mask           59 include/linux/page_owner.h 			unsigned int order, gfp_t gfp_mask)
gfp_mask          102 include/linux/pagemap.h 	return mapping->gfp_mask;
gfp_mask          107 include/linux/pagemap.h 		gfp_t gfp_mask)
gfp_mask          109 include/linux/pagemap.h 	return mapping_gfp_mask(mapping) & gfp_mask;
gfp_mask          118 include/linux/pagemap.h 	m->gfp_mask = mask;
gfp_mask          308 include/linux/pagemap.h 					pgoff_t offset, gfp_t gfp_mask)
gfp_mask          312 include/linux/pagemap.h 					gfp_mask);
gfp_mask          389 include/linux/pagemap.h 				pgoff_t index, gfp_t gfp_mask);
gfp_mask          608 include/linux/pagemap.h 				pgoff_t index, gfp_t gfp_mask);
gfp_mask          610 include/linux/pagemap.h 				pgoff_t index, gfp_t gfp_mask);
gfp_mask          613 include/linux/pagemap.h int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
gfp_mask          622 include/linux/pagemap.h 		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
gfp_mask          627 include/linux/pagemap.h 	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
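
mapping_gfp_mask()/mapping_gfp_constraint() above let a filesystem derive its page-cache allocation mask from the mapping rather than hard-coding one. A sketch of the common constraint idiom (helper name is illustrative; mapping and index are assumed caller-supplied):

#include <linux/pagemap.h>

static struct page *demo_grab_page(struct address_space *mapping,
				   pgoff_t index)
{
	/* Respect the mapping's mask but forbid filesystem recursion. */
	gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);

	return find_or_create_page(mapping, index, gfp);
}
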
gfp_mask          227 include/linux/radix-tree.h int radix_tree_preload(gfp_t gfp_mask);
gfp_mask          228 include/linux/radix-tree.h int radix_tree_maybe_preload(gfp_t gfp_mask);
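
radix_tree_preload()/idr_preload() take a sleeping gfp_mask up front so the later insertion can run under a spinlock with GFP_NOWAIT, drawing nodes from the per-CPU preload. A sketch (idr and lock names are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(demo_idr);
static DEFINE_SPINLOCK(demo_lock);

static int demo_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep, fills per-CPU nodes */
	spin_lock(&demo_lock);
	id = idr_alloc(&demo_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&demo_lock);
	idr_preload_end();

	return id;	/* negative errno on failure */
}
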
gfp_mask          264 include/linux/scatterlist.h 	     gfp_t gfp_mask);
gfp_mask          278 include/linux/scatterlist.h 				gfp_t gfp_mask);
gfp_mask          281 include/linux/scatterlist.h 			      unsigned long size, gfp_t gfp_mask);
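
sg_alloc_table() and friends above thread gfp_mask into the chunk allocator (sg_kmalloc() further down the listing). A sketch that builds a table over caller-supplied pages (helper name is illustrative):

#include <linux/scatterlist.h>

static int demo_build_sgt(struct sg_table *sgt, struct page **pages,
			  unsigned int n)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(sgt, n, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(sgt->sgl, sg, n, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	return 0;	/* release later with sg_free_table(sgt) */
}
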
gfp_mask          203 include/linux/sched/mm.h extern void fs_reclaim_acquire(gfp_t gfp_mask);
gfp_mask          204 include/linux/sched/mm.h extern void fs_reclaim_release(gfp_t gfp_mask);
gfp_mask          208 include/linux/sched/mm.h static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
gfp_mask          209 include/linux/sched/mm.h static inline void fs_reclaim_release(gfp_t gfp_mask) { }
gfp_mask           75 include/linux/shmem_fs.h 					pgoff_t index, gfp_t gfp_mask);
gfp_mask           13 include/linux/shrinker.h 	gfp_t gfp_mask;
gfp_mask         1058 include/linux/skbuff.h 				     gfp_t gfp_mask);
gfp_mask         1106 include/linux/skbuff.h int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
gfp_mask         1111 include/linux/skbuff.h 				   gfp_t gfp_mask, bool fclone);
gfp_mask         1113 include/linux/skbuff.h 					  gfp_t gfp_mask)
gfp_mask         1115 include/linux/skbuff.h 	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
gfp_mask         1118 include/linux/skbuff.h int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
gfp_mask         2726 include/linux/skbuff.h static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
gfp_mask         2733 include/linux/skbuff.h 	return skb_copy_ubufs(skb, gfp_mask);
gfp_mask         2737 include/linux/skbuff.h static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
gfp_mask         2741 include/linux/skbuff.h 	return skb_copy_ubufs(skb, gfp_mask);
gfp_mask         2765 include/linux/skbuff.h 				   gfp_t gfp_mask);
gfp_mask         2788 include/linux/skbuff.h 					      gfp_t gfp_mask)
gfp_mask         2790 include/linux/skbuff.h 	return __netdev_alloc_skb(NULL, length, gfp_mask);
gfp_mask         2823 include/linux/skbuff.h 				 unsigned int length, gfp_t gfp_mask);
gfp_mask         2843 include/linux/skbuff.h static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
gfp_mask         2854 include/linux/skbuff.h 	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
gfp_mask         2856 include/linux/skbuff.h 	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
gfp_mask         2872 include/linux/skbuff.h static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
gfp_mask         2874 include/linux/skbuff.h 	return __dev_alloc_pages(gfp_mask, 0);
gfp_mask         3079 include/linux/skbuff.h 					gfp_t gfp_mask)
gfp_mask         3081 include/linux/skbuff.h 	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
gfp_mask         3086 include/linux/skbuff.h 						  gfp_t gfp_mask)
gfp_mask         3088 include/linux/skbuff.h 	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
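
The skbuff helpers above all take the caller's gfp_mask; receive-path callers normally pass GFP_ATOMIC. A sketch of the clone-then-privatize-headers idiom using skb_clone() and pskb_expand_head() from the listing (helper name is illustrative):

#include <linux/skbuff.h>

static struct sk_buff *demo_private_headers(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return NULL;

	/* The clone shares the data area; take a private copy of the
	 * header region before editing it. */
	if (skb_header_cloned(clone) &&
	    pskb_expand_head(clone, 0, 0, GFP_ATOMIC)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
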
gfp_mask           54 include/linux/sunrpc/gss_api.h 		gfp_t			gfp_mask);
gfp_mask          115 include/linux/sunrpc/gss_api.h 			gfp_t			gfp_mask);
gfp_mask          297 include/linux/sunrpc/gss_krb5.h 		gfp_t gfp_mask);
gfp_mask          168 include/linux/sunrpc/xdr.h 				  struct xdr_netobj *src, gfp_t gfp_mask)
gfp_mask          170 include/linux/sunrpc/xdr.h 	dst->data = kmemdup(src->data, src->len, gfp_mask);
gfp_mask          443 include/linux/suspend.h extern unsigned long get_safe_page(gfp_t gfp_mask);
gfp_mask          353 include/linux/swap.h 					gfp_t gfp_mask, nodemask_t *mask);
gfp_mask          357 include/linux/swap.h 						  gfp_t gfp_mask,
gfp_mask          360 include/linux/swap.h 						gfp_t gfp_mask, bool noswap,
gfp_mask          516 include/linux/swap.h static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
gfp_mask          539 include/linux/swap.h 				gfp_t gfp_mask, struct vm_fault *vmf)
gfp_mask          544 include/linux/swap.h static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
gfp_mask          568 include/linux/swap.h 							gfp_t gfp_mask)
gfp_mask          650 include/linux/swap.h 					 gfp_t gfp_mask);
gfp_mask          653 include/linux/swap.h 						int node, gfp_t gfp_mask)
gfp_mask          163 include/linux/textsearch.h 						gfp_t gfp_mask)
gfp_mask          167 include/linux/textsearch.h 	conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
gfp_mask           39 include/linux/umh.h 			  gfp_t gfp_mask,
gfp_mask           99 include/linux/vmalloc.h extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
gfp_mask          101 include/linux/vmalloc.h 			unsigned long start, unsigned long end, gfp_t gfp_mask,
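
This tree's three-argument __vmalloc() takes the gfp_mask explicitly, as the prototype above shows. A sketch (helper name is illustrative; may-sleep context assumed):

#include <linux/vmalloc.h>

static void *demo_valloc(unsigned long size)
{
	/* Virtually contiguous, page-granular, zeroed allocation. */
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}
/* Release with vfree() when done. */
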
gfp_mask          861 include/net/sock.h static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
gfp_mask          863 include/net/sock.h 	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
gfp_mask          888 include/rdma/ib_mad.h 					   gfp_t gfp_mask,
gfp_mask          455 include/rdma/ib_sa.h 		       gfp_t gfp_mask,
gfp_mask          464 include/rdma/ib_sa.h 			    gfp_t gfp_mask,
gfp_mask          509 include/rdma/ib_sa.h 					     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
gfp_mask          571 include/rdma/ib_sa.h 			      unsigned long timeout_ms, gfp_t gfp_mask,
gfp_mask          174 include/trace/events/compaction.h 		gfp_t gfp_mask,
gfp_mask          177 include/trace/events/compaction.h 	TP_ARGS(order, gfp_mask, prio),
gfp_mask          181 include/trace/events/compaction.h 		__field(gfp_t, gfp_mask)
gfp_mask          187 include/trace/events/compaction.h 		__entry->gfp_mask = gfp_mask;
gfp_mask          193 include/trace/events/compaction.h 		show_gfp_flags(__entry->gfp_mask),
gfp_mask          211 include/trace/events/vmscan.h 		__entry->gfp_flags = sc->gfp_mask;
gfp_mask          196 kernel/audit.c 	gfp_t		     gfp_mask;
gfp_mask         1677 kernel/audit.c 					       gfp_t gfp_mask, int type)
gfp_mask         1681 kernel/audit.c 	ab = kmem_cache_alloc(audit_buffer_cache, gfp_mask);
gfp_mask         1685 kernel/audit.c 	ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
gfp_mask         1692 kernel/audit.c 	ab->gfp_mask = gfp_mask;
gfp_mask         1749 kernel/audit.c struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
gfp_mask         1779 kernel/audit.c 			if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
gfp_mask         1798 kernel/audit.c 	ab = audit_buffer_alloc(ctx, gfp_mask, type);
gfp_mask         1823 kernel/audit.c 	int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask);
gfp_mask         2031 kernel/audit.c 	pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
gfp_mask         2342 kernel/audit.c void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
gfp_mask         2348 kernel/audit.c 	ab = audit_log_start(ctx, gfp_mask, type);
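
audit_log_start() above may block when given a sleeping gfp_mask (note the gfpflags_allow_blocking() wait), so atomic callers pass GFP_ATOMIC. A sketch of emitting one record; AUDIT_KERNEL is the generic asynchronous record type, and the helper name and message text are illustrative:

#include <linux/audit.h>

static void demo_audit(int res)
{
	struct audit_buffer *ab;

	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_KERNEL);
	if (!ab)
		return;		/* audit disabled or allocation failed */
	audit_log_format(ab, "demo-event res=%d", res);
	audit_log_end(ab);
}
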
gfp_mask          305 kernel/cgroup/cgroup.c 			    gfp_t gfp_mask)
gfp_mask          309 kernel/cgroup/cgroup.c 	idr_preload(gfp_mask);
gfp_mask          311 kernel/cgroup/cgroup.c 	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
gfp_mask         3417 kernel/cgroup/cpuset.c bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
gfp_mask         3433 kernel/cgroup/cpuset.c 	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
gfp_mask          145 kernel/kexec_core.c 				       gfp_t gfp_mask,
gfp_mask          299 kernel/kexec_core.c static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
gfp_mask          305 kernel/kexec_core.c 	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
gfp_mask          316 kernel/kexec_core.c 					    gfp_mask);
gfp_mask          318 kernel/kexec_core.c 		if (gfp_mask & __GFP_ZERO)
gfp_mask          679 kernel/kexec_core.c 					gfp_t gfp_mask,
gfp_mask          719 kernel/kexec_core.c 		page = kimage_alloc_pages(gfp_mask, 0);
gfp_mask          759 kernel/kexec_core.c 			if (!(gfp_mask & __GFP_HIGHMEM) &&
gfp_mask          156 kernel/power/snapshot.c static void *get_image_page(gfp_t gfp_mask, int safe_needed)
gfp_mask          160 kernel/power/snapshot.c 	res = (void *)get_zeroed_page(gfp_mask);
gfp_mask          166 kernel/power/snapshot.c 			res = (void *)get_zeroed_page(gfp_mask);
gfp_mask          175 kernel/power/snapshot.c static void *__get_safe_page(gfp_t gfp_mask)
gfp_mask          184 kernel/power/snapshot.c 	return get_image_page(gfp_mask, PG_SAFE);
gfp_mask          187 kernel/power/snapshot.c unsigned long get_safe_page(gfp_t gfp_mask)
gfp_mask          189 kernel/power/snapshot.c 	return (unsigned long)__get_safe_page(gfp_mask);
gfp_mask          192 kernel/power/snapshot.c static struct page *alloc_image_page(gfp_t gfp_mask)
gfp_mask          196 kernel/power/snapshot.c 	page = alloc_page(gfp_mask);
gfp_mask          262 kernel/power/snapshot.c 	gfp_t gfp_mask;		/* mask for allocating pages */
gfp_mask          266 kernel/power/snapshot.c static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
gfp_mask          271 kernel/power/snapshot.c 	ca->gfp_mask = gfp_mask;
gfp_mask          282 kernel/power/snapshot.c 		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
gfp_mask          283 kernel/power/snapshot.c 					get_image_page(ca->gfp_mask, PG_ANY);
gfp_mask          402 kernel/power/snapshot.c static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
gfp_mask          412 kernel/power/snapshot.c 	node->data = get_image_page(gfp_mask, safe_needed);
gfp_mask          428 kernel/power/snapshot.c static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
gfp_mask          446 kernel/power/snapshot.c 		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
gfp_mask          457 kernel/power/snapshot.c 	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
gfp_mask          469 kernel/power/snapshot.c 			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
gfp_mask          498 kernel/power/snapshot.c static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
gfp_mask          520 kernel/power/snapshot.c 		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
gfp_mask          587 kernel/power/snapshot.c static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
gfp_mask          608 kernel/power/snapshot.c 			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
gfp_mask          643 kernel/power/snapshot.c static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
gfp_mask          651 kernel/power/snapshot.c 	chain_init(&ca, gfp_mask, safe_needed);
gfp_mask          654 kernel/power/snapshot.c 	error = create_mem_extents(&mem_extents, gfp_mask);
gfp_mask          661 kernel/power/snapshot.c 		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
gfp_mask          380 kernel/umh.c   		char **envp, gfp_t gfp_mask,
gfp_mask          386 kernel/umh.c   	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
gfp_mask          632 kernel/umh.c   	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
gfp_mask          634 kernel/umh.c   	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
gfp_mask           81 lib/btree.c    void *btree_alloc(gfp_t gfp_mask, void *pool_data)
gfp_mask           83 lib/btree.c    	return kmem_cache_alloc(btree_cachep, gfp_mask);
gfp_mask           79 lib/generic-radix-tree.c static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
gfp_mask           83 lib/generic-radix-tree.c 	node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
gfp_mask           90 lib/generic-radix-tree.c 	kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
gfp_mask          105 lib/generic-radix-tree.c 			   gfp_t gfp_mask)
gfp_mask          122 lib/generic-radix-tree.c 			new_node = genradix_alloc_node(gfp_mask);
gfp_mask          145 lib/generic-radix-tree.c 				new_node = genradix_alloc_node(gfp_mask);
gfp_mask          218 lib/generic-radix-tree.c 			gfp_t gfp_mask)
gfp_mask          223 lib/generic-radix-tree.c 		if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
gfp_mask           25 lib/kfifo.c    		size_t esize, gfp_t gfp_mask)
gfp_mask           43 lib/kfifo.c    	fifo->data = kmalloc_array(esize, size, gfp_mask);
gfp_mask          171 lib/kobject.c  char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
gfp_mask          179 lib/kobject.c  	path = kzalloc(len, gfp_mask);
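
kobject_get_path() above kzalloc()s the returned string with the caller's gfp_mask; the caller owns it and must kfree() it. A sketch (helper name is illustrative):

#include <linux/kobject.h>
#include <linux/slab.h>

static void demo_log_path(struct kobject *kobj)
{
	char *path = kobject_get_path(kobj, GFP_KERNEL);

	if (path) {
		pr_info("path: %s\n", path);
		kfree(path);	/* caller owns the string */
	}
}
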
gfp_mask          242 lib/radix-tree.c radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
gfp_mask          254 lib/radix-tree.c 	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
gfp_mask          263 lib/radix-tree.c 				       gfp_mask | __GFP_NOWARN);
gfp_mask          285 lib/radix-tree.c 	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
gfp_mask          331 lib/radix-tree.c static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
gfp_mask          341 lib/radix-tree.c 	gfp_mask &= ~__GFP_ACCOUNT;
gfp_mask          347 lib/radix-tree.c 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
gfp_mask          374 lib/radix-tree.c int radix_tree_preload(gfp_t gfp_mask)
gfp_mask          377 lib/radix-tree.c 	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
gfp_mask          378 lib/radix-tree.c 	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
gfp_mask          387 lib/radix-tree.c int radix_tree_maybe_preload(gfp_t gfp_mask)
gfp_mask          389 lib/radix-tree.c 	if (gfpflags_allow_blocking(gfp_mask))
gfp_mask          390 lib/radix-tree.c 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
gfp_mask         1478 lib/radix-tree.c void idr_preload(gfp_t gfp_mask)
gfp_mask         1480 lib/radix-tree.c 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
gfp_mask          149 lib/scatterlist.c static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
gfp_mask          161 lib/scatterlist.c 		void *ptr = (void *) __get_free_page(gfp_mask);
gfp_mask          162 lib/scatterlist.c 		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
gfp_mask          166 lib/scatterlist.c 				     gfp_mask);
gfp_mask          268 lib/scatterlist.c 		     unsigned int nents_first_chunk, gfp_t gfp_mask,
gfp_mask          302 lib/scatterlist.c 			sg = alloc_fn(alloc_size, gfp_mask);
gfp_mask          355 lib/scatterlist.c int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
gfp_mask          360 lib/scatterlist.c 			       NULL, 0, gfp_mask, sg_kmalloc);
gfp_mask          392 lib/scatterlist.c 				gfp_t gfp_mask)
gfp_mask          413 lib/scatterlist.c 	ret = sg_alloc_table(sgt, chunks, gfp_mask);
gfp_mask          466 lib/scatterlist.c 			      unsigned long size, gfp_t gfp_mask)
gfp_mask          469 lib/scatterlist.c 					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
gfp_mask           62 lib/sg_pool.c  static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
gfp_mask           67 lib/sg_pool.c  	return mempool_alloc(sgp->pool, gfp_mask);
gfp_mask          152 lib/sg_split.c 	     gfp_t gfp_mask)
gfp_mask          157 lib/sg_split.c 	splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
gfp_mask          170 lib/sg_split.c 						    gfp_mask);
gfp_mask          264 lib/textsearch.c 				     unsigned int len, gfp_t gfp_mask, int flags)
gfp_mask          289 lib/textsearch.c 	conf = ops->init(pattern, len, gfp_mask, flags);
gfp_mask          142 lib/ts_bm.c    				 gfp_t gfp_mask, int flags)
gfp_mask          150 lib/ts_bm.c    	conf = alloc_ts_config(priv_size, gfp_mask);
gfp_mask          256 lib/ts_fsm.c   				    gfp_t gfp_mask, int flags)
gfp_mask          282 lib/ts_fsm.c   	conf = alloc_ts_config(priv_size, gfp_mask);
gfp_mask           92 lib/ts_kmp.c   				  gfp_t gfp_mask, int flags)
gfp_mask          100 lib/ts_kmp.c   	conf = alloc_ts_config(priv_size, gfp_mask);
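
Each ts_*_init() above receives gfp_mask via textsearch_prepare() and hands it to alloc_ts_config(). A sketch of a complete search; "kmp" selects lib/ts_kmp.c, and the helper name, pattern, and haystack are illustrative:

#include <linux/textsearch.h>
#include <linux/err.h>

static unsigned int demo_find(const void *data, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = textsearch_find_continuous(conf, &state, data, len);
	textsearch_destroy(conf);
	return pos;	/* UINT_MAX if not found */
}
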
gfp_mask          868 mm/backing-dev.c struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
gfp_mask          873 mm/backing-dev.c 			   gfp_mask | __GFP_ZERO, node_id);
gfp_mask          948 mm/compaction.c 		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
gfp_mask         2092 mm/compaction.c 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
gfp_mask         2289 mm/compaction.c 		gfp_t gfp_mask, enum compact_priority prio,
gfp_mask         2297 mm/compaction.c 		.gfp_mask = gfp_mask,
gfp_mask         2339 mm/compaction.c enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
gfp_mask         2343 mm/compaction.c 	int may_perform_io = gfp_mask & __GFP_IO;
gfp_mask         2355 mm/compaction.c 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
gfp_mask         2368 mm/compaction.c 		status = compact_zone_order(zone, order, gfp_mask, prio,
gfp_mask         2419 mm/compaction.c 		.gfp_mask = GFP_KERNEL,
gfp_mask         2534 mm/compaction.c 		.gfp_mask = GFP_KERNEL,
gfp_mask          811 mm/filemap.c   int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
gfp_mask          852 mm/filemap.c   				      pgoff_t offset, gfp_t gfp_mask,
gfp_mask          867 mm/filemap.c   					      gfp_mask, &memcg, false);
gfp_mask          897 mm/filemap.c   	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
gfp_mask          929 mm/filemap.c   		pgoff_t offset, gfp_t gfp_mask)
gfp_mask          932 mm/filemap.c   					  gfp_mask, NULL);
gfp_mask          937 mm/filemap.c   				pgoff_t offset, gfp_t gfp_mask)
gfp_mask          944 mm/filemap.c   					 gfp_mask, &shadow);
gfp_mask          957 mm/filemap.c   		if (!(gfp_mask & __GFP_WRITE) && shadow)
gfp_mask         1630 mm/filemap.c   	int fgp_flags, gfp_t gfp_mask)
gfp_mask         1667 mm/filemap.c   			gfp_mask |= __GFP_WRITE;
gfp_mask         1669 mm/filemap.c   			gfp_mask &= ~__GFP_FS;
gfp_mask         1671 mm/filemap.c   		page = __page_cache_alloc(gfp_mask);
gfp_mask         1682 mm/filemap.c   		err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
gfp_mask         2512 mm/filemap.c   					  vmf->gfp_mask);
gfp_mask         3471 mm/filemap.c   int try_to_release_page(struct page *page, gfp_t gfp_mask)
gfp_mask         3480 mm/filemap.c   		return mapping->a_ops->releasepage(page, gfp_mask);
gfp_mask         1411 mm/gup.c       	gfp_t gfp_mask = GFP_USER | __GFP_NOWARN;
gfp_mask         1414 mm/gup.c       		gfp_mask |= __GFP_HIGHMEM;
gfp_mask         1423 mm/gup.c       		return alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
gfp_mask         1445 mm/gup.c       	return __alloc_pages_node(nid, gfp_mask, 0);
gfp_mask          894 mm/hugetlb.c   static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
gfp_mask          903 mm/hugetlb.c   	zonelist = node_zonelist(nid, gfp_mask);
gfp_mask          907 mm/hugetlb.c   	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
gfp_mask          910 mm/hugetlb.c   		if (!cpuset_zone_allowed(zone, gfp_mask))
gfp_mask          946 mm/hugetlb.c   	gfp_t gfp_mask;
gfp_mask          963 mm/hugetlb.c   	gfp_mask = htlb_alloc_mask(h);
gfp_mask          964 mm/hugetlb.c   	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
gfp_mask          965 mm/hugetlb.c   	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
gfp_mask         1074 mm/hugetlb.c   				unsigned long nr_pages, gfp_t gfp_mask)
gfp_mask         1078 mm/hugetlb.c   				  gfp_mask);
gfp_mask         1115 mm/hugetlb.c   static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
gfp_mask         1125 mm/hugetlb.c   	zonelist = node_zonelist(nid, gfp_mask);
gfp_mask         1126 mm/hugetlb.c   	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
gfp_mask         1140 mm/hugetlb.c   				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
gfp_mask         1157 mm/hugetlb.c   static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
gfp_mask         1165 mm/hugetlb.c   static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
gfp_mask         1456 mm/hugetlb.c   		gfp_t gfp_mask, int nid, nodemask_t *nmask,
gfp_mask         1472 mm/hugetlb.c   	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
gfp_mask         1474 mm/hugetlb.c   		gfp_mask |= __GFP_RETRY_MAYFAIL;
gfp_mask         1477 mm/hugetlb.c   	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
gfp_mask         1507 mm/hugetlb.c   		gfp_t gfp_mask, int nid, nodemask_t *nmask,
gfp_mask         1513 mm/hugetlb.c   		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
gfp_mask         1515 mm/hugetlb.c   		page = alloc_buddy_huge_page(h, gfp_mask,
gfp_mask         1536 mm/hugetlb.c   	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
gfp_mask         1539 mm/hugetlb.c   		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
gfp_mask         1671 mm/hugetlb.c   static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
gfp_mask         1684 mm/hugetlb.c   	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
gfp_mask         1712 mm/hugetlb.c   struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
gfp_mask         1720 mm/hugetlb.c   	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
gfp_mask         1742 mm/hugetlb.c   	gfp_t gfp_mask = htlb_alloc_mask(h);
gfp_mask         1746 mm/hugetlb.c   	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
gfp_mask         1747 mm/hugetlb.c   	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
gfp_mask         1756 mm/hugetlb.c   	gfp_t gfp_mask = htlb_alloc_mask(h);
gfp_mask         1760 mm/hugetlb.c   		gfp_mask |= __GFP_THISNODE;
gfp_mask         1764 mm/hugetlb.c   		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
gfp_mask         1768 mm/hugetlb.c   		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
gfp_mask         1777 mm/hugetlb.c   	gfp_t gfp_mask = htlb_alloc_mask(h);
gfp_mask         1783 mm/hugetlb.c   		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
gfp_mask         1791 mm/hugetlb.c   	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
gfp_mask         1801 mm/hugetlb.c   	gfp_t gfp_mask;
gfp_mask         1804 mm/hugetlb.c   	gfp_mask = htlb_alloc_mask(h);
gfp_mask         1805 mm/hugetlb.c   	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
gfp_mask          193 mm/internal.h  	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
gfp_mask         1588 mm/memcontrol.c static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
gfp_mask         1595 mm/memcontrol.c 		.gfp_mask = gfp_mask,
gfp_mask         1711 mm/memcontrol.c 				   gfp_t gfp_mask,
gfp_mask         1750 mm/memcontrol.c 		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
gfp_mask         2356 mm/memcontrol.c 			 gfp_t gfp_mask)
gfp_mask         2362 mm/memcontrol.c 		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
gfp_mask         2541 mm/memcontrol.c static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
gfp_mask         2582 mm/memcontrol.c 	if (gfp_mask & __GFP_ATOMIC)
gfp_mask         2606 mm/memcontrol.c 	if (!gfpflags_allow_blocking(gfp_mask))
gfp_mask         2612 mm/memcontrol.c 						    gfp_mask, may_swap);
gfp_mask         2623 mm/memcontrol.c 	if (gfp_mask & __GFP_NORETRY)
gfp_mask         2646 mm/memcontrol.c 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
gfp_mask         2649 mm/memcontrol.c 	if (gfp_mask & __GFP_NOFAIL)
gfp_mask         2660 mm/memcontrol.c 	oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
gfp_mask         2672 mm/memcontrol.c 	if (!(gfp_mask & __GFP_NOFAIL))
gfp_mask         3225 mm/memcontrol.c 					    gfp_t gfp_mask,
gfp_mask         3264 mm/memcontrol.c 						    gfp_mask, &nr_scanned);
gfp_mask         6514 mm/memcontrol.c 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
gfp_mask         6551 mm/memcontrol.c 	ret = try_charge(memcg, gfp_mask, nr_pages);
gfp_mask         6560 mm/memcontrol.c 			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
gfp_mask         6566 mm/memcontrol.c 	ret = mem_cgroup_try_charge(page, mm, gfp_mask, memcgp, compound);
gfp_mask         6568 mm/memcontrol.c 	mem_cgroup_throttle_swaprate(memcg, page_to_nid(page), gfp_mask);
gfp_mask         6900 mm/memcontrol.c 	gfp_t gfp_mask = GFP_KERNEL;
gfp_mask         6916 mm/memcontrol.c 		gfp_mask = GFP_NOWAIT;
gfp_mask         6920 mm/memcontrol.c 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
gfp_mask         6923 mm/memcontrol.c 	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
gfp_mask         3917 mm/memory.c    		.gfp_mask = __get_fault_gfp_mask(vma),
gfp_mask          182 mm/mempool.c   		      gfp_t gfp_mask, int node_id)
gfp_mask          192 mm/mempool.c   					    gfp_mask, node_id);
gfp_mask          202 mm/mempool.c   		element = pool->alloc(gfp_mask, pool->pool_data);
gfp_mask          263 mm/mempool.c   			       gfp_t gfp_mask, int node_id)
gfp_mask          267 mm/mempool.c   	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
gfp_mask          272 mm/mempool.c   			      gfp_mask, node_id)) {
gfp_mask          375 mm/mempool.c   void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
gfp_mask          382 mm/mempool.c   	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
gfp_mask          383 mm/mempool.c   	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
gfp_mask          385 mm/mempool.c   	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
gfp_mask          386 mm/mempool.c   	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
gfp_mask          387 mm/mempool.c   	gfp_mask |= __GFP_NOWARN;	/* failures are OK */
gfp_mask          389 mm/mempool.c   	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
gfp_mask          415 mm/mempool.c   	if (gfp_temp != gfp_mask) {
gfp_mask          417 mm/mempool.c   		gfp_temp = gfp_mask;
gfp_mask          422 mm/mempool.c   	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
gfp_mask          509 mm/mempool.c   void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
gfp_mask          513 mm/mempool.c   	return kmem_cache_alloc(mem, gfp_mask);
gfp_mask          528 mm/mempool.c   void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
gfp_mask          531 mm/mempool.c   	return kmalloc(size, gfp_mask);
gfp_mask          545 mm/mempool.c   void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
gfp_mask          548 mm/mempool.c   	return alloc_pages(gfp_mask, order);
gfp_mask          143 mm/nommu.c     void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
gfp_mask          149 mm/nommu.c     	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
gfp_mask          256 mm/oom_kill.c  	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
gfp_mask          278 mm/oom_kill.c  	if (oc->gfp_mask & __GFP_THISNODE)
gfp_mask          297 mm/oom_kill.c  		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
gfp_mask          455 mm/oom_kill.c  		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
gfp_mask         1075 mm/oom_kill.c  	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
gfp_mask         1127 mm/oom_kill.c  		.gfp_mask = 0,
gfp_mask         3340 mm/page_alloc.c static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
gfp_mask         3344 mm/page_alloc.c 	if (gfp_mask & __GFP_NOFAIL)
gfp_mask         3346 mm/page_alloc.c 	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
gfp_mask         3349 mm/page_alloc.c 			(gfp_mask & __GFP_DIRECT_RECLAIM))
gfp_mask         3380 mm/page_alloc.c static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
gfp_mask         3387 mm/page_alloc.c static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
gfp_mask         3389 mm/page_alloc.c 	return __should_fail_alloc_page(gfp_mask, order);
gfp_mask         3545 mm/page_alloc.c alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
gfp_mask         3549 mm/page_alloc.c 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
gfp_mask         3578 mm/page_alloc.c get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
gfp_mask         3600 mm/page_alloc.c 			!__cpuset_zone_allowed(zone, gfp_mask))
gfp_mask         3671 mm/page_alloc.c 			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
gfp_mask         3691 mm/page_alloc.c 				gfp_mask, alloc_flags, ac->migratetype);
gfp_mask         3693 mm/page_alloc.c 			prep_new_page(page, order, gfp_mask, alloc_flags);
gfp_mask         3726 mm/page_alloc.c static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
gfp_mask         3735 mm/page_alloc.c 	if (!(gfp_mask & __GFP_NOMEMALLOC))
gfp_mask         3739 mm/page_alloc.c 	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
gfp_mask         3745 mm/page_alloc.c void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
gfp_mask         3751 mm/page_alloc.c 	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
gfp_mask         3758 mm/page_alloc.c 			current->comm, &vaf, gfp_mask, &gfp_mask,
gfp_mask         3765 mm/page_alloc.c 	warn_alloc_show_mem(gfp_mask, nodemask);
gfp_mask         3769 mm/page_alloc.c __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
gfp_mask         3775 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order,
gfp_mask         3782 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, order,
gfp_mask         3789 mm/page_alloc.c __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
gfp_mask         3796 mm/page_alloc.c 		.gfp_mask = gfp_mask,
gfp_mask         3820 mm/page_alloc.c 	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
gfp_mask         3838 mm/page_alloc.c 	if (gfp_mask & __GFP_RETRY_MAYFAIL)
gfp_mask         3856 mm/page_alloc.c 	if (gfp_mask & __GFP_THISNODE)
gfp_mask         3860 mm/page_alloc.c 	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
gfp_mask         3867 mm/page_alloc.c 		if (gfp_mask & __GFP_NOFAIL)
gfp_mask         3868 mm/page_alloc.c 			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
gfp_mask         3885 mm/page_alloc.c __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
gfp_mask         3899 mm/page_alloc.c 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
gfp_mask         3913 mm/page_alloc.c 		prep_new_page(page, order, gfp_mask, alloc_flags);
gfp_mask         3917 mm/page_alloc.c 		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
gfp_mask         4018 mm/page_alloc.c __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
gfp_mask         4058 mm/page_alloc.c static bool __need_fs_reclaim(gfp_t gfp_mask)
gfp_mask         4060 mm/page_alloc.c 	gfp_mask = current_gfp_context(gfp_mask);
gfp_mask         4063 mm/page_alloc.c 	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
gfp_mask         4071 mm/page_alloc.c 	if (!(gfp_mask & __GFP_FS))
gfp_mask         4074 mm/page_alloc.c 	if (gfp_mask & __GFP_NOLOCKDEP)
gfp_mask         4090 mm/page_alloc.c void fs_reclaim_acquire(gfp_t gfp_mask)
gfp_mask         4092 mm/page_alloc.c 	if (__need_fs_reclaim(gfp_mask))
gfp_mask         4097 mm/page_alloc.c void fs_reclaim_release(gfp_t gfp_mask)
gfp_mask         4099 mm/page_alloc.c 	if (__need_fs_reclaim(gfp_mask))
gfp_mask         4107 mm/page_alloc.c __perform_reclaim(gfp_t gfp_mask, unsigned int order,
gfp_mask         4119 mm/page_alloc.c 	fs_reclaim_acquire(gfp_mask);
gfp_mask         4122 mm/page_alloc.c 	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
gfp_mask         4126 mm/page_alloc.c 	fs_reclaim_release(gfp_mask);
gfp_mask         4136 mm/page_alloc.c __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
gfp_mask         4143 mm/page_alloc.c 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
gfp_mask         4148 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
gfp_mask         4165 mm/page_alloc.c static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
gfp_mask         4176 mm/page_alloc.c 			wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
gfp_mask         4182 mm/page_alloc.c gfp_to_alloc_flags(gfp_t gfp_mask)
gfp_mask         4195 mm/page_alloc.c 	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
gfp_mask         4197 mm/page_alloc.c 	if (gfp_mask & __GFP_ATOMIC) {
gfp_mask         4202 mm/page_alloc.c 		if (!(gfp_mask & __GFP_NOMEMALLOC))
gfp_mask         4212 mm/page_alloc.c 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
gfp_mask         4216 mm/page_alloc.c 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
gfp_mask         4241 mm/page_alloc.c static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
gfp_mask         4243 mm/page_alloc.c 	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
gfp_mask         4245 mm/page_alloc.c 	if (gfp_mask & __GFP_MEMALLOC)
gfp_mask         4259 mm/page_alloc.c bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
gfp_mask         4261 mm/page_alloc.c 	return !!__gfp_pfmemalloc_flags(gfp_mask);
gfp_mask         4275 mm/page_alloc.c should_reclaim_retry(gfp_t gfp_mask, unsigned order,
gfp_mask         4399 mm/page_alloc.c __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
gfp_mask         4402 mm/page_alloc.c 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
gfp_mask         4418 mm/page_alloc.c 	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
gfp_mask         4420 mm/page_alloc.c 		gfp_mask &= ~__GFP_ATOMIC;
gfp_mask         4433 mm/page_alloc.c 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
gfp_mask         4447 mm/page_alloc.c 		wake_all_kswapds(order, gfp_mask, ac);
gfp_mask         4453 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
gfp_mask         4469 mm/page_alloc.c 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
gfp_mask         4470 mm/page_alloc.c 		page = __alloc_pages_direct_compact(gfp_mask, order,
gfp_mask         4477 mm/page_alloc.c 		 if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
gfp_mask         4478 mm/page_alloc.c 		     !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
gfp_mask         4505 mm/page_alloc.c 		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
gfp_mask         4529 mm/page_alloc.c 		wake_all_kswapds(order, gfp_mask, ac);
gfp_mask         4531 mm/page_alloc.c 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
gfp_mask         4547 mm/page_alloc.c 	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
gfp_mask         4560 mm/page_alloc.c 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
gfp_mask         4566 mm/page_alloc.c 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
gfp_mask         4572 mm/page_alloc.c 	if (gfp_mask & __GFP_NORETRY)
gfp_mask         4579 mm/page_alloc.c 	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
gfp_mask         4582 mm/page_alloc.c 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
gfp_mask         4604 mm/page_alloc.c 	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
gfp_mask         4611 mm/page_alloc.c 	     (gfp_mask & __GFP_NOMEMALLOC)))
gfp_mask         4629 mm/page_alloc.c 	if (gfp_mask & __GFP_NOFAIL) {
gfp_mask         4658 mm/page_alloc.c 		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
gfp_mask         4666 mm/page_alloc.c 	warn_alloc(gfp_mask, ac->nodemask,
gfp_mask         4672 mm/page_alloc.c static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
gfp_mask         4677 mm/page_alloc.c 	ac->high_zoneidx = gfp_zone(gfp_mask);
gfp_mask         4678 mm/page_alloc.c 	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
gfp_mask         4680 mm/page_alloc.c 	ac->migratetype = gfpflags_to_migratetype(gfp_mask);
gfp_mask         4690 mm/page_alloc.c 	fs_reclaim_acquire(gfp_mask);
gfp_mask         4691 mm/page_alloc.c 	fs_reclaim_release(gfp_mask);
gfp_mask         4693 mm/page_alloc.c 	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
gfp_mask         4695 mm/page_alloc.c 	if (should_fail_alloc_page(gfp_mask, order))
gfp_mask         4705 mm/page_alloc.c static inline void finalise_ac(gfp_t gfp_mask, struct alloc_context *ac)
gfp_mask         4708 mm/page_alloc.c 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
gfp_mask         4723 mm/page_alloc.c __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
gfp_mask         4736 mm/page_alloc.c 		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
gfp_mask         4740 mm/page_alloc.c 	gfp_mask &= gfp_allowed_mask;
gfp_mask         4741 mm/page_alloc.c 	alloc_mask = gfp_mask;
gfp_mask         4742 mm/page_alloc.c 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
gfp_mask         4745 mm/page_alloc.c 	finalise_ac(gfp_mask, &ac);
gfp_mask         4751 mm/page_alloc.c 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
gfp_mask         4764 mm/page_alloc.c 	alloc_mask = current_gfp_context(gfp_mask);
gfp_mask         4777 mm/page_alloc.c 	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
gfp_mask         4778 mm/page_alloc.c 	    unlikely(__memcg_kmem_charge(page, gfp_mask, order) != 0)) {
gfp_mask         4794 mm/page_alloc.c unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
gfp_mask         4798 mm/page_alloc.c 	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
gfp_mask         4805 mm/page_alloc.c unsigned long get_zeroed_page(gfp_t gfp_mask)
gfp_mask         4807 mm/page_alloc.c 	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
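
__get_free_pages() and get_zeroed_page() above are the page-order front ends to the allocator; get_zeroed_page() is just __get_free_pages() with __GFP_ZERO, as the line above shows. A sketch (helper name and order are illustrative):

#include <linux/gfp.h>

static void demo_pages(void)
{
	unsigned int order = 2;				/* 4 pages */
	unsigned long buf = __get_free_pages(GFP_KERNEL, order);
	unsigned long zed = get_zeroed_page(GFP_KERNEL);

	if (buf)
		free_pages(buf, order);
	if (zed)
		free_page(zed);
}
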
gfp_mask         4848 mm/page_alloc.c 					     gfp_t gfp_mask)
gfp_mask         4851 mm/page_alloc.c 	gfp_t gfp = gfp_mask;
gfp_mask         4854 mm/page_alloc.c 	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
gfp_mask         4856 mm/page_alloc.c 	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
gfp_mask         4878 mm/page_alloc.c 		      unsigned int fragsz, gfp_t gfp_mask)
gfp_mask         4886 mm/page_alloc.c 		page = __page_frag_cache_refill(nc, gfp_mask);
gfp_mask         4974 mm/page_alloc.c void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
gfp_mask         4979 mm/page_alloc.c 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
gfp_mask         4980 mm/page_alloc.c 		gfp_mask &= ~__GFP_COMP;
gfp_mask         4982 mm/page_alloc.c 	addr = __get_free_pages(gfp_mask, order);
gfp_mask         4999 mm/page_alloc.c void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
gfp_mask         5004 mm/page_alloc.c 	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
gfp_mask         5005 mm/page_alloc.c 		gfp_mask &= ~__GFP_COMP;
gfp_mask         5007 mm/page_alloc.c 	p = alloc_pages_node(nid, gfp_mask, order);
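
alloc_pages_exact()/alloc_pages_exact_nid() above return exactly size bytes of physically contiguous memory and hand back the tail pages of the rounded-up order, which is why __GFP_COMP is warned about and stripped in the lines above. A sketch (helper name is illustrative):

static void *demo_exact(size_t size)
{
	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
}
/* Release with free_pages_exact(ptr, size). */
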
gfp_mask         8384 mm/page_alloc.c 		       unsigned migratetype, gfp_t gfp_mask)
gfp_mask         8397 mm/page_alloc.c 		.gfp_mask = current_gfp_context(gfp_mask),
gfp_mask           25 mm/page_owner.c 	gfp_t gfp_mask;
gfp_mask          164 mm/page_owner.c 	unsigned int order, gfp_t gfp_mask)
gfp_mask          173 mm/page_owner.c 		page_owner->gfp_mask = gfp_mask;
gfp_mask          183 mm/page_owner.c 					gfp_t gfp_mask)
gfp_mask          191 mm/page_owner.c 	handle = save_stack(gfp_mask);
gfp_mask          192 mm/page_owner.c 	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
gfp_mask          235 mm/page_owner.c 	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
gfp_mask          316 mm/page_owner.c 					page_owner->gfp_mask);
gfp_mask          354 mm/page_owner.c 			page_owner->order, page_owner->gfp_mask,
gfp_mask          355 mm/page_owner.c 			&page_owner->gfp_mask);
gfp_mask          362 mm/page_owner.c 	page_mt  = gfpflags_to_migratetype(page_owner->gfp_mask);
gfp_mask          409 mm/page_owner.c 	gfp_t gfp_mask;
gfp_mask          418 mm/page_owner.c 	gfp_mask = page_owner->gfp_mask;
gfp_mask          419 mm/page_owner.c 	mt = gfpflags_to_migratetype(gfp_mask);
gfp_mask          432 mm/page_owner.c 		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
gfp_mask          166 mm/readahead.c 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
gfp_mask          191 mm/readahead.c 						gfp_mask);
gfp_mask          196 mm/readahead.c 		page = __page_cache_alloc(gfp_mask);
gfp_mask          212 mm/readahead.c 		read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
gfp_mask           53 mm/sparse-vmemmap.c 		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
gfp_mask           58 mm/sparse-vmemmap.c 		page = alloc_pages_node(node, gfp_mask, order);
gfp_mask           63 mm/sparse-vmemmap.c 			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
gfp_mask          359 mm/swap_state.c struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask          398 mm/swap_state.c 			new_page = alloc_page_vma(gfp_mask, vma, addr);
gfp_mask          421 mm/swap_state.c 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
gfp_mask          448 mm/swap_state.c struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask          452 mm/swap_state.c 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
gfp_mask          539 mm/swap_state.c struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask          578 mm/swap_state.c 			gfp_mask, vma, addr, &page_allocated);
gfp_mask          594 mm/swap_state.c 	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
gfp_mask          722 mm/swap_state.c static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
gfp_mask          749 mm/swap_state.c 		page = __read_swap_cache_async(entry, gfp_mask, vma,
gfp_mask          765 mm/swap_state.c 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
gfp_mask          781 mm/swap_state.c struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask          785 mm/swap_state.c 			swap_vma_readahead(entry, gfp_mask, vmf) :
gfp_mask          786 mm/swap_state.c 			swap_cluster_readahead(entry, gfp_mask, vmf);
gfp_mask         3521 mm/swapfile.c  int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
gfp_mask         3536 mm/swapfile.c  	page = alloc_page(gfp_mask | __GFP_HIGHMEM);
gfp_mask         3741 mm/swapfile.c  				  gfp_t gfp_mask)
gfp_mask         3744 mm/swapfile.c  	if (!(gfp_mask & __GFP_IO) || !memcg)
gfp_mask         1053 mm/vmalloc.c   				int node, gfp_t gfp_mask)
gfp_mask         1069 mm/vmalloc.c   			gfp_mask & GFP_RECLAIM_MASK, node);
gfp_mask         1077 mm/vmalloc.c   	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
gfp_mask         1136 mm/vmalloc.c   	if (gfpflags_allow_blocking(gfp_mask)) {
gfp_mask         1145 mm/vmalloc.c   	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
gfp_mask         1457 mm/vmalloc.c   static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
gfp_mask         1469 mm/vmalloc.c   			gfp_mask & GFP_RECLAIM_MASK, node);
gfp_mask         1475 mm/vmalloc.c   					node, gfp_mask);
gfp_mask         1481 mm/vmalloc.c   	err = radix_tree_preload(gfp_mask);
gfp_mask         1573 mm/vmalloc.c   static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
gfp_mask         1621 mm/vmalloc.c   		vaddr = new_vmap_block(order, gfp_mask);
gfp_mask         2043 mm/vmalloc.c   		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
gfp_mask         2057 mm/vmalloc.c   	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
gfp_mask         2064 mm/vmalloc.c   	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
gfp_mask         2396 mm/vmalloc.c   			    gfp_t gfp_mask, pgprot_t prot,
gfp_mask         2398 mm/vmalloc.c   static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
gfp_mask         2403 mm/vmalloc.c   	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
gfp_mask         2404 mm/vmalloc.c   	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
gfp_mask         2405 mm/vmalloc.c   	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
gfp_mask         2444 mm/vmalloc.c   		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
gfp_mask         2454 mm/vmalloc.c   	warn_alloc(gfp_mask, NULL,
gfp_mask         2480 mm/vmalloc.c   			unsigned long start, unsigned long end, gfp_t gfp_mask,
gfp_mask         2493 mm/vmalloc.c   				vm_flags, start, end, node, gfp_mask, caller);
gfp_mask         2497 mm/vmalloc.c   	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
gfp_mask         2508 mm/vmalloc.c   	kmemleak_vmalloc(area, size, gfp_mask);
gfp_mask         2513 mm/vmalloc.c   	warn_alloc(gfp_mask, NULL,
gfp_mask         2549 mm/vmalloc.c   			    gfp_t gfp_mask, pgprot_t prot,
gfp_mask         2553 mm/vmalloc.c   				gfp_mask, prot, 0, node, caller);
gfp_mask         2556 mm/vmalloc.c   void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
gfp_mask         2558 mm/vmalloc.c   	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
gfp_mask          114 mm/vmscan.c    	gfp_t gfp_mask;
gfp_mask          593 mm/vmscan.c    static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
gfp_mask          613 mm/vmscan.c    			.gfp_mask = gfp_mask,
gfp_mask          668 mm/vmscan.c    static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
gfp_mask          695 mm/vmscan.c    static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
gfp_mask          710 mm/vmscan.c    		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
gfp_mask          717 mm/vmscan.c    			.gfp_mask = gfp_mask,
gfp_mask         1163 mm/vmscan.c    		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
gfp_mask         1164 mm/vmscan.c    			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
gfp_mask         1290 mm/vmscan.c    				if (!(sc->gfp_mask & __GFP_IO))
gfp_mask         1444 mm/vmscan.c    			if (!try_to_release_page(page, sc->gfp_mask))
gfp_mask         1540 mm/vmscan.c    		.gfp_mask = GFP_KERNEL,
gfp_mask         1852 mm/vmscan.c    	if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
gfp_mask         2154 mm/vmscan.c    		.gfp_mask = GFP_KERNEL,
gfp_mask         2814 mm/vmscan.c    			shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
gfp_mask         2818 mm/vmscan.c    			vmpressure(sc->gfp_mask, memcg, false,
gfp_mask         2830 mm/vmscan.c    		vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
gfp_mask         2967 mm/vmscan.c    	orig_mask = sc->gfp_mask;
gfp_mask         2969 mm/vmscan.c    		sc->gfp_mask |= __GFP_HIGHMEM;
gfp_mask         2970 mm/vmscan.c    		sc->reclaim_idx = gfp_zone(sc->gfp_mask);
gfp_mask         3017 mm/vmscan.c    						sc->order, sc->gfp_mask,
gfp_mask         3035 mm/vmscan.c    	sc->gfp_mask = orig_mask;
gfp_mask         3083 mm/vmscan.c    		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
gfp_mask         3180 mm/vmscan.c    static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
gfp_mask         3219 mm/vmscan.c    					gfp_zone(gfp_mask), nodemask) {
gfp_mask         3245 mm/vmscan.c    	if (!(gfp_mask & __GFP_FS)) {
gfp_mask         3265 mm/vmscan.c    				gfp_t gfp_mask, nodemask_t *nodemask)
gfp_mask         3270 mm/vmscan.c    		.gfp_mask = current_gfp_context(gfp_mask),
gfp_mask         3271 mm/vmscan.c    		.reclaim_idx = gfp_zone(gfp_mask),
gfp_mask         3293 mm/vmscan.c    	if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))
gfp_mask         3297 mm/vmscan.c    	trace_mm_vmscan_direct_reclaim_begin(order, sc.gfp_mask);
gfp_mask         3311 mm/vmscan.c    						gfp_t gfp_mask, bool noswap,
gfp_mask         3327 mm/vmscan.c    	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
gfp_mask         3331 mm/vmscan.c    						      sc.gfp_mask);
gfp_mask         3351 mm/vmscan.c    					   gfp_t gfp_mask,
gfp_mask         3361 mm/vmscan.c    		.gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) |
gfp_mask         3381 mm/vmscan.c    	trace_mm_vmscan_memcg_reclaim_begin(0, sc.gfp_mask);
gfp_mask         3589 mm/vmscan.c    		.gfp_mask = GFP_KERNEL,
gfp_mask         3700 mm/vmscan.c    						sc.gfp_mask, &nr_soft_scanned);
gfp_mask         4031 mm/vmscan.c    		.gfp_mask = GFP_HIGHUSER_MOVABLE,
gfp_mask         4039 mm/vmscan.c    	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
gfp_mask         4043 mm/vmscan.c    	fs_reclaim_acquire(sc.gfp_mask);
gfp_mask         4051 mm/vmscan.c    	fs_reclaim_release(sc.gfp_mask);
gfp_mask         4209 mm/vmscan.c    static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
gfp_mask         4217 mm/vmscan.c    		.gfp_mask = current_gfp_context(gfp_mask),
gfp_mask         4223 mm/vmscan.c    		.reclaim_idx = gfp_zone(gfp_mask),
gfp_mask         4227 mm/vmscan.c    					   sc.gfp_mask);
gfp_mask         4230 mm/vmscan.c    	fs_reclaim_acquire(sc.gfp_mask);
gfp_mask         4253 mm/vmscan.c    	fs_reclaim_release(sc.gfp_mask);
gfp_mask         4260 mm/vmscan.c    int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
gfp_mask         4281 mm/vmscan.c    	if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
gfp_mask         4296 mm/vmscan.c    	ret = __node_reclaim(pgdat, gfp_mask, order);
gfp_mask           12 net/ceph/msgpool.c static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
gfp_mask           18 net/ceph/msgpool.c 			    gfp_mask, true);
gfp_mask          181 net/core/skbuff.c struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
gfp_mask          194 net/core/skbuff.c 		gfp_mask |= __GFP_MEMALLOC;
gfp_mask          197 net/core/skbuff.c 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
gfp_mask          209 net/core/skbuff.c 	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
gfp_mask          371 net/core/skbuff.c static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
gfp_mask          375 net/core/skbuff.c 	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
gfp_mask          425 net/core/skbuff.c 				   gfp_t gfp_mask)
gfp_mask          435 net/core/skbuff.c 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
gfp_mask          436 net/core/skbuff.c 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
gfp_mask          446 net/core/skbuff.c 		gfp_mask |= __GFP_MEMALLOC;
gfp_mask          450 net/core/skbuff.c 		data = page_frag_alloc(nc, len, gfp_mask);
gfp_mask          455 net/core/skbuff.c 		data = page_frag_alloc(nc, len, gfp_mask);
gfp_mask          497 net/core/skbuff.c 				 gfp_t gfp_mask)
gfp_mask          506 net/core/skbuff.c 	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
gfp_mask          507 net/core/skbuff.c 		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
gfp_mask          517 net/core/skbuff.c 		gfp_mask |= __GFP_MEMALLOC;
gfp_mask          519 net/core/skbuff.c 	data = page_frag_alloc(&nc->page, len, gfp_mask);
gfp_mask         1308 net/core/skbuff.c 			      gfp_t gfp_mask)
gfp_mask         1313 net/core/skbuff.c 			if (!gfp_mask) {
gfp_mask         1342 net/core/skbuff.c int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
gfp_mask         1349 net/core/skbuff.c 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
gfp_mask         1357 net/core/skbuff.c 		page = alloc_page(gfp_mask);
gfp_mask         1430 net/core/skbuff.c struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
gfp_mask         1437 net/core/skbuff.c 	if (skb_orphan_frags(skb, gfp_mask))
gfp_mask         1446 net/core/skbuff.c 			gfp_mask |= __GFP_MEMALLOC;
gfp_mask         1448 net/core/skbuff.c 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
gfp_mask         1509 net/core/skbuff.c struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
gfp_mask         1513 net/core/skbuff.c 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
gfp_mask         1549 net/core/skbuff.c 				   gfp_t gfp_mask, bool fclone)
gfp_mask         1553 net/core/skbuff.c 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
gfp_mask         1572 net/core/skbuff.c 		if (skb_orphan_frags(skb, gfp_mask) ||
gfp_mask         1573 net/core/skbuff.c 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
gfp_mask         1613 net/core/skbuff.c 		     gfp_t gfp_mask)
gfp_mask         1627 net/core/skbuff.c 		gfp_mask |= __GFP_MEMALLOC;
gfp_mask         1629 net/core/skbuff.c 			       gfp_mask, NUMA_NO_NODE, NULL);
gfp_mask         1649 net/core/skbuff.c 		if (skb_orphan_frags(skb, gfp_mask))
gfp_mask         1740 net/core/skbuff.c 				gfp_t gfp_mask)
gfp_mask         1746 net/core/skbuff.c 					gfp_mask, skb_alloc_rx_flag(skb),
gfp_mask         5648 net/core/skbuff.c 				     gfp_t gfp_mask)
gfp_mask         5664 net/core/skbuff.c 	skb = alloc_skb(header_len, gfp_mask);
gfp_mask         5675 net/core/skbuff.c 				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
gfp_mask         5687 net/core/skbuff.c 		page = alloc_page(gfp_mask);
gfp_mask         5707 net/core/skbuff.c 				    const int headlen, gfp_t gfp_mask)
gfp_mask         5717 net/core/skbuff.c 		gfp_mask |= __GFP_MEMALLOC;
gfp_mask         5720 net/core/skbuff.c 			       gfp_mask, NUMA_NO_NODE, NULL);
gfp_mask         5736 net/core/skbuff.c 		if (skb_orphan_frags(skb, gfp_mask)) {
gfp_mask         5777 net/core/skbuff.c 				gfp_t gfp_mask)
gfp_mask         5796 net/core/skbuff.c 				clone = skb_clone(list, gfp_mask);
gfp_mask         5805 net/core/skbuff.c 			if (pskb_carve(list, eat, gfp_mask) < 0) {
gfp_mask         5830 net/core/skbuff.c 				       int pos, gfp_t gfp_mask)
gfp_mask         5841 net/core/skbuff.c 		gfp_mask |= __GFP_MEMALLOC;
gfp_mask         5844 net/core/skbuff.c 			       gfp_mask, NUMA_NO_NODE, NULL);
gfp_mask         5853 net/core/skbuff.c 	if (skb_orphan_frags(skb, gfp_mask)) {
gfp_mask         5887 net/core/skbuff.c 		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
gfp_mask         1018 net/ipv4/tcp_output.c 			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
gfp_mask         1044 net/ipv4/tcp_output.c 				skb = pskb_copy(oskb, gfp_mask);
gfp_mask         1046 net/ipv4/tcp_output.c 				skb = skb_clone(oskb, gfp_mask);
gfp_mask         1191 net/ipv4/tcp_output.c 			    gfp_t gfp_mask)
gfp_mask         1193 net/ipv4/tcp_output.c 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
gfp_mask          155 net/netlink/af_netlink.c 					   gfp_t gfp_mask)
gfp_mask          160 net/netlink/af_netlink.c 	new = alloc_skb(len, gfp_mask);
gfp_mask          530 net/sunrpc/auth.c 	if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
gfp_mask          145 net/sunrpc/auth_gss/gss_krb5_keys.c 		    gfp_t gfp_mask)
gfp_mask          169 net/sunrpc/auth_gss/gss_krb5_keys.c 	inblockdata = kmalloc(blocksize, gfp_mask);
gfp_mask          173 net/sunrpc/auth_gss/gss_krb5_keys.c 	outblockdata = kmalloc(blocksize, gfp_mask);
gfp_mask          177 net/sunrpc/auth_gss/gss_krb5_keys.c 	rawkey = kmalloc(keybytes, gfp_mask);
gfp_mask          357 net/sunrpc/auth_gss/gss_krb5_mech.c context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
gfp_mask          384 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          474 net/sunrpc/auth_gss/gss_krb5_mech.c context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
gfp_mask          490 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          505 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          520 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          530 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          540 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          550 net/sunrpc/auth_gss/gss_krb5_mech.c 	err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
gfp_mask          586 net/sunrpc/auth_gss/gss_krb5_mech.c 		gfp_t gfp_mask)
gfp_mask          636 net/sunrpc/auth_gss/gss_krb5_mech.c 				      gss_kerberos_mech.gm_oid.len, gfp_mask);
gfp_mask          645 net/sunrpc/auth_gss/gss_krb5_mech.c 		return context_derive_keys_des3(ctx, gfp_mask);
gfp_mask          650 net/sunrpc/auth_gss/gss_krb5_mech.c 		return context_derive_keys_new(ctx, gfp_mask);
gfp_mask          663 net/sunrpc/auth_gss/gss_krb5_mech.c 				gfp_t gfp_mask)
gfp_mask          669 net/sunrpc/auth_gss/gss_krb5_mech.c 	ctx = kzalloc(sizeof(*ctx), gfp_mask);
gfp_mask          676 net/sunrpc/auth_gss/gss_krb5_mech.c 		ret = gss_import_v2_context(p, end, ctx, gfp_mask);
gfp_mask          378 net/sunrpc/auth_gss/gss_mech_switch.c 		       gfp_t gfp_mask)
gfp_mask          380 net/sunrpc/auth_gss/gss_mech_switch.c 	if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask)))
gfp_mask          385 net/sunrpc/auth_gss/gss_mech_switch.c 						*ctx_id, endtime, gfp_mask);
gfp_mask          123 security/integrity/ima/ima_crypto.c 	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
gfp_mask          129 security/integrity/ima/ima_crypto.c 		ptr = (void *)__get_free_pages(gfp_mask, order);
gfp_mask          138 security/integrity/ima/ima_crypto.c 	gfp_mask = GFP_KERNEL;
gfp_mask          141 security/integrity/ima/ima_crypto.c 		gfp_mask |= __GFP_NOWARN;
gfp_mask          143 security/integrity/ima/ima_crypto.c 	ptr = (void *)__get_free_pages(gfp_mask, 0);
gfp_mask          236 security/integrity/integrity.h integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
gfp_mask          238 security/integrity/integrity.h 	return audit_log_start(ctx, gfp_mask, type);
gfp_mask          250 security/integrity/integrity.h integrity_audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type)
gfp_mask          362 security/selinux/ss/mls.c 		    gfp_t gfp_mask)
gfp_mask          370 security/selinux/ss/mls.c 	tmpstr = kstrdup(str, gfp_mask);
gfp_mask           43 security/selinux/ss/mls.h 		    gfp_t gfp_mask);