gfp               448 arch/alpha/kernel/pci_iommu.c 				      dma_addr_t *dma_addrp, gfp_t gfp,
gfp               455 arch/alpha/kernel/pci_iommu.c 	gfp &= ~GFP_DMA;
gfp               458 arch/alpha/kernel/pci_iommu.c 	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
gfp               472 arch/alpha/kernel/pci_iommu.c 		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
gfp               476 arch/alpha/kernel/pci_iommu.c 		gfp |= GFP_DMA;
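
The alpha pci_iommu hits above show a common fallback: allocate outside ZONE_DMA first and only add GFP_DMA when the device turns out to be unable to reach the buffer. A minimal sketch of that retry loop, with a hypothetical reachable() predicate standing in for the driver's own mapping check (illustrative only, not the arch/alpha code):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/types.h>

static void *alloc_reachable_pages(size_t size, gfp_t gfp,
				   bool (*reachable)(void *cpu_addr))
{
	unsigned int order = get_order(size);
	void *cpu_addr;

	gfp &= ~GFP_DMA;			/* first attempt: any zone */
try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (!cpu_addr)
		return NULL;

	if (!reachable(cpu_addr) && !(gfp & GFP_DMA)) {
		/* retry once from the legacy DMA zone */
		free_pages((unsigned long)cpu_addr, order);
		gfp |= GFP_DMA;
		goto try_again;
	}
	return cpu_addr;
}
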
gfp               106 arch/arm/include/asm/dma-mapping.h 			   gfp_t gfp, unsigned long attrs);
gfp                34 arch/arm/mm/dma-mapping-nommu.c 				 dma_addr_t *dma_handle, gfp_t gfp,
gfp                47 arch/arm/mm/dma-mapping.c 	gfp_t gfp;
gfp               202 arch/arm/mm/dma-mapping.c 	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
gfp               300 arch/arm/mm/dma-mapping.c 				       gfp_t gfp, int coherent_flag)
gfp               305 arch/arm/mm/dma-mapping.c 	page = alloc_pages(gfp, order);
gfp               337 arch/arm/mm/dma-mapping.c 				     int coherent_flag, gfp_t gfp);
gfp               339 arch/arm/mm/dma-mapping.c static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
gfp               361 arch/arm/mm/dma-mapping.c 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
gfp               377 arch/arm/mm/dma-mapping.c 		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
gfp               482 arch/arm/mm/dma-mapping.c static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
gfp               492 arch/arm/mm/dma-mapping.c 	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
gfp               548 arch/arm/mm/dma-mapping.c 				     int coherent_flag, gfp_t gfp)
gfp               555 arch/arm/mm/dma-mapping.c 	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
gfp               600 arch/arm/mm/dma-mapping.c static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
gfp               605 arch/arm/mm/dma-mapping.c 	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
gfp               616 arch/arm/mm/dma-mapping.c 	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
gfp               636 arch/arm/mm/dma-mapping.c 				       args->gfp);
gfp               669 arch/arm/mm/dma-mapping.c 	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
gfp               688 arch/arm/mm/dma-mapping.c 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
gfp               699 arch/arm/mm/dma-mapping.c 		.gfp = gfp,
gfp               719 arch/arm/mm/dma-mapping.c 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
gfp               724 arch/arm/mm/dma-mapping.c 		gfp |= GFP_DMA;
gfp               733 arch/arm/mm/dma-mapping.c 	gfp &= ~(__GFP_COMP);
gfp               734 arch/arm/mm/dma-mapping.c 	args.gfp = gfp;
gfp               737 arch/arm/mm/dma-mapping.c 	allowblock = gfpflags_allow_blocking(gfp);
gfp               772 arch/arm/mm/dma-mapping.c 		    gfp_t gfp, unsigned long attrs)
gfp               776 arch/arm/mm/dma-mapping.c 	return __dma_alloc(dev, size, handle, gfp, prot, false,
gfp               781 arch/arm/mm/dma-mapping.c 	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
gfp               783 arch/arm/mm/dma-mapping.c 	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
gfp              1237 arch/arm/mm/dma-mapping.c 					  gfp_t gfp, unsigned long attrs,
gfp              1259 arch/arm/mm/dma-mapping.c 						 gfp & __GFP_NOWARN);
gfp              1278 arch/arm/mm/dma-mapping.c 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
gfp              1293 arch/arm/mm/dma-mapping.c 			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
gfp              1301 arch/arm/mm/dma-mapping.c 			pages[i] = alloc_pages(gfp, 0);
gfp              1426 arch/arm/mm/dma-mapping.c static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
gfp              1434 arch/arm/mm/dma-mapping.c 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
gfp              1462 arch/arm/mm/dma-mapping.c 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
gfp              1472 arch/arm/mm/dma-mapping.c 	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
gfp              1473 arch/arm/mm/dma-mapping.c 		return __iommu_alloc_simple(dev, size, gfp, handle,
gfp              1483 arch/arm/mm/dma-mapping.c 	gfp &= ~(__GFP_COMP);
gfp              1485 arch/arm/mm/dma-mapping.c 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
gfp              1511 arch/arm/mm/dma-mapping.c 	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
gfp              1513 arch/arm/mm/dma-mapping.c 	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
gfp              1517 arch/arm/mm/dma-mapping.c 		    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
gfp              1519 arch/arm/mm/dma-mapping.c 	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
gfp              2356 arch/arm/mm/dma-mapping.c 		gfp_t gfp, unsigned long attrs)
gfp              2358 arch/arm/mm/dma-mapping.c 	return __dma_alloc(dev, size, dma_handle, gfp,
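
In the arch/arm/mm/dma-mapping.c hits above the gfp mask does double duty: it selects the zone (GFP_DMA for narrow coherent masks, __GFP_COMP stripped because buffers are handled page by page) and, through gfpflags_allow_blocking(), routes atomic callers away from the sleeping CMA/remap paths. A minimal sketch of that branch (illustrative, not the ARM implementation):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *dma_buffer_sketch(size_t size, gfp_t gfp, bool needs_dma_zone)
{
	if (needs_dma_zone)
		gfp |= GFP_DMA;		/* device mask only reaches ZONE_DMA */

	gfp &= ~__GFP_COMP;		/* buffers are split and freed per page */

	if (!gfpflags_allow_blocking(gfp))
		/* atomic caller: the real code uses a preallocated pool here */
		return (void *)__get_free_pages(gfp, get_order(size));

	/* blocking caller: CMA allocation or remapping is allowed to sleep */
	return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}
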
gfp                24 arch/arm64/include/asm/pgalloc.h 	gfp_t gfp = GFP_PGTABLE_USER;
gfp                28 arch/arm64/include/asm/pgalloc.h 		gfp = GFP_PGTABLE_KERNEL;
gfp                30 arch/arm64/include/asm/pgalloc.h 	page = alloc_page(gfp);
gfp                22 arch/arm64/mm/pgd.c 	gfp_t gfp = GFP_PGTABLE_USER;
gfp                25 arch/arm64/mm/pgd.c 		return (pgd_t *)__get_free_page(gfp);
gfp                27 arch/arm64/mm/pgd.c 		return kmem_cache_alloc(pgd_cache, gfp);
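
The arm64 pgalloc/pgd hits above pick the gfp up front (GFP_PGTABLE_USER, which in this tree is GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT, or GFP_PGTABLE_KERNEL without the accounting bit) and feed it to whichever allocator fits the object size. A sketch of that shape, with a hypothetical slab cache:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct kmem_cache *pgd_cache_sketch;	/* hypothetical cache */

static void *pgd_alloc_sketch(size_t pgd_size, bool for_user)
{
	gfp_t gfp = GFP_KERNEL | __GFP_ZERO;	/* ~ GFP_PGTABLE_KERNEL */

	if (for_user)
		gfp |= __GFP_ACCOUNT;		/* ~ GFP_PGTABLE_USER */

	if (pgd_size == PAGE_SIZE)
		return (void *)__get_free_page(gfp);

	/* sub-page PGDs come from a dedicated slab cache instead */
	return kmem_cache_alloc(pgd_cache_sketch, gfp);
}
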
gfp                75 arch/c6x/mm/dma-coherent.c 		gfp_t gfp, unsigned long attrs)
gfp                12 arch/ia64/kernel/dma-mapping.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp                14 arch/ia64/kernel/dma-mapping.c 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
gfp                41 arch/m68k/kernel/dma.c 		gfp_t gfp, unsigned long attrs)
gfp                46 arch/m68k/kernel/dma.c 		gfp |= GFP_DMA;
gfp                47 arch/m68k/kernel/dma.c 	ret = (void *)__get_free_pages(gfp, get_order(size));
gfp               564 arch/mips/jazz/jazzdma.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               568 arch/mips/jazz/jazzdma.c 	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
gfp                88 arch/openrisc/kernel/dma.c 		gfp_t gfp, unsigned long attrs)
gfp                93 arch/openrisc/kernel/dma.c 	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
gfp               398 arch/parisc/kernel/pci-dma.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               410 arch/parisc/kernel/pci-dma.c 	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
gfp               422 arch/parisc/kernel/pci-dma.c 		gfp |= GFP_DMA;
gfp                 8 arch/powerpc/include/asm/pgalloc.h static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
gfp                11 arch/powerpc/include/asm/pgalloc.h 		return gfp;
gfp                12 arch/powerpc/include/asm/pgalloc.h 	return gfp | __GFP_ACCOUNT;
gfp                15 arch/powerpc/include/asm/pgalloc.h static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
gfp                17 arch/powerpc/include/asm/pgalloc.h 	return gfp | __GFP_ACCOUNT;
gfp               292 arch/powerpc/mm/book3s64/pgtable.c 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
gfp               295 arch/powerpc/mm/book3s64/pgtable.c 		gfp &= ~__GFP_ACCOUNT;
gfp               296 arch/powerpc/mm/book3s64/pgtable.c 	page = alloc_page(gfp);
gfp               374 arch/powerpc/perf/hv-24x7.c static char *memdup_to_str(char *maybe_str, int max_len, gfp_t gfp)
gfp               376 arch/powerpc/perf/hv-24x7.c 	return kasprintf(gfp, "%.*s", max_len, maybe_str);
gfp                13 arch/sh/kernel/dma-coherent.c 		gfp_t gfp, unsigned long attrs)
gfp                18 arch/sh/kernel/dma-coherent.c 	gfp |= __GFP_ZERO;
gfp                20 arch/sh/kernel/dma-coherent.c 	ret = (void *)__get_free_pages(gfp, order);
gfp               198 arch/sparc/kernel/iommu.c 				   dma_addr_t *dma_addrp, gfp_t gfp,
gfp               214 arch/sparc/kernel/iommu.c 	page = alloc_pages_node(nid, gfp, order);
gfp               319 arch/sparc/kernel/ioport.c 		gfp_t gfp, unsigned long attrs)
gfp               328 arch/sparc/kernel/ioport.c 	va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
gfp               180 arch/sparc/kernel/pci_sun4v.c 				   dma_addr_t *dma_addrp, gfp_t gfp,
gfp               204 arch/sparc/kernel/pci_sun4v.c 	page = alloc_pages_node(nid, gfp, order);
gfp               215 arch/sparc/mm/io-unit.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               227 arch/sparc/mm/io-unit.c 	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
gfp               308 arch/sparc/mm/iommu.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               321 arch/sparc/mm/iommu.c 	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
gfp               612 arch/x86/events/intel/pt.c static struct topa *topa_alloc(int cpu, gfp_t gfp)
gfp               618 arch/x86/events/intel/pt.c 	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
gfp               701 arch/x86/events/intel/pt.c static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
gfp               712 arch/x86/events/intel/pt.c 		topa = topa_alloc(cpu, gfp);
gfp              1150 arch/x86/events/intel/pt.c 			       unsigned long nr_pages, gfp_t gfp)
gfp              1155 arch/x86/events/intel/pt.c 	topa = topa_alloc(cpu, gfp);
gfp              1162 arch/x86/events/intel/pt.c 		err = topa_insert_pages(buf, cpu, gfp);
gfp               198 arch/x86/include/asm/kexec.h 				       gfp_t gfp);
gfp                92 arch/x86/include/asm/pgalloc.h 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
gfp                95 arch/x86/include/asm/pgalloc.h 		gfp &= ~__GFP_ACCOUNT;
gfp                96 arch/x86/include/asm/pgalloc.h 	page = alloc_pages(gfp, 0);
gfp               152 arch/x86/include/asm/pgalloc.h 	gfp_t gfp = GFP_KERNEL_ACCOUNT;
gfp               155 arch/x86/include/asm/pgalloc.h 		gfp &= ~__GFP_ACCOUNT;
gfp               156 arch/x86/include/asm/pgalloc.h 	return (pud_t *)get_zeroed_page(gfp);
gfp               192 arch/x86/include/asm/pgalloc.h 	gfp_t gfp = GFP_KERNEL_ACCOUNT;
gfp               195 arch/x86/include/asm/pgalloc.h 		gfp &= ~__GFP_ACCOUNT;
gfp               196 arch/x86/include/asm/pgalloc.h 	return (p4d_t *)get_zeroed_page(gfp);
gfp               666 arch/x86/kernel/machine_kexec_64.c int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
gfp               222 arch/x86/mm/pgtable.c 	gfp_t gfp = GFP_PGTABLE_USER;
gfp               225 arch/x86/mm/pgtable.c 		gfp &= ~__GFP_ACCOUNT;
gfp               228 arch/x86/mm/pgtable.c 		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
gfp               175 arch/x86/mm/pti.c 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
gfp               183 arch/x86/mm/pti.c 		unsigned long new_p4d_page = __get_free_page(gfp);
gfp               202 arch/x86/mm/pti.c 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
gfp               212 arch/x86/mm/pti.c 		unsigned long new_pud_page = __get_free_page(gfp);
gfp               226 arch/x86/mm/pti.c 		unsigned long new_pmd_page = __get_free_page(gfp);
gfp               247 arch/x86/mm/pti.c 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
gfp               262 arch/x86/mm/pti.c 		unsigned long new_pte_page = __get_free_page(gfp);
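
The x86 pgalloc.h hits above (and the powerpc pgtable_gfp_flags() ones earlier) all apply the same rule: page tables for user address spaces are charged to the task's memory cgroup via __GFP_ACCOUNT (GFP_KERNEL_ACCOUNT), while tables for the kernel's own init_mm are not. A minimal sketch of that rule:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

static unsigned long pgtable_page_sketch(struct mm_struct *mm)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;	/* kernel mappings belong to no cgroup */

	return __get_free_page(gfp);	/* caller casts to the table type */
}
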
gfp                19 block/bfq-cgroup.c static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
gfp                23 block/bfq-cgroup.c 	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
gfp               449 block/bfq-cgroup.c static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
gfp               452 block/bfq-cgroup.c 	if (blkg_rwstat_init(&stats->merged, gfp) ||
gfp               453 block/bfq-cgroup.c 	    blkg_rwstat_init(&stats->service_time, gfp) ||
gfp               454 block/bfq-cgroup.c 	    blkg_rwstat_init(&stats->wait_time, gfp) ||
gfp               455 block/bfq-cgroup.c 	    blkg_rwstat_init(&stats->queued, gfp) ||
gfp               456 block/bfq-cgroup.c 	    bfq_stat_init(&stats->time, gfp) ||
gfp               457 block/bfq-cgroup.c 	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
gfp               458 block/bfq-cgroup.c 	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
gfp               459 block/bfq-cgroup.c 	    bfq_stat_init(&stats->dequeue, gfp) ||
gfp               460 block/bfq-cgroup.c 	    bfq_stat_init(&stats->group_wait_time, gfp) ||
gfp               461 block/bfq-cgroup.c 	    bfq_stat_init(&stats->idle_time, gfp) ||
gfp               462 block/bfq-cgroup.c 	    bfq_stat_init(&stats->empty_time, gfp)) {
gfp               481 block/bfq-cgroup.c static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
gfp               485 block/bfq-cgroup.c 	bgd = kzalloc(sizeof(*bgd), gfp);
gfp               504 block/bfq-cgroup.c static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
gfp               509 block/bfq-cgroup.c 	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
gfp               513 block/bfq-cgroup.c 	if (bfqg_stats_init(&bfqg->stats, gfp)) {
gfp              1891 block/bio.c    		      gfp_t gfp, struct bio_set *bs)
gfp              1898 block/bio.c    	split = bio_clone_fast(bio, gfp, bs);
gfp              1985 block/blk-iocost.c static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
gfp              1989 block/blk-iocost.c 	iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
gfp              2002 block/blk-iocost.c static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
gfp              2009 block/blk-iocost.c 			    gfp, q->node);
gfp               937 block/blk-iolatency.c static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
gfp               943 block/blk-iolatency.c 	iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
gfp               947 block/blk-iolatency.c 				       __alignof__(struct latency_stat), gfp);
gfp                13 block/blk-lib.c struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
gfp                15 block/blk-lib.c 	struct bio *new = bio_alloc(gfp, nr_pages);
gfp              2355 block/blk-mq.c 	gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
gfp              2357 block/blk-mq.c 	hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
gfp              2361 block/blk-mq.c 	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
gfp              2382 block/blk-mq.c 			gfp, node);
gfp              2387 block/blk-mq.c 				gfp, node))
gfp              2396 block/blk-mq.c 			gfp);
gfp               481 block/blk-throttle.c static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
gfp               488 block/blk-throttle.c 	tg = kzalloc_node(sizeof(*tg), gfp, q->node);
gfp               352 block/blk.h    struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
gfp               368 crypto/skcipher.c 			gfp_t gfp = skcipher_walk_gfp(walk);
gfp               370 crypto/skcipher.c 			walk->page = (void *)__get_free_page(gfp);
gfp               186 drivers/base/devcoredump.c 		   gfp_t gfp)
gfp               188 drivers/base/devcoredump.c 	dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
gfp               253 drivers/base/devcoredump.c 		   void *data, size_t datalen, gfp_t gfp,
gfp               275 drivers/base/devcoredump.c 	devcd = kzalloc(sizeof(*devcd), gfp);
gfp               330 drivers/base/devcoredump.c 		    size_t datalen, gfp_t gfp)
gfp               332 drivers/base/devcoredump.c 	dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
gfp                93 drivers/base/devres.c 						size_t size, gfp_t gfp, int nid)
gfp               103 drivers/base/devres.c 	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
gfp               122 drivers/base/devres.c void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
gfp               127 drivers/base/devres.c 	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
gfp               149 drivers/base/devres.c void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
gfp               153 drivers/base/devres.c 	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
gfp               546 drivers/base/devres.c void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
gfp               551 drivers/base/devres.c 	grp = kmalloc(sizeof(*grp), gfp);
gfp               810 drivers/base/devres.c void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
gfp               815 drivers/base/devres.c 	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
gfp               839 drivers/base/devres.c char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
gfp               848 drivers/base/devres.c 	buf = devm_kmalloc(dev, size, gfp);
gfp               868 drivers/base/devres.c const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
gfp               873 drivers/base/devres.c 	return devm_kstrdup(dev, s, gfp);
gfp               888 drivers/base/devres.c char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
gfp               899 drivers/base/devres.c 	p = devm_kmalloc(dev, len+1, gfp);
gfp               920 drivers/base/devres.c char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
gfp               926 drivers/base/devres.c 	p = devm_kvasprintf(dev, gfp, fmt, ap);
gfp               966 drivers/base/devres.c void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
gfp               970 drivers/base/devres.c 	p = devm_kmalloc(dev, len, gfp);
gfp              1009 drivers/base/firmware_loader/main.c 	const char *name, struct device *device, gfp_t gfp, void *context,
gfp              1014 drivers/base/firmware_loader/main.c 	fw_work = kzalloc(sizeof(struct firmware_work), gfp);
gfp              1019 drivers/base/firmware_loader/main.c 	fw_work->name = kstrdup_const(name, gfp);
gfp                58 drivers/block/drbd/drbd_state.c static struct drbd_state_change *alloc_state_change(unsigned int n_devices, unsigned int n_connections, gfp_t gfp)
gfp                67 drivers/block/drbd/drbd_state.c 	state_change = kmalloc(size, gfp);
gfp                83 drivers/block/drbd/drbd_state.c struct drbd_state_change *remember_old_state(struct drbd_resource *resource, gfp_t gfp)
gfp                99 drivers/block/drbd/drbd_state.c 	state_change = alloc_state_change(n_devices, n_connections, gfp);
gfp              1117 drivers/block/loop.c 	gfp_t gfp = lo->old_gfp_mask;
gfp              1168 drivers/block/loop.c 	mapping_set_gfp_mask(filp->f_mapping, gfp);
gfp              2603 drivers/block/skd_main.c 			   dma_addr_t *dma_handle, gfp_t gfp,
gfp              2609 drivers/block/skd_main.c 	buf = kmem_cache_alloc(s, gfp);
gfp               183 drivers/crypto/atmel-ecc.c 	gfp_t gfp;
gfp               195 drivers/crypto/atmel-ecc.c 	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
gfp               198 drivers/crypto/atmel-ecc.c 	work_data = kmalloc(sizeof(*work_data), gfp);
gfp               144 drivers/crypto/bcm/cipher.c 				rctx->gfp);
gfp               212 drivers/crypto/bcm/cipher.c 				rctx->gfp);
gfp               572 drivers/crypto/bcm/cipher.c 				rctx->gfp);
gfp               627 drivers/crypto/bcm/cipher.c 				rctx->gfp);
gfp              1118 drivers/crypto/bcm/cipher.c 				rctx->gfp);
gfp              1220 drivers/crypto/bcm/cipher.c 				rctx->gfp);
gfp              1759 drivers/crypto/bcm/cipher.c 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
gfp              1976 drivers/crypto/bcm/cipher.c 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
gfp              2087 drivers/crypto/bcm/cipher.c 	gfp_t gfp;
gfp              2102 drivers/crypto/bcm/cipher.c 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
gfp              2105 drivers/crypto/bcm/cipher.c 				     crypto_shash_descsize(hash), gfp);
gfp              2160 drivers/crypto/bcm/cipher.c 	gfp_t gfp;
gfp              2174 drivers/crypto/bcm/cipher.c 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
gfp              2176 drivers/crypto/bcm/cipher.c 		tmpbuf = kmalloc(req->nbytes, gfp);
gfp              2254 drivers/crypto/bcm/cipher.c 	gfp_t gfp;
gfp              2270 drivers/crypto/bcm/cipher.c 		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
gfp              2272 drivers/crypto/bcm/cipher.c 		tmpbuf = kmalloc(req->nbytes, gfp);
gfp              2714 drivers/crypto/bcm/cipher.c 	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
gfp               344 drivers/crypto/bcm/cipher.h 	gfp_t gfp;
gfp               142 drivers/crypto/cavium/nitrox/nitrox_aead.c 	creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL :
gfp                68 drivers/crypto/cavium/nitrox/nitrox_req.h 	gfp_t gfp;
gfp               509 drivers/crypto/cavium/nitrox/nitrox_req.h 	gfp_t gfp;
gfp               547 drivers/crypto/cavium/nitrox/nitrox_req.h static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp)
gfp               554 drivers/crypto/cavium/nitrox/nitrox_req.h 	return kzalloc(size, gfp);
gfp               617 drivers/crypto/cavium/nitrox/nitrox_req.h 	nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp);
gfp               664 drivers/crypto/cavium/nitrox/nitrox_req.h 	nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp);
gfp               119 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sgcomp = kzalloc(sz_comp, sr->gfp);
gfp               388 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr = kzalloc(sizeof(*sr), req->gfp);
gfp               394 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->gfp = req->gfp;
gfp               215 drivers/crypto/cavium/nitrox/nitrox_skcipher.c 	creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
gfp                65 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	gfp_t gfp;
gfp               108 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
gfp               110 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
gfp               278 drivers/crypto/ccp/ccp-crypto-main.c 	gfp_t gfp;
gfp               280 drivers/crypto/ccp/ccp-crypto-main.c 	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
gfp               282 drivers/crypto/ccp/ccp-crypto-main.c 	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
gfp                66 drivers/crypto/ccp/ccp-crypto-sha.c 	gfp_t gfp;
gfp               100 drivers/crypto/ccp/ccp-crypto-sha.c 		gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
gfp               103 drivers/crypto/ccp/ccp-crypto-sha.c 		ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
gfp              1794 drivers/crypto/chelsio/chtls/chtls_cm.c 				   size_t len, gfp_t gfp)
gfp              1801 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb = alloc_skb(len, gfp);
gfp              1099 drivers/crypto/chelsio/chtls/chtls_io.c 				gfp_t gfp = sk->sk_allocation;
gfp              1103 drivers/crypto/chelsio/chtls/chtls_io.c 					page = alloc_pages(gfp | __GFP_COMP |
gfp              1111 drivers/crypto/chelsio/chtls/chtls_io.c 					page = alloc_page(gfp);
gfp                62 drivers/crypto/qce/ablkcipher.c 	gfp_t gfp;
gfp                89 drivers/crypto/qce/ablkcipher.c 	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
gfp                92 drivers/crypto/qce/ablkcipher.c 	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
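
Nearly every drivers/crypto hit above derives the gfp from the request flags: a transform whose caller set CRYPTO_TFM_REQ_MAY_SLEEP (often together with CRYPTO_TFM_REQ_MAY_BACKLOG) gets GFP_KERNEL, anything that may run in atomic context gets GFP_ATOMIC. A minimal sketch of that helper; the size and use of the buffer are up to the driver:

#include <linux/crypto.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>

static void *alloc_req_scratch(u32 base_flags, size_t size)
{
	gfp_t gfp = (base_flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		    GFP_KERNEL : GFP_ATOMIC;

	return kzalloc(size, gfp);
}
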
gfp               306 drivers/dma/bcm2835-dma.c 	size_t period_len, gfp_t gfp)
gfp               319 drivers/dma/bcm2835-dma.c 	d = kzalloc(struct_size(d, cb_list, frames), gfp);
gfp               333 drivers/dma/bcm2835-dma.c 		cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
gfp               492 drivers/dma/sh/rcar-dmac.c static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
gfp               499 drivers/dma/sh/rcar-dmac.c 	page = (void *)get_zeroed_page(gfp);
gfp               623 drivers/dma/sh/rcar-dmac.c static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
gfp               630 drivers/dma/sh/rcar-dmac.c 	page = (void *)get_zeroed_page(gfp);
gfp               264 drivers/dma/sh/usb-dmac.c 			       gfp_t gfp)
gfp               269 drivers/dma/sh/usb-dmac.c 	desc = kzalloc(struct_size(desc, sg, sg_len), gfp);
gfp               299 drivers/dma/sh/usb-dmac.c 					       unsigned int sg_len, gfp_t gfp)
gfp               316 drivers/dma/sh/usb-dmac.c 	if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
gfp               170 drivers/gpu/drm/i915/gem/i915_gem_context.c static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)
gfp               189 drivers/gpu/drm/i915/gem/i915_gem_context.c 	return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp);
gfp                43 drivers/gpu/drm/i915/gem/i915_gem_internal.c 	gfp_t gfp;
gfp                59 drivers/gpu/drm/i915/gem/i915_gem_internal.c 	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
gfp                62 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		gfp &= ~__GFP_HIGHMEM;
gfp                63 drivers/gpu/drm/i915/gem/i915_gem_internal.c 		gfp |= __GFP_DMA32;
gfp                86 drivers/gpu/drm/i915/gem/i915_gem_internal.c 			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
gfp                87 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		gfp_t gfp = noreclaim;
gfp                91 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
gfp               113 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 				gfp = mapping_gfp_mask(mapping);
gfp               129 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 				gfp |= __GFP_RETRY_MAYFAIL;
gfp               148 drivers/gpu/drm/i915/gem/i915_gem_shmem.c 		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
gfp               365 drivers/gpu/drm/i915/i915_gem_gtt.c static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
gfp               378 drivers/gpu/drm/i915/i915_gem_gtt.c 		return alloc_page(gfp);
gfp               397 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = alloc_page(gfp);
gfp               571 drivers/gpu/drm/i915/i915_gem_gtt.c 			    gfp_t gfp)
gfp               573 drivers/gpu/drm/i915/i915_gem_gtt.c 	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
gfp               618 drivers/gpu/drm/i915/i915_gem_gtt.c setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
gfp               637 drivers/gpu/drm/i915/i915_gem_gtt.c 		gfp |= __GFP_NOWARN;
gfp               639 drivers/gpu/drm/i915/i915_gem_gtt.c 	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
gfp               646 drivers/gpu/drm/i915/i915_gem_gtt.c 		page = alloc_pages(gfp, order);
gfp               675 drivers/gpu/drm/i915/i915_gem_gtt.c 		gfp &= ~__GFP_NOWARN;
gfp               183 drivers/gpu/drm/i915/i915_gpu_error.c static int pool_refill(struct pagevec *pv, gfp_t gfp)
gfp               188 drivers/gpu/drm/i915/i915_gpu_error.c 		p = alloc_page(gfp);
gfp               198 drivers/gpu/drm/i915/i915_gpu_error.c static int pool_init(struct pagevec *pv, gfp_t gfp)
gfp               204 drivers/gpu/drm/i915/i915_gpu_error.c 	err = pool_refill(pv, gfp);
gfp               211 drivers/gpu/drm/i915/i915_gpu_error.c static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
gfp               215 drivers/gpu/drm/i915/i915_gpu_error.c 	p = alloc_page(gfp);
gfp               340 drivers/gpu/drm/i915/i915_request.c 			       gfp_t gfp)
gfp               350 drivers/gpu/drm/i915/i915_request.c 	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
gfp               604 drivers/gpu/drm/i915/i915_request.c request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
gfp               611 drivers/gpu/drm/i915/i915_request.c 	if (!gfpflags_allow_blocking(gfp))
gfp               619 drivers/gpu/drm/i915/i915_request.c 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
gfp               631 drivers/gpu/drm/i915/i915_request.c 	return kmem_cache_alloc(global.slab_requests, gfp);
gfp               635 drivers/gpu/drm/i915/i915_request.c __i915_request_create(struct intel_context *ce, gfp_t gfp)
gfp               642 drivers/gpu/drm/i915/i915_request.c 	might_sleep_if(gfpflags_allow_blocking(gfp));
gfp               677 drivers/gpu/drm/i915/i915_request.c 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
gfp               679 drivers/gpu/drm/i915/i915_request.c 		rq = request_alloc_slow(tl, gfp);
gfp               834 drivers/gpu/drm/i915/i915_request.c 		    gfp_t gfp)
gfp               854 drivers/gpu/drm/i915/i915_request.c 	err = __i915_request_await_execution(to, from, NULL, gfp);
gfp               248 drivers/gpu/drm/i915/i915_request.h __i915_request_create(struct intel_context *ce, gfp_t gfp);
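
The i915_request hits above allocate opportunistically: first a quiet attempt with __GFP_RETRY_MAYFAIL | __GFP_NOWARN, then, only if the caller's gfp allows blocking, a driver-specific slow path (retiring its own completed work) before trying again for real. A sketch of that two-stage shape, with reclaim_our_caches() as a hypothetical stand-in for the slow path:

#include <linux/gfp.h>
#include <linux/slab.h>

static void reclaim_our_caches(void)
{
	/* hypothetical: e.g. retire completed requests, flush deferred frees */
}

static void *request_alloc_sketch(struct kmem_cache *cache, gfp_t gfp)
{
	void *obj;

	obj = kmem_cache_alloc(cache, gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (obj)
		return obj;

	if (!gfpflags_allow_blocking(gfp))
		return NULL;		/* atomic caller: fail quietly */

	reclaim_our_caches();
	return kmem_cache_alloc(cache, gfp);
}
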
gfp               304 drivers/gpu/drm/i915/i915_sw_fence.c 					  wait_queue_entry_t *wq, gfp_t gfp)
gfp               310 drivers/gpu/drm/i915/i915_sw_fence.c 	might_sleep_if(gfpflags_allow_blocking(gfp));
gfp               325 drivers/gpu/drm/i915/i915_sw_fence.c 		wq = kmalloc(sizeof(*wq), gfp);
gfp               327 drivers/gpu/drm/i915/i915_sw_fence.c 			if (!gfpflags_allow_blocking(gfp))
gfp               367 drivers/gpu/drm/i915/i915_sw_fence.c 				     gfp_t gfp)
gfp               369 drivers/gpu/drm/i915/i915_sw_fence.c 	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
gfp               437 drivers/gpu/drm/i915/i915_sw_fence.c 				  gfp_t gfp)
gfp               444 drivers/gpu/drm/i915/i915_sw_fence.c 	might_sleep_if(gfpflags_allow_blocking(gfp));
gfp               452 drivers/gpu/drm/i915/i915_sw_fence.c 		     gfp);
gfp               454 drivers/gpu/drm/i915/i915_sw_fence.c 		if (!gfpflags_allow_blocking(gfp))
gfp               530 drivers/gpu/drm/i915/i915_sw_fence.c 				    gfp_t gfp)
gfp               536 drivers/gpu/drm/i915/i915_sw_fence.c 	might_sleep_if(gfpflags_allow_blocking(gfp));
gfp               554 drivers/gpu/drm/i915/i915_sw_fence.c 								gfp);
gfp               574 drivers/gpu/drm/i915/i915_sw_fence.c 							gfp);
gfp                70 drivers/gpu/drm/i915/i915_sw_fence.h 				     gfp_t gfp);
gfp                83 drivers/gpu/drm/i915/i915_sw_fence.h 				  gfp_t gfp);
gfp                90 drivers/gpu/drm/i915/i915_sw_fence.h 				    gfp_t gfp);
gfp               108 drivers/gpu/drm/i915/selftests/lib_sw_fence.c struct i915_sw_fence *heap_fence_create(gfp_t gfp)
gfp               112 drivers/gpu/drm/i915/selftests/lib_sw_fence.c 	h = kmalloc(sizeof(*h), gfp);
gfp                40 drivers/gpu/drm/i915/selftests/lib_sw_fence.h struct i915_sw_fence *heap_fence_create(gfp_t gfp);
gfp               155 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 	gfp_t gfp = GFP_USER | __GFP_ZERO;
gfp               200 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		gfp |= GFP_HIGHUSER;
gfp               202 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		gfp |= GFP_DMA32;
gfp               205 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c 		struct page *p = alloc_page(gfp);
gfp              1129 drivers/gpu/drm/tegra/drm.c 	gfp_t gfp;
gfp              1137 drivers/gpu/drm/tegra/drm.c 	gfp = GFP_KERNEL | __GFP_ZERO;
gfp              1145 drivers/gpu/drm/tegra/drm.c 		gfp |= GFP_DMA;
gfp              1148 drivers/gpu/drm/tegra/drm.c 	virt = (void *)__get_free_pages(gfp, get_order(size));
gfp               215 drivers/greybus/operation.c static int gb_message_send(struct gb_message *message, gfp_t gfp)
gfp               223 drivers/greybus/operation.c 					gfp);
gfp               469 drivers/greybus/operation.c 				 size_t response_size, gfp_t gfp)
gfp               477 drivers/greybus/operation.c 	response = gb_operation_message_alloc(hd, type, response_size, gfp);
gfp               578 drivers/greybus/operation.c 			  gfp_t gfp)
gfp               592 drivers/greybus/operation.c 					       flags, gfp);
gfp               604 drivers/greybus/operation.c 			 gfp_t gfp)
gfp               612 drivers/greybus/operation.c 					       flags, gfp);
gfp               723 drivers/greybus/operation.c 			      gfp_t gfp)
gfp               768 drivers/greybus/operation.c 	ret = gb_message_send(operation->request, gfp);
gfp               342 drivers/infiniband/core/iwpm_util.c 					u8 nl_client, gfp_t gfp)
gfp               347 drivers/infiniband/core/iwpm_util.c 	nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
gfp               109 drivers/infiniband/core/iwpm_util.h 						u8 nl_client, gfp_t gfp);
gfp               109 drivers/infiniband/hw/cxgb3/iwch_cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
gfp               264 drivers/infiniband/hw/cxgb3/iwch_cm.c static void *alloc_ep(int size, gfp_t gfp)
gfp               268 drivers/infiniband/hw/cxgb3/iwch_cm.c 	epc = kzalloc(size, gfp);
gfp               323 drivers/infiniband/hw/cxgb3/iwch_cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
gfp               329 drivers/infiniband/hw/cxgb3/iwch_cm.c 		skb = alloc_skb(len, gfp);
gfp               386 drivers/infiniband/hw/cxgb3/iwch_cm.c static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
gfp               392 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(NULL, sizeof(*req), gfp);
gfp               406 drivers/infiniband/hw/cxgb3/iwch_cm.c static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
gfp               411 drivers/infiniband/hw/cxgb3/iwch_cm.c 	skb = get_skb(skb, sizeof(*req), gfp);
gfp               657 drivers/infiniband/hw/cxgb3/iwch_cm.c static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
gfp               661 drivers/infiniband/hw/cxgb3/iwch_cm.c 	send_abort(ep, skb, gfp);
gfp              2055 drivers/infiniband/hw/cxgb3/iwch_cm.c int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
gfp              2115 drivers/infiniband/hw/cxgb3/iwch_cm.c 			ret = send_abort(ep, NULL, gfp);
gfp              2117 drivers/infiniband/hw/cxgb3/iwch_cm.c 			ret = send_halfclose(ep, gfp);
gfp               222 drivers/infiniband/hw/cxgb3/iwch_cm.h int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp);
gfp               142 drivers/infiniband/hw/cxgb4/cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
gfp               309 drivers/infiniband/hw/cxgb4/cm.c static void *alloc_ep(int size, gfp_t gfp)
gfp               313 drivers/infiniband/hw/cxgb4/cm.c 	epc = kzalloc(size, gfp);
gfp               315 drivers/infiniband/hw/cxgb4/cm.c 		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
gfp               455 drivers/infiniband/hw/cxgb4/cm.c static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
gfp               462 drivers/infiniband/hw/cxgb4/cm.c 		skb = alloc_skb(len, gfp);
gfp              3637 drivers/infiniband/hw/cxgb4/cm.c int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
gfp              1520 drivers/infiniband/hw/cxgb4/device.c struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
gfp              1524 drivers/infiniband/hw/cxgb4/device.c 	wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
gfp              1022 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
gfp              1054 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
gfp              1560 drivers/infiniband/hw/cxgb4/qp.c 			   gfp_t gfp)
gfp               113 drivers/infiniband/hw/hfi1/tid_rdma.c 					 gfp_t gfp);
gfp              1625 drivers/infiniband/hw/hfi1/tid_rdma.c 					 gfp_t gfp)
gfp              1632 drivers/infiniband/hw/hfi1/tid_rdma.c 	flows = kmalloc_node(MAX_FLOWS * sizeof(*flows), gfp,
gfp               938 drivers/infiniband/hw/mlx5/mr.c 	gfp_t gfp;
gfp               953 drivers/infiniband/hw/mlx5/mr.c 	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
gfp               954 drivers/infiniband/hw/mlx5/mr.c 	gfp |= __GFP_ZERO | __GFP_NOWARN;
gfp               960 drivers/infiniband/hw/mlx5/mr.c 	xlt = (void *)__get_free_pages(gfp, get_order(size));
gfp               966 drivers/infiniband/hw/mlx5/mr.c 		xlt = (void *)__get_free_pages(gfp, get_order(size));
gfp               145 drivers/infiniband/ulp/ipoib/ipoib_cm.c 					     gfp_t gfp)
gfp               169 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		struct page *page = alloc_page(gfp);
gfp              1466 drivers/iommu/amd_iommu.c 				   gfp_t gfp)
gfp              1478 drivers/iommu/amd_iommu.c 	pte = (void *)get_zeroed_page(gfp);
gfp              1499 drivers/iommu/amd_iommu.c 		      gfp_t gfp,
gfp              1508 drivers/iommu/amd_iommu.c 		*updated = increase_address_space(domain, address, gfp) || *updated;
gfp              1546 drivers/iommu/amd_iommu.c 			page = (u64 *)get_zeroed_page(gfp);
gfp              1663 drivers/iommu/amd_iommu.c 			  gfp_t gfp)
gfp              1678 drivers/iommu/amd_iommu.c 	pte   = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
gfp               493 drivers/iommu/dma-iommu.c 		unsigned int count, unsigned long order_mask, gfp_t gfp)
gfp               507 drivers/iommu/dma-iommu.c 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
gfp               521 drivers/iommu/dma-iommu.c 			gfp_t alloc_flags = gfp;
gfp               565 drivers/iommu/dma-iommu.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               593 drivers/iommu/dma-iommu.c 					gfp);
gfp               961 drivers/iommu/dma-iommu.c 		struct page **pagep, gfp_t gfp, unsigned long attrs)
gfp               969 drivers/iommu/dma-iommu.c 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
gfp               971 drivers/iommu/dma-iommu.c 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
gfp               998 drivers/iommu/dma-iommu.c 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
gfp              1005 drivers/iommu/dma-iommu.c 	gfp |= __GFP_ZERO;
gfp              1007 drivers/iommu/dma-iommu.c 	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
gfp              1009 drivers/iommu/dma-iommu.c 		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
gfp              1012 drivers/iommu/dma-iommu.c 	    !gfpflags_allow_blocking(gfp) && !coherent)
gfp              1013 drivers/iommu/dma-iommu.c 		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
gfp              1015 drivers/iommu/dma-iommu.c 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
gfp                31 drivers/iommu/intel-pasid.c int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp)
gfp                39 drivers/iommu/intel-pasid.c 	idr_preload(gfp);
gfp                79 drivers/iommu/intel-pasid.h int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
gfp               232 drivers/iommu/io-pgtable-arm-v7s.c static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
gfp               246 drivers/iommu/io-pgtable-arm-v7s.c 		table = kmem_cache_zalloc(data->l2_tables, gfp);
gfp               230 drivers/iommu/io-pgtable-arm.c static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
gfp               239 drivers/iommu/io-pgtable-arm.c 	VM_BUG_ON((gfp & __GFP_HIGHMEM));
gfp               241 drivers/iommu/io-pgtable-arm.c 			     gfp | __GFP_ZERO, order);
gfp               316 drivers/md/bcache/bset.c 			 gfp_t gfp)
gfp               324 drivers/md/bcache/bset.c 	t->data = (void *) __get_free_pages(gfp, b->page_order);
gfp               329 drivers/md/bcache/bset.c 		? kmalloc(bset_tree_bytes(b), gfp)
gfp               330 drivers/md/bcache/bset.c 		: (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
gfp               335 drivers/md/bcache/bset.c 		? kmalloc(bset_prev_bytes(b), gfp)
gfp               336 drivers/md/bcache/bset.c 		: (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
gfp               294 drivers/md/bcache/bset.h 			 gfp_t gfp);
gfp               599 drivers/md/bcache/btree.c static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
gfp               605 drivers/md/bcache/btree.c 				  gfp)) {
gfp               614 drivers/md/bcache/btree.c 				      struct bkey *k, gfp_t gfp)
gfp               620 drivers/md/bcache/btree.c 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
gfp               634 drivers/md/bcache/btree.c 	mca_data_alloc(b, k, gfp);
gfp              1753 drivers/md/bcache/super.c #define alloc_bucket_pages(gfp, c)			\
gfp              1754 drivers/md/bcache/super.c 	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
gfp                42 drivers/md/bcache/util.h #define init_heap(heap, _size, gfp)					\
gfp                48 drivers/md/bcache/util.h 	(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
gfp               127 drivers/md/bcache/util.h #define __init_fifo(fifo, gfp)						\
gfp               138 drivers/md/bcache/util.h 	(fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
gfp               142 drivers/md/bcache/util.h #define init_fifo_exact(fifo, _size, gfp)				\
gfp               145 drivers/md/bcache/util.h 	__init_fifo(fifo, gfp);						\
gfp               148 drivers/md/bcache/util.h #define init_fifo(fifo, _size, gfp)					\
gfp               153 drivers/md/bcache/util.h 	__init_fifo(fifo, gfp);						\
gfp                63 drivers/md/dm-bio-prison-v1.c struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
gfp                65 drivers/md/dm-bio-prison-v1.c 	return mempool_alloc(&prison->cell_pool, gfp);
gfp                60 drivers/md/dm-bio-prison-v1.h 						    gfp_t gfp);
gfp                66 drivers/md/dm-bio-prison-v2.c struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
gfp                68 drivers/md/dm-bio-prison-v2.c 	return mempool_alloc(&prison->cell_pool, gfp);
gfp                67 drivers/md/dm-bio-prison-v2.h 						    gfp_t gfp);
gfp              1523 drivers/md/dm-bufio.c static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
gfp              1525 drivers/md/dm-bufio.c 	if (!(gfp & __GFP_FS)) {
gfp              1454 drivers/md/dm-crypt.c static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
gfp              1465 drivers/md/dm-crypt.c 	clone = bio_clone_fast(io->base_bio, gfp, &cc->bs);
gfp               214 drivers/md/dm-kcopyd.c static struct page_list *alloc_pl(gfp_t gfp)
gfp               218 drivers/md/dm-kcopyd.c 	pl = kmalloc(sizeof(*pl), gfp);
gfp               222 drivers/md/dm-kcopyd.c 	pl->page = alloc_page(gfp);
gfp               720 drivers/md/dm-snap.c static struct dm_exception *alloc_completed_exception(gfp_t gfp)
gfp               724 drivers/md/dm-snap.c 	e = kmem_cache_alloc(exception_cache, gfp);
gfp               725 drivers/md/dm-snap.c 	if (!e && gfp == GFP_NOIO)
gfp               552 drivers/md/dm-table.c 	gfp_t gfp;
gfp               556 drivers/md/dm-table.c 		gfp = GFP_KERNEL;
gfp               559 drivers/md/dm-table.c 		gfp = GFP_NOIO;
gfp               561 drivers/md/dm-table.c 	argv = kmalloc_array(new_size, sizeof(*argv), gfp);
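
The dm-table and dm-snap hits above choose between GFP_KERNEL and GFP_NOIO depending on whether the allocation can be reached while servicing I/O, since GFP_NOIO keeps reclaim from re-entering the block layer underneath device-mapper. A minimal sketch of that choice (illustrative only):

#include <linux/gfp.h>
#include <linux/slab.h>

static char **alloc_argv_sketch(unsigned int count, bool in_io_path)
{
	gfp_t gfp = in_io_path ? GFP_NOIO : GFP_KERNEL;

	return kmalloc_array(count, sizeof(char *), gfp);
}

The scoped memalloc_noio_save()/memalloc_noio_restore() helpers are the other common way to get the same guarantee without threading a gfp argument through every callee.
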
gfp              1472 drivers/md/dm-writecache.c static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
gfp              1231 drivers/md/raid1.c 	gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
gfp              1295 drivers/md/raid1.c 					      gfp, &conf->bio_split);
gfp              1305 drivers/md/raid1.c 	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
gfp              1146 drivers/md/raid10.c 	gfp_t gfp = GFP_NOIO;
gfp              1161 drivers/md/raid10.c 		gfp = GFP_NOIO | __GFP_HIGH;
gfp              1194 drivers/md/raid10.c 					      gfp, &conf->bio_split);
gfp              1205 drivers/md/raid10.c 	read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
gfp               467 drivers/md/raid5.c static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
gfp               475 drivers/md/raid5.c 		if (!(page = alloc_page(gfp))) {
gfp              2131 drivers/md/raid5.c static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
gfp              2137 drivers/md/raid5.c 	sh = kmem_cache_zalloc(sc, gfp);
gfp              2156 drivers/md/raid5.c 			sh->ppl_page = alloc_page(gfp);
gfp              2165 drivers/md/raid5.c static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
gfp              2169 drivers/md/raid5.c 	sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf);
gfp              2173 drivers/md/raid5.c 	if (grow_buffers(sh, gfp)) {
gfp               304 drivers/mfd/dln2.c 			   int *obuf_len, gfp_t gfp)
gfp               311 drivers/mfd/dln2.c 	buf = kmalloc(len, gfp);
gfp               631 drivers/mfd/dln2.c static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
gfp               638 drivers/mfd/dln2.c 		ret = usb_submit_urb(dln2->rx_urb[i], gfp);
gfp               134 drivers/misc/mic/host/mic_boot.c 			     dma_addr_t *dma_handle, gfp_t gfp,
gfp               140 drivers/misc/mic/host/mic_boot.c 	void *va = kmalloc(size, gfp | __GFP_ZERO);
gfp                17 drivers/misc/mic/scif/scif_map.h 		    gfp_t gfp)
gfp                22 drivers/misc/mic/scif/scif_map.h 		va = kmalloc(size, gfp);
gfp                27 drivers/misc/mic/scif/scif_map.h 					size, dma_handle, gfp);
gfp               162 drivers/mmc/core/queue.c static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
gfp               166 drivers/mmc/core/queue.c 	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
gfp               205 drivers/mmc/core/queue.c 			      gfp_t gfp)
gfp               211 drivers/mmc/core/queue.c 	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
gfp               393 drivers/net/ethernet/alacritech/slicoss.c static void slic_refill_rx_queue(struct slic_device *sdev, gfp_t gfp)
gfp               407 drivers/net/ethernet/alacritech/slicoss.c 		skb = alloc_skb(maplen + ALIGN_MASK, gfp);
gfp               464 drivers/net/ethernet/amazon/ena/ena_netdev.c 				    struct ena_rx_buffer *rx_info, gfp_t gfp)
gfp               474 drivers/net/ethernet/amazon/ena/ena_netdev.c 	page = alloc_page(gfp);
gfp               861 drivers/net/ethernet/amd/lance.c lance_init_ring(struct net_device *dev, gfp_t gfp)
gfp               873 drivers/net/ethernet/amd/lance.c 		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
gfp               878 drivers/net/ethernet/amd/lance.c 			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
gfp               291 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	gfp_t gfp;
gfp               298 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 	gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
gfp               300 drivers/net/ethernet/amd/xgbe/xgbe-desc.c 		pages = alloc_pages_node(node, gfp, order);
gfp                72 drivers/net/ethernet/atheros/alx/main.c static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
gfp                97 drivers/net/ethernet/atheros/alx/main.c 		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
gfp              1190 drivers/net/ethernet/broadcom/b44.c static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
gfp              1195 drivers/net/ethernet/broadcom/b44.c 	bp->rx_buffers = kzalloc(size, gfp);
gfp              1200 drivers/net/ethernet/broadcom/b44.c 	bp->tx_buffers = kzalloc(size, gfp);
gfp              1206 drivers/net/ethernet/broadcom/b44.c 					 &bp->rx_ring_dma, gfp);
gfp              1214 drivers/net/ethernet/broadcom/b44.c 		rx_ring = kzalloc(size, gfp);
gfp              1234 drivers/net/ethernet/broadcom/b44.c 					 &bp->tx_ring_dma, gfp);
gfp              1242 drivers/net/ethernet/broadcom/b44.c 		tx_ring = kzalloc(size, gfp);
gfp              2728 drivers/net/ethernet/broadcom/bnx2.c bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
gfp              2734 drivers/net/ethernet/broadcom/bnx2.c 	struct page *page = alloc_page(gfp);
gfp              2769 drivers/net/ethernet/broadcom/bnx2.c bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
gfp              2777 drivers/net/ethernet/broadcom/bnx2.c 	data = kmalloc(bp->rx_buf_size, gfp);
gfp               687 drivers/net/ethernet/broadcom/bnxt/bnxt.c 					 gfp_t gfp)
gfp               707 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				       gfp_t gfp)
gfp               712 drivers/net/ethernet/broadcom/bnxt/bnxt.c 	data = kmalloc(bp->rx_buf_size, gfp);
gfp               728 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		       u16 prod, gfp_t gfp)
gfp               736 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
gfp               744 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
gfp               790 drivers/net/ethernet/broadcom/bnxt/bnxt.c 				     u16 prod, gfp_t gfp)
gfp               804 drivers/net/ethernet/broadcom/bnxt/bnxt.c 			page = alloc_page(gfp);
gfp               817 drivers/net/ethernet/broadcom/bnxt/bnxt.c 		page = alloc_page(gfp);
gfp              1960 drivers/net/ethernet/broadcom/bnxt/bnxt.h 		       u16 prod, gfp_t gfp);
gfp                93 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 					       struct rbdr *rbdr, gfp_t gfp)
gfp               121 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
gfp               176 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 					 gfp_t gfp, u32 buf_len, u64 *rbuf)
gfp               193 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 	pgcache = nicvf_alloc_page(nic, rbdr, gfp);
gfp               379 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
gfp               417 drivers/net/ethernet/cavium/thunder/nicvf_queues.c 		if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
gfp               830 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 					       gfp_t gfp)
gfp               837 drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c 		skb = alloc_skb(len, gfp);
gfp               443 drivers/net/ethernet/chelsio/cxgb3/sge.c 			  struct rx_sw_desc *sd, gfp_t gfp,
gfp               449 drivers/net/ethernet/chelsio/cxgb3/sge.c 		q->pg_chunk.page = alloc_pages(gfp, order);
gfp               505 drivers/net/ethernet/chelsio/cxgb3/sge.c static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
gfp               516 drivers/net/ethernet/chelsio/cxgb3/sge.c 			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
gfp               531 drivers/net/ethernet/chelsio/cxgb3/sge.c 			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
gfp               592 drivers/net/ethernet/chelsio/cxgb4/sge.c 			      gfp_t gfp)
gfp               607 drivers/net/ethernet/chelsio/cxgb4/sge.c 	gfp |= __GFP_NOWARN;
gfp               617 drivers/net/ethernet/chelsio/cxgb4/sge.c 		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
gfp               648 drivers/net/ethernet/chelsio/cxgb4/sge.c 		pg = alloc_pages_node(node, gfp, 0);
gfp               605 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 			      int n, gfp_t gfp)
gfp               621 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 	gfp |= __GFP_NOWARN;
gfp               633 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		page = __dev_alloc_pages(gfp, s->fl_pg_order);
gfp               677 drivers/net/ethernet/chelsio/cxgb4vf/sge.c 		page = __dev_alloc_page(gfp);
gfp              2576 drivers/net/ethernet/emulex/benet/be_main.c static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
gfp              2581 drivers/net/ethernet/emulex/benet/be_main.c 		gfp |= __GFP_COMP;
gfp              2582 drivers/net/ethernet/emulex/benet/be_main.c 	return  alloc_pages(gfp, order);
gfp              2589 drivers/net/ethernet/emulex/benet/be_main.c static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
gfp              2603 drivers/net/ethernet/emulex/benet/be_main.c 			pagep = be_alloc_pages(adapter->big_page_size, gfp);
gfp               383 drivers/net/ethernet/faraday/ftgmac100.c 				  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
gfp                73 drivers/net/ethernet/faraday/ftmac100.c 				  struct ftmac100_rxdes *rxdes, gfp_t gfp);
gfp               660 drivers/net/ethernet/faraday/ftmac100.c 				  struct ftmac100_rxdes *rxdes, gfp_t gfp)
gfp               666 drivers/net/ethernet/faraday/ftmac100.c 	page = alloc_page(gfp);
gfp               254 drivers/net/ethernet/intel/e1000e/e1000.h 			     gfp_t gfp);
gfp               645 drivers/net/ethernet/intel/e1000e/netdev.c 				   int cleaned_count, gfp_t gfp)
gfp               666 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
gfp               713 drivers/net/ethernet/intel/e1000e/netdev.c 				      int cleaned_count, gfp_t gfp)
gfp               739 drivers/net/ethernet/intel/e1000e/netdev.c 				ps_page->page = alloc_page(gfp);
gfp               765 drivers/net/ethernet/intel/e1000e/netdev.c 						  gfp);
gfp               817 drivers/net/ethernet/intel/e1000e/netdev.c 					 int cleaned_count, gfp_t gfp)
gfp               838 drivers/net/ethernet/intel/e1000e/netdev.c 		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
gfp               849 drivers/net/ethernet/intel/e1000e/netdev.c 			buffer_info->page = alloc_page(gfp);
gfp              1439 drivers/net/ethernet/marvell/sky2.c static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2, gfp_t gfp)
gfp              1446 drivers/net/ethernet/marvell/sky2.c 				 gfp);
gfp              1464 drivers/net/ethernet/marvell/sky2.c 		struct page *page = alloc_page(gfp);
gfp                55 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			   gfp_t gfp)
gfp                60 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	page = alloc_page(gfp);
gfp                78 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			       gfp_t gfp)
gfp                84 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			if (mlx4_alloc_page(priv, frags, gfp))
gfp               135 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				   gfp_t gfp)
gfp               154 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
gfp              1631 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 				      gfp_t gfp)
gfp              1664 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
gfp              1680 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	gfp_t gfp;
gfp              1697 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
gfp              1699 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	inb = alloc_msg(dev, in_size, gfp);
gfp              1713 drivers/net/ethernet/mellanox/mlx5/core/cmd.c 	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
gfp               130 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c 	gfp_t gfp;
gfp               135 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c 	gfp = GFP_KERNEL | __GFP_ZERO;
gfp               136 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c 	buff = (void *)__get_free_pages(gfp,
gfp                67 drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h #define l2addr_hash_add(hash, mac, type, gfp) ({            \
gfp                71 drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h 	ptr = kzalloc(sizeof(type), gfp);                   \
gfp               539 drivers/net/ethernet/natsemi/ns83820.c static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
gfp               549 drivers/net/ethernet/natsemi/ns83820.c 	if (gfp == GFP_ATOMIC)
gfp               556 drivers/net/ethernet/natsemi/ns83820.c 		skb = __netdev_alloc_skb(ndev, REAL_RX_BUF_SIZE+16, gfp);
gfp               561 drivers/net/ethernet/natsemi/ns83820.c 		if (gfp != GFP_ATOMIC)
gfp               564 drivers/net/ethernet/natsemi/ns83820.c 		if (gfp != GFP_ATOMIC)
gfp               571 drivers/net/ethernet/natsemi/ns83820.c 	if (gfp == GFP_ATOMIC)
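
The natsemi/e1000e/bnxt receive-refill hits above thread the gfp through because the same helper runs with GFP_KERNEL when the ring is first populated and with GFP_ATOMIC from the NAPI/interrupt path. A sketch of one refill step, with the buffer length as an assumed constant:

#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_BUF_LEN_SKETCH 2048		/* illustrative buffer size */

static struct sk_buff *alloc_rx_buf_sketch(struct net_device *ndev, gfp_t gfp)
{
	struct sk_buff *skb;

	/* gfp is GFP_ATOMIC from NAPI/IRQ context, GFP_KERNEL from ndo_open */
	skb = __netdev_alloc_skb(ndev, RX_BUF_LEN_SKETCH + NET_IP_ALIGN, gfp);
	if (!skb)
		return NULL;		/* ring stays short; refill again later */

	skb_reserve(skb, NET_IP_ALIGN);	/* keep the IP header aligned */
	return skb;
}
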
gfp               332 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 			      gfp_t gfp, int order)
gfp               338 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 	gfp |= __GFP_COMP | __GFP_NOWARN;
gfp               340 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c 		pages = alloc_pages(gfp, order);
gfp               298 drivers/net/usb/hso.c 				   struct usb_device *usb, gfp_t gfp);
gfp              3179 drivers/net/usb/hso.c 				   struct usb_device *usb, gfp_t gfp)
gfp              3191 drivers/net/usb/hso.c 	result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
gfp              1075 drivers/net/virtio_net.c 			     gfp_t gfp)
gfp              1086 drivers/net/virtio_net.c 	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
gfp              1094 drivers/net/virtio_net.c 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
gfp              1101 drivers/net/virtio_net.c 			   gfp_t gfp)
gfp              1111 drivers/net/virtio_net.c 		first = get_a_page(rq, gfp);
gfp              1124 drivers/net/virtio_net.c 	first = get_a_page(rq, gfp);
gfp              1142 drivers/net/virtio_net.c 				  first, gfp);
gfp              1166 drivers/net/virtio_net.c 				 struct receive_queue *rq, gfp_t gfp)
gfp              1182 drivers/net/virtio_net.c 	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
gfp              1201 drivers/net/virtio_net.c 	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
gfp              1216 drivers/net/virtio_net.c 			  gfp_t gfp)
gfp              1223 drivers/net/virtio_net.c 			err = add_recvbuf_mergeable(vi, rq, gfp);
gfp              1225 drivers/net/virtio_net.c 			err = add_recvbuf_big(vi, rq, gfp);
gfp              1227 drivers/net/virtio_net.c 			err = add_recvbuf_small(vi, rq, gfp);
gfp               142 drivers/net/wireless/ath/ath10k/wmi-tlv.c 			   size_t len, gfp_t gfp)
gfp               147 drivers/net/wireless/ath/ath10k/wmi-tlv.c 	tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
gfp               316 drivers/net/wireless/ath/carl9170/usb.c static int carl9170_usb_submit_rx_urb(struct ar9170 *ar, gfp_t gfp)
gfp               327 drivers/net/wireless/ath/carl9170/usb.c 			err = usb_submit_urb(urb, gfp);
gfp               459 drivers/net/wireless/ath/carl9170/usb.c static struct urb *carl9170_usb_alloc_rx_urb(struct ar9170 *ar, gfp_t gfp)
gfp               464 drivers/net/wireless/ath/carl9170/usb.c 	buf = kmalloc(ar->fw.rx_size, gfp);
gfp               468 drivers/net/wireless/ath/carl9170/usb.c 	urb = usb_alloc_urb(0, gfp);
gfp               277 drivers/net/wireless/ath/wcn36xx/dxe.c 				gfp_t gfp)
gfp               282 drivers/net/wireless/ath/wcn36xx/dxe.c 	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
gfp               389 drivers/net/wireless/intel/iwlegacy/3945-rs.c il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
gfp              2253 drivers/net/wireless/intel/iwlegacy/4965-rs.c il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
gfp              2760 drivers/net/wireless/intel/iwlwifi/dvm/rs.c 			  gfp_t gfp)
gfp              2753 drivers/net/wireless/intel/iwlwifi/mvm/rs.c 			      gfp_t gfp)
gfp               276 drivers/net/wireless/mediatek/mt76/usb.c 		 int nsgs, gfp_t gfp)
gfp               285 drivers/net/wireless/mediatek/mt76/usb.c 		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
gfp               310 drivers/net/wireless/mediatek/mt76/usb.c mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
gfp               315 drivers/net/wireless/mediatek/mt76/usb.c 		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
gfp               318 drivers/net/wireless/mediatek/mt76/usb.c 	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
gfp                12 drivers/net/wireless/mediatek/mt7601u/dma.c 				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);
gfp               388 drivers/net/wireless/mediatek/mt7601u/dma.c 				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
gfp               401 drivers/net/wireless/mediatek/mt7601u/dma.c 	ret = usb_submit_urb(e->urb, gfp);
gfp                55 drivers/net/wireless/mediatek/mt7601u/usb.c 			   struct mt7601u_dma_buf *buf, gfp_t gfp,
gfp                73 drivers/net/wireless/mediatek/mt7601u/usb.c 	ret = usb_submit_urb(buf->urb, gfp);
gfp                60 drivers/net/wireless/mediatek/mt7601u/usb.h 			   struct mt7601u_dma_buf *buf, gfp_t gfp,
gfp              1432 drivers/net/wireless/ralink/rt2x00/rt2x00.h void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
gfp               737 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
gfp               757 drivers/net/wireless/ralink/rt2x00/rt2x00dev.c 	skb = rt2x00queue_alloc_rxskb(entry, gfp);
gfp                94 drivers/net/wireless/ralink/rt2x00/rt2x00lib.h struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp);
gfp                23 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
gfp                59 drivers/net/wireless/ralink/rt2x00/rt2x00queue.c 	skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
gfp               276 drivers/net/wireless/realtek/rtlwifi/rc.c 				struct ieee80211_sta *sta, gfp_t gfp)
gfp               281 drivers/net/wireless/realtek/rtlwifi/rc.c 	rate_priv = kzalloc(sizeof(*rate_priv), gfp);
gfp               873 drivers/parisc/sba_iommu.c 		gfp_t gfp, unsigned long attrs)
gfp               883 drivers/parisc/sba_iommu.c         ret = (void *) __get_free_pages(gfp, get_order(size));
gfp               365 drivers/power/supply/power_supply_sysfs.c static char *kstruprdup(const char *str, gfp_t gfp)
gfp               369 drivers/power/supply/power_supply_sysfs.c 	ustr = ret = kmalloc(strlen(str) + 1, gfp);
gfp               351 drivers/scsi/cxgbi/libcxgbi.h static inline struct sk_buff *alloc_wr(int wrlen, int dlen, gfp_t gfp)
gfp               353 drivers/scsi/cxgbi/libcxgbi.h 	struct sk_buff *skb = alloc_skb(wrlen + dlen, gfp);
gfp               541 drivers/scsi/cxgbi/libcxgbi.h 					gfp_t gfp)
gfp               543 drivers/scsi/cxgbi/libcxgbi.h 	void *p = kzalloc(size, gfp | __GFP_NOWARN);
gfp               135 drivers/scsi/libfc/fc_fcp.c static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
gfp               140 drivers/scsi/libfc/fc_fcp.c 	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
gfp               358 drivers/scsi/scsi_dh.c const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
gfp               368 drivers/scsi/scsi_dh.c 		handler_name = kstrdup(sdev->handler->name, gfp);
gfp              2328 drivers/scsi/scsi_transport_iscsi.c iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
gfp              2330 drivers/scsi/scsi_transport_iscsi.c 	return nlmsg_multicast(nls, skb, 0, group, gfp);
gfp              2693 drivers/spi/spi.c 		    size_t size, gfp_t gfp)
gfp              2697 drivers/spi/spi.c 	sres = kzalloc(sizeof(*sres) + size, gfp);
gfp              2804 drivers/spi/spi.c 	gfp_t gfp)
gfp              2814 drivers/spi/spi.c 			      gfp);
gfp              2895 drivers/spi/spi.c 					gfp_t gfp)
gfp              2906 drivers/spi/spi.c 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
gfp              2970 drivers/spi/spi.c 				gfp_t gfp)
gfp              2984 drivers/spi/spi.c 							   maxsize, gfp);
gfp                34 drivers/staging/media/ipu3/ipu3-dmamap.c static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
gfp                47 drivers/staging/media/ipu3/ipu3-dmamap.c 	gfp |= __GFP_HIGHMEM | __GFP_ZERO;
gfp                59 drivers/staging/media/ipu3/ipu3-dmamap.c 					   gfp | high_order_gfp : gfp, order);
gfp               114 drivers/staging/rtl8723bs/include/ioctl_cfg80211.h #define rtw_cfg80211_rx_mgmt(adapter, freq, sig_dbm, buf, len, gfp) cfg80211_rx_mgmt((adapter)->rtw_wdev, freq, sig_dbm, buf, len, 0)
gfp               116 drivers/staging/rtl8723bs/include/ioctl_cfg80211.h #define rtw_cfg80211_mgmt_tx_status(adapter, cookie, buf, len, ack, gfp) cfg80211_mgmt_tx_status((adapter)->rtw_wdev, cookie, buf, len, ack, gfp)
gfp               117 drivers/staging/rtl8723bs/include/ioctl_cfg80211.h #define rtw_cfg80211_ready_on_channel(adapter, cookie, chan, channel_type, duration, gfp)  cfg80211_ready_on_channel((adapter)->rtw_wdev, cookie, chan, duration, gfp)
gfp               118 drivers/staging/rtl8723bs/include/ioctl_cfg80211.h #define rtw_cfg80211_remain_on_channel_expired(adapter, cookie, chan, chan_type, gfp) cfg80211_remain_on_channel_expired((adapter)->rtw_wdev, cookie, chan, gfp)
gfp               198 drivers/staging/wusbcore/host/hwa-hc.c 				gfp_t gfp)
gfp               203 drivers/staging/wusbcore/host/hwa-hc.c 	return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
gfp               180 drivers/staging/wusbcore/wa-rpipe.c 			  gfp_t gfp)
gfp               187 drivers/staging/wusbcore/wa-rpipe.c 	rpipe = kzalloc(sizeof(*rpipe), gfp);
gfp               300 drivers/staging/wusbcore/wa-rpipe.c 		     struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
gfp               384 drivers/staging/wusbcore/wa-rpipe.c 			   const struct urb *urb, gfp_t gfp)
gfp               424 drivers/staging/wusbcore/wa-rpipe.c 		    struct urb *urb, gfp_t gfp)
gfp               435 drivers/staging/wusbcore/wa-rpipe.c 			result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
gfp               447 drivers/staging/wusbcore/wa-rpipe.c 		result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
gfp               450 drivers/staging/wusbcore/wa-rpipe.c 		result = rpipe_aim(rpipe, wa, ep, urb, gfp);
gfp               149 drivers/staging/wusbcore/wa-xfer.c 	gfp_t gfp;			/* allocation mask */
gfp              1649 drivers/staging/wusbcore/wa-xfer.c 	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
gfp              1821 drivers/staging/wusbcore/wa-xfer.c 		   struct urb *urb, gfp_t gfp)
gfp              1844 drivers/staging/wusbcore/wa-xfer.c 	xfer = kzalloc(sizeof(*xfer), gfp);
gfp              1854 drivers/staging/wusbcore/wa-xfer.c 	xfer->gfp = gfp;
gfp              1763 drivers/target/target_core_transport.c 		gfp_t gfp, u64 tag, int flags)
gfp              1777 drivers/target/target_core_transport.c 	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
gfp              2519 drivers/target/target_core_transport.c 	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
gfp              2521 drivers/target/target_core_transport.c 	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
gfp              1599 drivers/usb/gadget/udc/pch_udc.c 			  gfp_t gfp)
gfp              1604 drivers/usb/gadget/udc/pch_udc.c 	retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
gfp              1750 drivers/usb/gadget/udc/pch_udc.c 						  gfp_t gfp)
gfp              1759 drivers/usb/gadget/udc/pch_udc.c 	req = kzalloc(sizeof *req, gfp);
gfp              1768 drivers/usb/gadget/udc/pch_udc.c 	dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
gfp              1825 drivers/usb/gadget/udc/pch_udc.c 								 gfp_t gfp)
gfp               520 drivers/usb/gadget/udc/snps_udc_core.c udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
gfp               532 drivers/usb/gadget/udc/snps_udc_core.c 	req = kzalloc(sizeof(struct udc_request), gfp);
gfp               541 drivers/usb/gadget/udc/snps_udc_core.c 		dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
gfp               877 drivers/usb/gadget/udc/snps_udc_core.c static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
gfp               895 drivers/usb/gadget/udc/snps_udc_core.c 		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
gfp              1054 drivers/usb/gadget/udc/snps_udc_core.c udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
gfp               828 drivers/usb/mtu3/mtu3_gadget_ep0.c 	struct usb_request *req, gfp_t gfp)
gfp               444 drivers/usb/storage/uas.c static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
gfp               450 drivers/usb/storage/uas.c 	struct urb *urb = usb_alloc_urb(0, gfp);
gfp               467 drivers/usb/storage/uas.c static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
gfp               472 drivers/usb/storage/uas.c 	struct urb *urb = usb_alloc_urb(0, gfp);
gfp               478 drivers/usb/storage/uas.c 	iu = kzalloc(sizeof(*iu), gfp);
gfp               494 drivers/usb/storage/uas.c static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
gfp               500 drivers/usb/storage/uas.c 	struct urb *urb = usb_alloc_urb(0, gfp);
gfp               511 drivers/usb/storage/uas.c 	iu = kzalloc(sizeof(*iu) + len, gfp);
gfp               538 drivers/usb/storage/uas.c static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
gfp               544 drivers/usb/storage/uas.c 	urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
gfp               548 drivers/usb/storage/uas.c 	err = usb_submit_urb(urb, gfp);
gfp               646 drivers/vhost/net.c 				       struct page_frag *pfrag, gfp_t gfp)
gfp               658 drivers/vhost/net.c 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
gfp               667 drivers/vhost/net.c 	pfrag->page = alloc_page(gfp);
gfp               183 drivers/vhost/vringh.c static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
gfp               193 drivers/vhost/vringh.c 		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
gfp               195 drivers/vhost/vringh.c 		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
gfp               264 drivers/vhost/vringh.c 	     gfp_t gfp,
gfp               360 drivers/vhost/vringh.c 			err = resize_iovec(iov, gfp);
gfp               920 drivers/vhost/vringh.c 			gfp_t gfp)
gfp               934 drivers/vhost/vringh.c 			   gfp, copydesc_kern);
gfp               394 drivers/virtio/virtio_ring.c 					       gfp_t gfp)
gfp               404 drivers/virtio/virtio_ring.c 	gfp &= ~__GFP_HIGHMEM;
gfp               406 drivers/virtio/virtio_ring.c 	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
gfp               422 drivers/virtio/virtio_ring.c 				      gfp_t gfp)
gfp               448 drivers/virtio/virtio_ring.c 		desc = alloc_indirect_split(_vq, total_sg, gfp);
gfp               963 drivers/virtio/virtio_ring.c 						       gfp_t gfp)
gfp               972 drivers/virtio/virtio_ring.c 	gfp &= ~__GFP_HIGHMEM;
gfp               974 drivers/virtio/virtio_ring.c 	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
gfp               985 drivers/virtio/virtio_ring.c 				       gfp_t gfp)
gfp               994 drivers/virtio/virtio_ring.c 	desc = alloc_indirect_packed(total_sg, gfp);
gfp              1098 drivers/virtio/virtio_ring.c 				       gfp_t gfp)
gfp              1123 drivers/virtio/virtio_ring.c 				out_sgs, in_sgs, data, gfp);
gfp              1700 drivers/virtio/virtio_ring.c 				gfp_t gfp)
gfp              1705 drivers/virtio/virtio_ring.c 					out_sgs, in_sgs, data, ctx, gfp) :
gfp              1707 drivers/virtio/virtio_ring.c 					out_sgs, in_sgs, data, ctx, gfp);
gfp              1729 drivers/virtio/virtio_ring.c 		      gfp_t gfp)
gfp              1741 drivers/virtio/virtio_ring.c 			     data, NULL, gfp);
gfp              1761 drivers/virtio/virtio_ring.c 			 gfp_t gfp)
gfp              1763 drivers/virtio/virtio_ring.c 	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
gfp              1783 drivers/virtio/virtio_ring.c 			gfp_t gfp)
gfp              1785 drivers/virtio/virtio_ring.c 	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
gfp              1807 drivers/virtio/virtio_ring.c 			gfp_t gfp)
gfp              1809 drivers/virtio/virtio_ring.c 	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
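The virtqueue_add_*() entry points listed above all thread the caller's gfp mask down to the indirect-descriptor allocation (alloc_indirect_split()/alloc_indirect_packed()). As an illustrative sketch only, not part of the cross-reference and with a made-up helper name, a caller typically picks the mask by context:

#include <linux/virtio.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: queue one outgoing buffer, choosing the gfp mask by
 * calling context. GFP_ATOMIC when sleeping is forbidden (IRQ/NAPI paths),
 * GFP_KERNEL when blocking for the indirect descriptor table is acceptable. */
static int example_add_outbuf(struct virtqueue *vq, void *buf,
			      unsigned int len, bool atomic_ctx)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	return virtqueue_add_outbuf(vq, &sg, 1, buf,
				    atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
}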
gfp                32 drivers/visorbus/visorbus_private.h struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
gfp               349 drivers/visorbus/visorchannel.c struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
gfp               359 drivers/visorbus/visorchannel.c 	channel = kzalloc(sizeof(*channel), gfp);
gfp               453 drivers/xen/balloon.c static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
gfp               465 drivers/xen/balloon.c 		page = alloc_page(gfp);
gfp               203 fs/9p/cache.c  int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
gfp               210 fs/9p/cache.c  	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
gfp                30 fs/9p/cache.h  extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
gfp                43 fs/9p/cache.h  					    gfp_t gfp)
gfp                45 fs/9p/cache.h  	return __v9fs_fscache_release_page(page, gfp);
gfp               103 fs/9p/cache.h  					    gfp_t gfp) {
gfp               123 fs/9p/vfs_addr.c static int v9fs_release_page(struct page *page, gfp_t gfp)
gfp               127 fs/9p/vfs_addr.c 	return v9fs_fscache_release_page(page, gfp);
gfp               271 fs/afs/dir.c   			gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask;
gfp               277 fs/afs/dir.c   			req->pages[i] = __page_cache_alloc(gfp);
gfp               282 fs/afs/dir.c   						    i, gfp);
gfp               197 fs/afs/dir_edit.c 	gfp_t gfp;
gfp               209 fs/afs/dir_edit.c 	gfp = vnode->vfs_inode.i_mapping->gfp_mask;
gfp               210 fs/afs/dir_edit.c 	page0 = find_or_create_page(vnode->vfs_inode.i_mapping, 0, gfp);
gfp               241 fs/afs/dir_edit.c 			gfp = vnode->vfs_inode.i_mapping->gfp_mask;
gfp               243 fs/afs/dir_edit.c 						   index, gfp);
gfp               138 fs/afs/rxrpc.c 				       gfp_t gfp)
gfp               143 fs/afs/rxrpc.c 	call = kzalloc(sizeof(*call), gfp);
gfp               361 fs/afs/rxrpc.c void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
gfp               412 fs/afs/rxrpc.c 					 tx_total_len, gfp,
gfp               818 fs/buffer.c    	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
gfp               823 fs/buffer.c    		gfp |= __GFP_NOFAIL;
gfp               831 fs/buffer.c    		bh = alloc_buffer_head(gfp);
gfp               930 fs/buffer.c    	      pgoff_t index, int size, int sizebits, gfp_t gfp)
gfp               939 fs/buffer.c    	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
gfp               993 fs/buffer.c    grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
gfp              1018 fs/buffer.c    	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
gfp              1023 fs/buffer.c    	     unsigned size, gfp_t gfp)
gfp              1045 fs/buffer.c    		ret = grow_buffers(bdev, block, size, gfp);
gfp              1316 fs/buffer.c    	     unsigned size, gfp_t gfp)
gfp              1322 fs/buffer.c    		bh = __getblk_slow(bdev, block, size, gfp);
gfp              1341 fs/buffer.c    		      gfp_t gfp)
gfp              1343 fs/buffer.c    	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
gfp              1365 fs/buffer.c    		   unsigned size, gfp_t gfp)
gfp              1367 fs/buffer.c    	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
gfp               394 fs/cachefiles/rdwr.c 				  gfp_t gfp)
gfp               685 fs/cachefiles/rdwr.c 				   gfp_t gfp)
gfp               799 fs/cachefiles/rdwr.c 			     gfp_t gfp)
gfp               837 fs/cachefiles/rdwr.c 			      gfp_t gfp)
gfp                53 fs/ceph/cache.h static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
gfp                57 fs/ceph/cache.h 	return fscache_maybe_release_page(ci->fscache, page, gfp);
gfp               154 fs/ceph/cache.h static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp)
gfp              4233 fs/cifs/file.c 	gfp_t gfp = readahead_gfp_mask(mapping);
gfp              4246 fs/cifs/file.c 				      page->index, gfp);
gfp              4272 fs/cifs/file.c 		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
gfp              4631 fs/cifs/file.c static int cifs_release_page(struct page *page, gfp_t gfp)
gfp              4636 fs/cifs/file.c 	return cifs_fscache_release_page(page, gfp);
gfp               219 fs/cifs/fscache.c int cifs_fscache_release_page(struct page *page, gfp_t gfp)
gfp               227 fs/cifs/fscache.c 		if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
gfp                66 fs/cifs/fscache.h extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
gfp               133 fs/cifs/fscache.h static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
gfp               290 fs/erofs/data.c 	gfp_t gfp = readahead_gfp_mask(mapping);
gfp               301 fs/erofs/data.c 		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
gfp               385 fs/erofs/internal.h struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
gfp                10 fs/erofs/utils.c struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
gfp                19 fs/erofs/utils.c 		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
gfp               547 fs/erofs/zdata.c 					       gfp_t gfp)
gfp               549 fs/erofs/zdata.c 	struct page *page = erofs_allocpage(pagepool, gfp, true);
gfp               995 fs/erofs/zdata.c 					       gfp_t gfp)
gfp              1096 fs/erofs/zdata.c 	page = __stagingpage_alloc(pagepool, gfp);
gfp              1104 fs/erofs/zdata.c 	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
gfp              1377 fs/erofs/zdata.c 	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
gfp              1399 fs/erofs/zdata.c 		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
gfp               805 fs/ext4/mballoc.c static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
gfp               838 fs/ext4/mballoc.c 		bh = kzalloc(i, gfp);
gfp               973 fs/ext4/mballoc.c 		ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
gfp               992 fs/ext4/mballoc.c 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
gfp              1006 fs/ext4/mballoc.c 	page = find_or_create_page(inode->i_mapping, pnum, gfp);
gfp              1032 fs/ext4/mballoc.c int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
gfp              1052 fs/ext4/mballoc.c 	ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp);
gfp              1062 fs/ext4/mballoc.c 	ret = ext4_mb_init_cache(page, NULL, gfp);
gfp              1081 fs/ext4/mballoc.c 	ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp);
gfp              1100 fs/ext4/mballoc.c 		       struct ext4_buddy *e4b, gfp_t gfp)
gfp              1130 fs/ext4/mballoc.c 		ret = ext4_mb_init_group(sb, group, gfp);
gfp              1158 fs/ext4/mballoc.c 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
gfp              1162 fs/ext4/mballoc.c 				ret = ext4_mb_init_cache(page, NULL, gfp);
gfp              1194 fs/ext4/mballoc.c 		page = find_or_create_page(inode->i_mapping, pnum, gfp);
gfp              1199 fs/ext4/mballoc.c 							 gfp);
gfp                64 fs/fscache/page.c 				  gfp_t gfp)
gfp                69 fs/fscache/page.c 	_enter("%p,%p,%x", cookie, page, gfp);
gfp               128 fs/fscache/page.c 	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
gfp               138 fs/fscache/page.c 	gfp &= ~__GFP_DIRECT_RECLAIM;
gfp               433 fs/fscache/page.c 				 gfp_t gfp)
gfp               499 fs/fscache/page.c 		ret = object->cache->ops->allocate_page(op, page, gfp);
gfp               505 fs/fscache/page.c 		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
gfp               564 fs/fscache/page.c 				  gfp_t gfp)
gfp               627 fs/fscache/page.c 			op, pages, nr_pages, gfp);
gfp               632 fs/fscache/page.c 			op, pages, nr_pages, gfp);
gfp               677 fs/fscache/page.c 			 gfp_t gfp)
gfp               732 fs/fscache/page.c 	ret = object->cache->ops->allocate_page(op, page, gfp);
gfp               963 fs/fscache/page.c 			 gfp_t gfp)
gfp               992 fs/fscache/page.c 	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
gfp               617 fs/io_uring.c  	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
gfp               624 fs/io_uring.c  		req = kmem_cache_alloc(req_cachep, gfp);
gfp               632 fs/io_uring.c  		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
gfp               639 fs/io_uring.c  			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
gfp               260 fs/iomap/buffered-io.c 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
gfp               267 fs/iomap/buffered-io.c 			gfp |= __GFP_NORETRY | __GFP_NOWARN;
gfp               268 fs/iomap/buffered-io.c 		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
gfp               176 fs/mpage.c     	gfp_t gfp;
gfp               180 fs/mpage.c     		gfp = readahead_gfp_mask(page->mapping);
gfp               183 fs/mpage.c     		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
gfp               309 fs/mpage.c     					gfp);
gfp               430 fs/nfs/file.c  static int nfs_release_page(struct page *page, gfp_t gfp)
gfp               437 fs/nfs/file.c  	return nfs_fscache_release_page(page, gfp);
gfp               338 fs/nfs/fscache.c int nfs_fscache_release_page(struct page *page, gfp_t gfp)
gfp               347 fs/nfs/fscache.c 		if (!fscache_maybe_release_page(cookie, page, gfp))
gfp               206 fs/nfs/fscache.h static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
gfp               220 fs/notify/fanotify/fanotify.c 			       struct inode *inode, gfp_t gfp,
gfp               238 fs/notify/fanotify/fanotify.c 		fid->ext_fh = kmalloc(bytes, gfp);
gfp               291 fs/notify/fanotify/fanotify.c 	gfp_t gfp = GFP_KERNEL_ACCOUNT;
gfp               301 fs/notify/fanotify/fanotify.c 		gfp |= __GFP_NOFAIL;
gfp               303 fs/notify/fanotify/fanotify.c 		gfp |= __GFP_RETRY_MAYFAIL;
gfp               311 fs/notify/fanotify/fanotify.c 		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
gfp               319 fs/notify/fanotify/fanotify.c 	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
gfp               337 fs/notify/fanotify/fanotify.c 		event->fh_type = fanotify_encode_fid(event, id, gfp, fsid);
gfp               519 fs/posix_acl.c __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p)
gfp               521 fs/posix_acl.c 	struct posix_acl *clone = posix_acl_clone(*acl, gfp);
gfp               537 fs/posix_acl.c __posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode)
gfp               539 fs/posix_acl.c 	struct posix_acl *clone = posix_acl_clone(*acl, gfp);
gfp                69 fs/ramfs/file-nommu.c 	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
gfp                84 fs/ramfs/file-nommu.c 	pages = alloc_pages(gfp, order);
gfp               108 fs/ramfs/file-nommu.c 					gfp);
gfp                59 include/asm-generic/pgalloc.h static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
gfp                63 include/asm-generic/pgalloc.h 	pte = alloc_page(gfp);
gfp               394 include/crypto/aead.h 						      gfp_t gfp)
gfp               398 include/crypto/aead.h 	req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
gfp               192 include/crypto/akcipher.h 	struct crypto_akcipher *tfm, gfp_t gfp)
gfp               196 include/crypto/akcipher.h 	req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp);
gfp               577 include/crypto/hash.h 	struct crypto_ahash *tfm, gfp_t gfp)
gfp               582 include/crypto/hash.h 		      crypto_ahash_reqsize(tfm), gfp);
gfp               172 include/crypto/kpp.h 						    gfp_t gfp)
gfp               176 include/crypto/kpp.h 	req = kmalloc(sizeof(*req) + crypto_kpp_reqsize(tfm), gfp);
gfp               527 include/crypto/skcipher.h 	struct crypto_skcipher *tfm, gfp_t gfp)
gfp               532 include/crypto/skcipher.h 		      crypto_skcipher_reqsize(tfm), gfp);
gfp               232 include/linux/backing-dev.h wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp);
gfp               238 include/linux/backing-dev.h 				    gfp_t gfp);
gfp               303 include/linux/backing-dev.h wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
gfp               317 include/linux/backing-dev.h 		wb = wb_get_create(bdi, memcg_css, gfp);
gfp               412 include/linux/backing-dev.h wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
gfp               430 include/linux/backing-dev.h wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
gfp               360 include/linux/bio.h 			     gfp_t gfp, struct bio_set *bs);
gfp               373 include/linux/bio.h 					 gfp_t gfp, struct bio_set *bs)
gfp               378 include/linux/bio.h 	return bio_split(bio, sectors, gfp, bs);
gfp               809 include/linux/bio.h static inline void *bio_integrity_alloc(struct bio * bio, gfp_t gfp,
gfp               581 include/linux/blk-cgroup.h static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
gfp               586 include/linux/blk-cgroup.h 		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
gfp                42 include/linux/btree-128.h 				  void *val, gfp_t gfp)
gfp                46 include/linux/btree-128.h 			    (unsigned long *)&key, val, gfp);
gfp                79 include/linux/btree-128.h 				 gfp_t gfp)
gfp                81 include/linux/btree-128.h 	return btree_merge(&target->h, &victim->h, &btree_geo128, gfp);
gfp                32 include/linux/btree-type.h 				  gfp_t gfp)
gfp                34 include/linux/btree-type.h 	return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp);
gfp                45 include/linux/btree-type.h 				   void *val, gfp_t gfp)
gfp                48 include/linux/btree-type.h 	return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp);
gfp                88 include/linux/btree-type.h 			   void *val, gfp_t gfp)
gfp                91 include/linux/btree-type.h 			    val, gfp);
gfp               116 include/linux/btree.h 			      unsigned long *key, void *val, gfp_t gfp);
gfp               159 include/linux/btree.h 		struct btree_geo *geo, gfp_t gfp);
gfp               188 include/linux/buffer_head.h 				  unsigned size, gfp_t gfp);
gfp               193 include/linux/buffer_head.h 		  gfp_t gfp);
gfp               195 include/linux/buffer_head.h 				sector_t block, unsigned size, gfp_t gfp);
gfp               338 include/linux/buffer_head.h sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
gfp               340 include/linux/buffer_head.h 	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
gfp                22 include/linux/ceph/buffer.h extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp);
gfp               108 include/linux/ceph/decode.h 						size_t *lenp, gfp_t gfp)
gfp               118 include/linux/ceph/decode.h 	buf = kmalloc(len + 1, gfp);
gfp               478 include/linux/ceph/osd_client.h int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
gfp               136 include/linux/ceph/osdmap.h int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
gfp              1183 include/linux/crypto.h 	struct crypto_ablkcipher *tfm, gfp_t gfp)
gfp              1188 include/linux/crypto.h 		      crypto_ablkcipher_reqsize(tfm), gfp);
gfp                56 include/linux/devcoredump.h 		   gfp_t gfp);
gfp                59 include/linux/devcoredump.h 		   void *data, size_t datalen, gfp_t gfp,
gfp                65 include/linux/devcoredump.h 		    size_t datalen, gfp_t gfp);
gfp                68 include/linux/devcoredump.h 				 size_t datalen, gfp_t gfp)
gfp                75 include/linux/devcoredump.h 	      void *data, size_t datalen, gfp_t gfp,
gfp                84 include/linux/devcoredump.h 				  size_t datalen, gfp_t gfp)
gfp               871 include/linux/device.h extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
gfp               873 include/linux/device.h #define devres_alloc(release, size, gfp) \
gfp               874 include/linux/device.h 	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
gfp               875 include/linux/device.h #define devres_alloc_node(release, size, gfp, nid) \
gfp               876 include/linux/device.h 	__devres_alloc_node(release, size, gfp, nid, #release)
gfp               878 include/linux/device.h extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
gfp               880 include/linux/device.h static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
gfp               882 include/linux/device.h 	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
gfp               905 include/linux/device.h 					     gfp_t gfp);
gfp               911 include/linux/device.h extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
gfp               913 include/linux/device.h char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
gfp               916 include/linux/device.h char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
gfp               917 include/linux/device.h static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
gfp               919 include/linux/device.h 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
gfp               937 include/linux/device.h extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
gfp               939 include/linux/device.h 				      const char *s, gfp_t gfp);
gfp               941 include/linux/device.h 			  gfp_t gfp);
gfp               115 include/linux/dma-contiguous.h struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
gfp               161 include/linux/dma-contiguous.h 		gfp_t gfp)
gfp                70 include/linux/dma-direct.h 		gfp_t gfp, unsigned long attrs);
gfp                74 include/linux/dma-direct.h 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
gfp                78 include/linux/dma-direct.h 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
gfp                81 include/linux/dma-mapping.h 				dma_addr_t *dma_handle, gfp_t gfp,
gfp               450 include/linux/dma-mapping.h 		gfp_t gfp, unsigned long attrs);
gfp               529 include/linux/dma-mapping.h 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               645 include/linux/dma-mapping.h 		dma_addr_t *dma_handle, gfp_t gfp)
gfp               648 include/linux/dma-mapping.h 	return dma_alloc_attrs(dev, size, dma_handle, gfp,
gfp               649 include/linux/dma-mapping.h 			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
gfp               775 include/linux/dma-mapping.h 		dma_addr_t *dma_handle, gfp_t gfp)
gfp               777 include/linux/dma-mapping.h 	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
gfp               778 include/linux/dma-mapping.h 			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
gfp               782 include/linux/dma-mapping.h 				 dma_addr_t *dma_addr, gfp_t gfp)
gfp               786 include/linux/dma-mapping.h 	if (gfp & __GFP_NOWARN)
gfp               789 include/linux/dma-mapping.h 	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
gfp                41 include/linux/dma-noncoherent.h 		gfp_t gfp, unsigned long attrs);
gfp                49 include/linux/firmware.h 	const char *name, struct device *device, gfp_t gfp, void *context,
gfp                74 include/linux/firmware.h 	const char *name, struct device *device, gfp_t gfp, void *context,
gfp                37 include/linux/flex_proportions.h int fprop_global_init(struct fprop_global *p, gfp_t gfp);
gfp                84 include/linux/flex_proportions.h int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
gfp               156 include/linux/fscache-cache.h 					     gfp_t gfp);
gfp               161 include/linux/fscache-cache.h 					      gfp_t gfp);
gfp               544 include/linux/fscache.h 			       gfp_t gfp)
gfp               548 include/linux/fscache.h 						    context, gfp);
gfp               595 include/linux/fscache.h 				gfp_t gfp)
gfp               600 include/linux/fscache.h 						     context, gfp);
gfp               626 include/linux/fscache.h 		       gfp_t gfp)
gfp               629 include/linux/fscache.h 		return __fscache_alloc_page(cookie, page, gfp);
gfp               677 include/linux/fscache.h 		       gfp_t gfp)
gfp               680 include/linux/fscache.h 		return __fscache_write_page(cookie, page, object_size, gfp);
gfp               763 include/linux/fscache.h 				gfp_t gfp)
gfp               766 include/linux/fscache.h 		return __fscache_maybe_release_page(cookie, page, gfp);
gfp               149 include/linux/greybus/operation.h 				gfp_t gfp);
gfp               154 include/linux/greybus/operation.h 				size_t response_size, gfp_t gfp)
gfp               157 include/linux/greybus/operation.h 						response_size, 0, gfp);
gfp               164 include/linux/greybus/operation.h 				gfp_t gfp);
gfp               170 include/linux/greybus/operation.h 					size_t response_size, gfp_t gfp);
gfp               175 include/linux/greybus/operation.h 				gfp_t gfp);
gfp               270 include/linux/idr.h static inline int ida_alloc(struct ida *ida, gfp_t gfp)
gfp               272 include/linux/idr.h 	return ida_alloc_range(ida, 0, ~0, gfp);
gfp               287 include/linux/idr.h static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
gfp               289 include/linux/idr.h 	return ida_alloc_range(ida, min, ~0, gfp);
gfp               304 include/linux/idr.h static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
gfp               306 include/linux/idr.h 	return ida_alloc_range(ida, 0, max, gfp);
gfp               314 include/linux/idr.h #define ida_simple_get(ida, start, end, gfp)	\
gfp               315 include/linux/idr.h 			ida_alloc_range(ida, start, (end) - 1, gfp)
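The ida_alloc*() wrappers above differ only in the range they forward to ida_alloc_range(); the gfp argument passes through unchanged. A minimal usage sketch, assuming process context and using illustrative names:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Allocate the smallest free ID. GFP_KERNEL may sleep, so this must not be
 * called from atomic context; returns a non-negative ID or a negative errno. */
static int example_get_id(void)
{
	return ida_alloc(&example_ida, GFP_KERNEL);
}

static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}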
gfp               135 include/linux/igmp.h extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp);
gfp               141 include/linux/igmp.h 			      gfp_t gfp);
gfp               487 include/linux/kernel.h char *kasprintf(gfp_t gfp, const char *fmt, ...);
gfp               489 include/linux/kernel.h char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
gfp               491 include/linux/kernel.h const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
gfp               386 include/linux/kexec.h static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp) { return 0; }
gfp                19 include/linux/kmemleak.h 			   gfp_t gfp) __ref;
gfp                21 include/linux/kmemleak.h 				  gfp_t gfp) __ref;
gfp                23 include/linux/kmemleak.h 			     gfp_t gfp) __ref;
gfp                30 include/linux/kmemleak.h extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
gfp                33 include/linux/kmemleak.h 				gfp_t gfp) __ref;
gfp                40 include/linux/kmemleak.h 					    gfp_t gfp)
gfp                43 include/linux/kmemleak.h 		kmemleak_alloc(ptr, size, min_count, gfp);
gfp                63 include/linux/kmemleak.h 				  gfp_t gfp)
gfp                68 include/linux/kmemleak.h 					    gfp_t gfp)
gfp                72 include/linux/kmemleak.h 					 gfp_t gfp)
gfp                76 include/linux/kmemleak.h 				    gfp_t gfp)
gfp               100 include/linux/kmemleak.h static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
gfp               110 include/linux/kmemleak.h 				       int min_count, gfp_t gfp)
gfp              1621 include/linux/lsm_hooks.h 	int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
gfp              1624 include/linux/lsm_hooks.h 				gfp_t gfp);
gfp              1772 include/linux/lsm_hooks.h 						gfp_t gfp);
gfp              1377 include/linux/memcontrol.h int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
gfp              1379 include/linux/memcontrol.h int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
gfp              1404 include/linux/memcontrol.h static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
gfp              1407 include/linux/memcontrol.h 		return __memcg_kmem_charge(page, gfp, order);
gfp              1417 include/linux/memcontrol.h static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
gfp              1421 include/linux/memcontrol.h 		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
gfp              1446 include/linux/memcontrol.h static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
gfp              1455 include/linux/memcontrol.h static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
gfp              2402 include/linux/netdevice.h #define __netdev_alloc_pcpu_stats(type, gfp)				\
gfp              2404 include/linux/netdevice.h 	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
gfp               211 include/linux/pagemap.h extern struct page *__page_cache_alloc(gfp_t gfp);
gfp               213 include/linux/pagemap.h static inline struct page *__page_cache_alloc(gfp_t gfp)
gfp               215 include/linux/pagemap.h 	return alloc_pages(gfp, 0);
gfp               111 include/linux/percpu-refcount.h 				 gfp_t gfp);
gfp               134 include/linux/percpu.h extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
gfp               139 include/linux/percpu.h #define alloc_percpu_gfp(type, gfp)					\
gfp               141 include/linux/percpu.h 						__alignof__(type), gfp)
gfp                31 include/linux/percpu_counter.h int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
gfp                34 include/linux/percpu_counter.h #define percpu_counter_init(fbc, value, gfp)				\
gfp                38 include/linux/percpu_counter.h 		__percpu_counter_init(fbc, value, gfp, &__key);		\
gfp               101 include/linux/percpu_counter.h 				      gfp_t gfp)
gfp               466 include/linux/ptr_ring.h static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
gfp               470 include/linux/ptr_ring.h 	return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
gfp               486 include/linux/ptr_ring.h static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
gfp               488 include/linux/ptr_ring.h 	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
gfp               558 include/linux/ptr_ring.h 					   int size, gfp_t gfp,
gfp               589 include/linux/ptr_ring.h static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
gfp               593 include/linux/ptr_ring.h 	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
gfp               602 include/linux/ptr_ring.h 	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
gfp               621 include/linux/ptr_ring.h 					   gfp_t gfp, void (*destroy)(void *))
gfp               627 include/linux/ptr_ring.h 	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
gfp               632 include/linux/ptr_ring.h 		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
gfp               641 include/linux/ptr_ring.h 						  size, gfp, destroy);
gfp               252 include/linux/radix-tree.h 			      struct radix_tree_iter *iter, gfp_t gfp,
gfp                83 include/linux/rslib.h 			       int nroots, gfp_t gfp);
gfp               286 include/linux/scatterlist.h 				    gfp_t gfp, unsigned int *nent_p);
gfp               287 include/linux/scatterlist.h struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
gfp               377 include/linux/security.h int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
gfp               379 include/linux/security.h int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
gfp               968 include/linux/security.h static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
gfp               978 include/linux/security.h 					 gfp_t gfp)
gfp              1575 include/linux/security.h 			       struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
gfp              1595 include/linux/security.h 					     gfp_t gfp)
gfp               180 include/linux/skb_array.h static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
gfp               182 include/linux/skb_array.h 	return ptr_ring_init(&a->ring, size, gfp);
gfp               196 include/linux/skb_array.h static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
gfp               198 include/linux/skb_array.h 	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
gfp               203 include/linux/skb_array.h 					    gfp_t gfp)
gfp               207 include/linux/skb_array.h 					nrings, size, gfp,
gfp              2801 include/linux/skbuff.h 		unsigned int length, gfp_t gfp)
gfp              2803 include/linux/skbuff.h 	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
gfp              3540 include/linux/skbuff.h 			     gfp_t gfp);
gfp               698 include/linux/spi/spi.h 			   size_t size, gfp_t gfp);
gfp              1080 include/linux/spi/spi.h 	gfp_t gfp);
gfp              1089 include/linux/spi/spi.h 				       gfp_t gfp);
gfp               465 include/linux/spinlock.h 			     gfp_t gfp, const char *name,
gfp               468 include/linux/spinlock.h #define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
gfp               474 include/linux/spinlock.h 					       cpu_mult, gfp, #locks, &key); \
gfp               182 include/linux/string.h extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
gfp               183 include/linux/string.h extern const char *kstrdup_const(const char *s, gfp_t gfp);
gfp               184 include/linux/string.h extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
gfp               185 include/linux/string.h extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
gfp               186 include/linux/string.h extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
gfp               188 include/linux/string.h extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
gfp               443 include/linux/string.h extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
gfp               444 include/linux/string.h __FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
gfp               451 include/linux/string.h 	return __real_kmemdup(p, size, gfp);
gfp                78 include/linux/string_helpers.h char *kstrdup_quotable(const char *src, gfp_t gfp);
gfp                79 include/linux/string_helpers.h char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
gfp                80 include/linux/string_helpers.h char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
gfp               135 include/linux/sunrpc/xdr.h int	xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
gfp                41 include/linux/virtio.h 			 gfp_t gfp);
gfp                46 include/linux/virtio.h 			gfp_t gfp);
gfp                52 include/linux/virtio.h 			    gfp_t gfp);
gfp                59 include/linux/virtio.h 		      gfp_t gfp);
gfp                33 include/linux/vmpressure.h extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
gfp                35 include/linux/vmpressure.h extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
gfp                47 include/linux/vmpressure.h static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
gfp                49 include/linux/vmpressure.h static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
gfp               196 include/linux/vringh.h 			gfp_t gfp);
gfp               346 include/linux/writeback.h int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
gfp               547 include/linux/xarray.h 		void *entry, gfp_t gfp)
gfp               552 include/linux/xarray.h 	curr = __xa_store(xa, index, entry, gfp);
gfp               573 include/linux/xarray.h 		void *entry, gfp_t gfp)
gfp               578 include/linux/xarray.h 	curr = __xa_store(xa, index, entry, gfp);
gfp               648 include/linux/xarray.h 			void *old, void *entry, gfp_t gfp)
gfp               653 include/linux/xarray.h 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
gfp               675 include/linux/xarray.h 			void *old, void *entry, gfp_t gfp)
gfp               680 include/linux/xarray.h 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
gfp               702 include/linux/xarray.h 			void *old, void *entry, gfp_t gfp)
gfp               707 include/linux/xarray.h 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
gfp               731 include/linux/xarray.h 		unsigned long index, void *entry, gfp_t gfp)
gfp               736 include/linux/xarray.h 	err = __xa_insert(xa, index, entry, gfp);
gfp               760 include/linux/xarray.h 		unsigned long index, void *entry, gfp_t gfp)
gfp               765 include/linux/xarray.h 	err = __xa_insert(xa, index, entry, gfp);
gfp               789 include/linux/xarray.h 		unsigned long index, void *entry, gfp_t gfp)
gfp               794 include/linux/xarray.h 	err = __xa_insert(xa, index, entry, gfp);
gfp               818 include/linux/xarray.h 		void *entry, struct xa_limit limit, gfp_t gfp)
gfp               823 include/linux/xarray.h 	err = __xa_alloc(xa, id, entry, limit, gfp);
gfp               847 include/linux/xarray.h 		void *entry, struct xa_limit limit, gfp_t gfp)
gfp               852 include/linux/xarray.h 	err = __xa_alloc(xa, id, entry, limit, gfp);
gfp               876 include/linux/xarray.h 		void *entry, struct xa_limit limit, gfp_t gfp)
gfp               881 include/linux/xarray.h 	err = __xa_alloc(xa, id, entry, limit, gfp);
gfp               909 include/linux/xarray.h 		struct xa_limit limit, u32 *next, gfp_t gfp)
gfp               914 include/linux/xarray.h 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
gfp               942 include/linux/xarray.h 		struct xa_limit limit, u32 *next, gfp_t gfp)
gfp               947 include/linux/xarray.h 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
gfp               975 include/linux/xarray.h 		struct xa_limit limit, u32 *next, gfp_t gfp)
gfp               980 include/linux/xarray.h 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
gfp              1005 include/linux/xarray.h int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
gfp              1007 include/linux/xarray.h 	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
gfp              1023 include/linux/xarray.h int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
gfp              1025 include/linux/xarray.h 	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
gfp              1041 include/linux/xarray.h int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
gfp              1043 include/linux/xarray.h 	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
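In the xarray wrappers above, the gfp mask is consumed only when a store has to allocate internal nodes; the xa_reserve*() helpers rely on this by pre-charging a slot with XA_ZERO_ENTRY. A minimal sketch of the basic store pattern, with illustrative names and not taken from the listing:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);

/* Store an item at the given index; GFP_KERNEL covers any node allocation
 * the xarray needs. Returns 0 or a negative errno extracted via xa_err(). */
static int example_store(unsigned long index, void *item)
{
	return xa_err(xa_store(&example_xa, index, item, GFP_KERNEL));
}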
gfp                13 include/linux/zbud.h struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops);
gfp                15 include/linux/zbud.h int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
gfp                43 include/linux/zpool.h 			gfp_t gfp, const struct zpool_ops *ops);
gfp                51 include/linux/zpool.h int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
gfp                90 include/linux/zpool.h 			gfp_t gfp,
gfp                96 include/linux/zpool.h 	int (*malloc)(void *pool, size_t size, gfp_t gfp,
gfp              5655 include/net/cfg80211.h 			       gfp_t gfp);
gfp              5662 include/net/cfg80211.h 				s32 signal, gfp_t gfp)
gfp              5670 include/net/cfg80211.h 	return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
gfp              5677 include/net/cfg80211.h 			  s32 signal, gfp_t gfp)
gfp              5685 include/net/cfg80211.h 	return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len, gfp);
gfp              5770 include/net/cfg80211.h 			 gfp_t gfp);
gfp              5779 include/net/cfg80211.h 			  s32 signal, gfp_t gfp)
gfp              5789 include/net/cfg80211.h 					gfp);
gfp              5798 include/net/cfg80211.h 		    s32 signal, gfp_t gfp)
gfp              5808 include/net/cfg80211.h 					gfp);
gfp              6015 include/net/cfg80211.h 				  const u8 *tsc, gfp_t gfp);
gfp              6033 include/net/cfg80211.h 			  struct ieee80211_channel *channel, gfp_t gfp);
gfp              6050 include/net/cfg80211.h 		int sig_dbm, gfp_t gfp);
gfp              6116 include/net/cfg80211.h 					   int approxlen, gfp_t gfp);
gfp              6118 include/net/cfg80211.h void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp);
gfp              6196 include/net/cfg80211.h 			     int approxlen, int event_idx, gfp_t gfp)
gfp              6200 include/net/cfg80211.h 					  0, event_idx, approxlen, gfp);
gfp              6231 include/net/cfg80211.h 				  int event_idx, gfp_t gfp)
gfp              6235 include/net/cfg80211.h 					  portid, event_idx, approxlen, gfp);
gfp              6246 include/net/cfg80211.h static inline void cfg80211_vendor_event(struct sk_buff *skb, gfp_t gfp)
gfp              6248 include/net/cfg80211.h 	__cfg80211_send_event_skb(skb, gfp);
gfp              6332 include/net/cfg80211.h cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy, int approxlen, gfp_t gfp)
gfp              6336 include/net/cfg80211.h 					  approxlen, gfp);
gfp              6349 include/net/cfg80211.h static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
gfp              6351 include/net/cfg80211.h 	__cfg80211_send_event_skb(skb, gfp);
gfp              6442 include/net/cfg80211.h 			   gfp_t gfp);
gfp              6486 include/net/cfg80211.h 		     size_t resp_ie_len, int status, gfp_t gfp,
gfp              6501 include/net/cfg80211.h 	cfg80211_connect_done(dev, &params, gfp);
gfp              6528 include/net/cfg80211.h 			u16 status, gfp_t gfp)
gfp              6531 include/net/cfg80211.h 			     resp_ie_len, status, gfp,
gfp              6555 include/net/cfg80211.h 			 const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
gfp              6559 include/net/cfg80211.h 			     gfp, timeout_reason);
gfp              6604 include/net/cfg80211.h 		     gfp_t gfp);
gfp              6621 include/net/cfg80211.h 			      gfp_t gfp);
gfp              6638 include/net/cfg80211.h 			   bool locally_generated, gfp_t gfp);
gfp              6651 include/net/cfg80211.h 			       unsigned int duration, gfp_t gfp);
gfp              6662 include/net/cfg80211.h 					gfp_t gfp);
gfp              6672 include/net/cfg80211.h 			      struct ieee80211_channel *chan, gfp_t gfp);
gfp              6680 include/net/cfg80211.h int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp);
gfp              6704 include/net/cfg80211.h 		      struct station_info *sinfo, gfp_t gfp);
gfp              6714 include/net/cfg80211.h 			    struct station_info *sinfo, gfp_t gfp);
gfp              6724 include/net/cfg80211.h 				    const u8 *mac_addr, gfp_t gfp)
gfp              6726 include/net/cfg80211.h 	cfg80211_del_sta_sinfo(dev, mac_addr, NULL, gfp);
gfp              6746 include/net/cfg80211.h 			  gfp_t gfp);
gfp              6782 include/net/cfg80211.h 			     const u8 *buf, size_t len, bool ack, gfp_t gfp);
gfp              6819 include/net/cfg80211.h 			      s32 rssi_level, gfp_t gfp);
gfp              6831 include/net/cfg80211.h 				 const u8 *peer, u32 num_packets, gfp_t gfp);
gfp              6846 include/net/cfg80211.h 			     u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
gfp              6855 include/net/cfg80211.h void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp);
gfp              6866 include/net/cfg80211.h 			  struct cfg80211_chan_def *chandef, gfp_t gfp);
gfp              6880 include/net/cfg80211.h 				       gfp_t gfp);
gfp              6895 include/net/cfg80211.h 			enum nl80211_radar_event event, gfp_t gfp);
gfp              6906 include/net/cfg80211.h 			       const u8 *replay_ctr, gfp_t gfp);
gfp              6917 include/net/cfg80211.h 				     const u8 *bssid, bool preauth, gfp_t gfp);
gfp              6932 include/net/cfg80211.h 				const u8 *addr, gfp_t gfp);
gfp              6948 include/net/cfg80211.h 					const u8 *addr, gfp_t gfp);
gfp              6962 include/net/cfg80211.h 			   bool is_valid_ack_signal, gfp_t gfp);
gfp              7074 include/net/cfg80211.h 				u16 reason_code, gfp_t gfp);
gfp              7220 include/net/cfg80211.h 				   gfp_t gfp);
gfp              7232 include/net/cfg80211.h void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp);
gfp              7286 include/net/cfg80211.h 			 gfp_t gfp);
gfp              7382 include/net/cfg80211.h 			struct cfg80211_nan_match_params *match, gfp_t gfp);
gfp              7398 include/net/cfg80211.h 				  u64 cookie, gfp_t gfp);
gfp              7412 include/net/cfg80211.h 				   gfp_t gfp);
gfp              7424 include/net/cfg80211.h 			  gfp_t gfp);
gfp              7437 include/net/cfg80211.h 			    gfp_t gfp);
gfp              7513 include/net/cfg80211.h 				    gfp_t gfp);
gfp                13 include/net/dn_nsp.h void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
gfp                15 include/net/dn_nsp.h 		      unsigned short reason, gfp_t gfp);
gfp                24 include/net/dn_nsp.h void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
gfp                87 include/net/dst_cache.h int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp);
gfp                21 include/net/hwbm.h int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
gfp                26 include/net/hwbm.h static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
gfp               224 include/net/iucv/iucv.h static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
gfp               228 include/net/iucv/iucv.h 	path = kzalloc(sizeof(struct iucv_path), gfp);
gfp              5136 include/net/mac80211.h 				const u8 *replay_ctr, gfp_t gfp);
gfp              5719 include/net/mac80211.h 			       gfp_t gfp);
gfp              5727 include/net/mac80211.h void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp);
gfp              5941 include/net/mac80211.h 	void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
gfp              6121 include/net/mac80211.h 				    gfp_t gfp);
gfp              6196 include/net/mac80211.h 				 u16 reason_code, gfp_t gfp);
gfp              6403 include/net/mac80211.h 				   gfp_t gfp);
gfp              6418 include/net/mac80211.h 			      gfp_t gfp);
gfp               345 include/net/net_namespace.h int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
gfp               236 include/net/netfilter/nf_conntrack.h 				   gfp_t gfp);
gfp                30 include/net/netfilter/nf_conntrack_acct.h struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
gfp                39 include/net/netfilter/nf_conntrack_acct.h 	acct = nf_ct_ext_add(ct, NF_CT_EXT_ACCT, gfp);
gfp                42 include/net/netfilter/nf_conntrack_ecache.h nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
gfp                55 include/net/netfilter/nf_conntrack_ecache.h 	e = nf_ct_ext_add(ct, NF_CT_EXT_ECACHE, gfp);
gfp                85 include/net/netfilter/nf_conntrack_extend.h void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
gfp               113 include/net/netfilter/nf_conntrack_helper.h struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
gfp                61 include/net/netfilter/nf_conntrack_timeout.h 					      gfp_t gfp)
gfp                66 include/net/netfilter/nf_conntrack_timeout.h 	timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
gfp                27 include/net/netfilter/nf_conntrack_timestamp.h struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
gfp                35 include/net/netfilter/nf_conntrack_timestamp.h 	return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
gfp               659 include/net/netfilter/nf_tables.h 			u64 timeout, u64 expiration, gfp_t gfp);
gfp               692 include/net/netfilter/nf_tables.h 						gfp_t gfp);
gfp               703 include/net/netfilter/nf_tables.h 		       gfp_t gfp)
gfp               710 include/net/netfilter/nf_tables.h 	return nft_set_gc_batch_alloc(set, gfp);
gfp              1095 include/net/netfilter/nf_tables.h 		    int event, int family, int report, gfp_t gfp);
gfp              1666 include/net/netlink.h static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
gfp              1668 include/net/netlink.h 	return kmemdup(nla_data(src), nla_len(src), gfp);
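The nla_memdup() inline listed above simply forwards the caller's gfp mask to kmemdup(). A minimal, hedged sketch of a caller (copy_attr_payload is an illustrative name, not from the source):

#include <net/netlink.h>
#include <linux/slab.h>

static void *copy_attr_payload(const struct nlattr *attr)
{
        /* GFP_KERNEL assumes process context; use GFP_ATOMIC under locks.
         * The returned buffer is kmalloc'ed, so release it with kfree(). */
        return nla_memdup(attr, GFP_KERNEL);
}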
gfp               266 include/net/nfc/nfc.h struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
gfp               117 include/net/page_pool.h struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
gfp               121 include/net/page_pool.h 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
gfp               123 include/net/page_pool.h 	return page_pool_alloc_pages(pool, gfp);
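The page_pool lines above show the raw allocator, which takes an explicit gfp mask, next to an inline wrapper that hard-codes GFP_ATOMIC | __GFP_NOWARN for receive paths. A hedged sketch of choosing between them (rx_refill is an illustrative name):

#include <linux/gfp.h>
#include <linux/types.h>
#include <net/page_pool.h>

static struct page *rx_refill(struct page_pool *pool, bool can_sleep)
{
        if (can_sleep)
                return page_pool_alloc_pages(pool, GFP_KERNEL);
        /* NAPI/softirq path: wrapper supplies GFP_ATOMIC | __GFP_NOWARN. */
        return page_pool_dev_alloc_pages(pool);
}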
gfp                71 include/net/sctp/auth.h struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
gfp                73 include/net/sctp/auth.h int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
gfp                79 include/net/sctp/auth.h 				gfp_t gfp);
gfp                80 include/net/sctp/auth.h int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
gfp                94 include/net/sctp/auth.h 			      struct sctp_shared_key *ep_key, gfp_t gfp);
gfp               110 include/net/sctp/auth.h int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
gfp                83 include/net/sctp/sctp.h 			      enum sctp_scope, gfp_t gfp, int flags);
gfp                25 include/net/sctp/stream_interleave.h 					    int len, __u8 flags, gfp_t gfp);
gfp                29 include/net/sctp/stream_interleave.h 				 struct sctp_chunk *chunk, gfp_t gfp);
gfp                33 include/net/sctp/stream_interleave.h 				 struct sctp_chunk *chunk, gfp_t gfp);
gfp                34 include/net/sctp/stream_interleave.h 	void	(*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
gfp                35 include/net/sctp/stream_interleave.h 	void	(*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
gfp                22 include/net/sctp/stream_sched.h 		   gfp_t gfp);
gfp                28 include/net/sctp/stream_sched.h 	int (*init_sid)(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
gfp                48 include/net/sctp/stream_sched.h 			 __u16 value, gfp_t gfp);
gfp                54 include/net/sctp/stream_sched.h int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
gfp               384 include/net/sctp/structs.h 		     gfp_t gfp);
gfp               675 include/net/sctp/structs.h 				 struct sock *, gfp_t gfp);
gfp               742 include/net/sctp/structs.h 					  int one_packet, gfp_t gfp);
gfp              1109 include/net/sctp/structs.h void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp);
gfp              1169 include/net/sctp/structs.h 			enum sctp_scope scope, gfp_t gfp,
gfp              1173 include/net/sctp/structs.h 			gfp_t gfp);
gfp              1175 include/net/sctp/structs.h 		       int new_size, __u8 addr_state, gfp_t gfp);
gfp              1191 include/net/sctp/structs.h 					 gfp_t gfp);
gfp              1193 include/net/sctp/structs.h 			   __u16 port, gfp_t gfp);
gfp              1380 include/net/sctp/structs.h 		      struct sctp_init_chunk *init, gfp_t gfp);
gfp              2104 include/net/sctp/structs.h 		     enum sctp_scope scope, gfp_t gfp);
gfp              2118 include/net/sctp/structs.h 				     const gfp_t gfp,
gfp              2145 include/net/sctp/structs.h 				     enum sctp_scope scope, gfp_t gfp);
gfp              2148 include/net/sctp/structs.h 					 gfp_t gfp);
gfp                85 include/net/sctp/tsnmap.h 				     __u32 initial_tsn, gfp_t gfp);
gfp                81 include/net/sctp/ulpevent.h 	gfp_t gfp);
gfp                89 include/net/sctp/ulpevent.h 	gfp_t gfp);
gfp                95 include/net/sctp/ulpevent.h 	gfp_t gfp);
gfp               101 include/net/sctp/ulpevent.h 	gfp_t gfp);
gfp               106 include/net/sctp/ulpevent.h 	gfp_t gfp);
gfp               111 include/net/sctp/ulpevent.h 	__u32 flags, gfp_t gfp);
gfp               114 include/net/sctp/ulpevent.h 	const struct sctp_association *asoc, gfp_t gfp);
gfp               118 include/net/sctp/ulpevent.h 	gfp_t gfp);
gfp               122 include/net/sctp/ulpevent.h 	__u32 indication, gfp_t gfp);
gfp               125 include/net/sctp/ulpevent.h 	const struct sctp_association *asoc, gfp_t gfp);
gfp               129 include/net/sctp/ulpevent.h 	__u16 stream_num, __be16 *stream_list, gfp_t gfp);
gfp               133 include/net/sctp/ulpevent.h 	 __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
gfp               137 include/net/sctp/ulpevent.h 	__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
gfp              2243 include/net/sock.h struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
gfp                55 include/net/tc_act/tc_ife.h int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
gfp                56 include/net/tc_act/tc_ife.h int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
gfp               583 include/net/tcp.h 		 unsigned int mss_now, gfp_t gfp);
gfp              1588 include/net/tcp.h 		   gfp_t gfp);
gfp              1630 include/net/xfrm.h struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
gfp              2245 include/rdma/ib_verbs.h #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp)                         \
gfp              2246 include/rdma/ib_verbs.h 	((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
gfp                81 include/scsi/scsi_dh.h 							gfp_t gfp)
gfp               264 kernel/bpf/cpumap.c 		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
gfp               299 kernel/bpf/cpumap.c 		m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs);
gfp               337 kernel/bpf/cpumap.c 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
gfp               345 kernel/bpf/cpumap.c 	rcpu = kzalloc_node(sizeof(*rcpu), gfp, numa);
gfp               351 kernel/bpf/cpumap.c 					 sizeof(void *), gfp);
gfp               361 kernel/bpf/cpumap.c 	rcpu->queue = kzalloc_node(sizeof(*rcpu->queue), gfp, numa);
gfp               365 kernel/bpf/cpumap.c 	err = ptr_ring_init(rcpu->queue, qsize, gfp);
gfp               589 kernel/bpf/devmap.c 	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
gfp               594 kernel/bpf/devmap.c 	dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node);
gfp               599 kernel/bpf/devmap.c 					sizeof(void *), gfp);
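The cpumap/devmap setup code above pairs a gfp mask with a NUMA node hint through the *_node() allocator variants. A minimal sketch of that pattern; the size and wrapper name are illustrative only:

#include <linux/gfp.h>
#include <linux/slab.h>

static void *alloc_ctrl_block(int numa_node)
{
        /* Keep the allocation close to the CPU that will use it. */
        return kzalloc_node(256, GFP_KERNEL | __GFP_NOWARN, numa_node);
}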
gfp               231 kernel/dma/contiguous.c struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
gfp               243 kernel/dma/contiguous.c 	if (cma && gfpflags_allow_blocking(gfp)) {
gfp               247 kernel/dma/contiguous.c 		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
gfp               628 kernel/dma/debug.c static int dma_debug_create_entries(gfp_t gfp)
gfp               633 kernel/dma/debug.c 	entry = (void *)get_zeroed_page(gfp);
gfp                87 kernel/dma/direct.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp                95 kernel/dma/direct.c 		gfp |= __GFP_NOWARN;
gfp                98 kernel/dma/direct.c 	gfp &= ~__GFP_ZERO;
gfp                99 kernel/dma/direct.c 	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
gfp               101 kernel/dma/direct.c 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
gfp               108 kernel/dma/direct.c 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
gfp               115 kernel/dma/direct.c 		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
gfp               116 kernel/dma/direct.c 			gfp |= GFP_DMA32;
gfp               120 kernel/dma/direct.c 		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
gfp               121 kernel/dma/direct.c 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
gfp               130 kernel/dma/direct.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               135 kernel/dma/direct.c 	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
gfp               206 kernel/dma/direct.c 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
gfp               210 kernel/dma/direct.c 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
gfp               211 kernel/dma/direct.c 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
gfp                82 kernel/dma/mapping.c 		gfp_t gfp, unsigned long attrs)
gfp                87 kernel/dma/mapping.c 	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
gfp                91 kernel/dma/mapping.c 	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
gfp                11 kernel/dma/virt.c 			    dma_addr_t *dma_handle, gfp_t gfp,
gfp                16 kernel/dma/virt.c 	ret = (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
gfp              1575 kernel/rcu/tree.c static bool rcu_gp_fqs_check_wake(int *gfp)
gfp              1580 kernel/rcu/tree.c 	*gfp = READ_ONCE(rcu_state.gp_flags);
gfp              1581 kernel/rcu/tree.c 	if (*gfp & RCU_GP_FLAG_FQS)
gfp                60 lib/argv_split.c char **argv_split(gfp_t gfp, const char *str, int *argcp)
gfp                67 lib/argv_split.c 	argv_str = kstrndup(str, KMALLOC_MAX_SIZE - 1, gfp);
gfp                72 lib/argv_split.c 	argv = kmalloc_array(argc + 2, sizeof(*argv), gfp);
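argv_split() above passes its gfp argument straight to kstrndup() and kmalloc_array(). A short hedged sketch (count_words is an illustrative wrapper):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>

static int count_words(const char *cmdline)
{
        int argc;
        char **argv = argv_split(GFP_KERNEL, cmdline, &argc);

        if (!argv)
                return -ENOMEM;
        argv_free(argv);        /* releases the array and the copied string */
        return argc;
}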
gfp                93 lib/btree.c    static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
gfp                97 lib/btree.c    	node = mempool_alloc(head->mempool, gfp);
gfp               414 lib/btree.c    		      gfp_t gfp)
gfp               419 lib/btree.c    	node = btree_node_alloc(head, gfp);
gfp               450 lib/btree.c    			      gfp_t gfp)
gfp               457 lib/btree.c    		err = btree_grow(head, geo, gfp);
gfp               473 lib/btree.c    		new = btree_node_alloc(head, gfp);
gfp               478 lib/btree.c    				new, level + 1, gfp);
gfp               511 lib/btree.c    		unsigned long *key, void *val, gfp_t gfp)
gfp               514 lib/btree.c    	return btree_insert_level(head, geo, key, val, 1, gfp);
gfp               640 lib/btree.c    		struct btree_geo *geo, gfp_t gfp)
gfp               664 lib/btree.c    		err = btree_insert(target, geo, key, val, gfp);
gfp                15 lib/bucket_locks.c 			     size_t max_size, unsigned int cpu_mult, gfp_t gfp,
gfp                34 lib/bucket_locks.c 		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
gfp               131 lib/debugobjects.c 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
gfp               170 lib/debugobjects.c 			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
gfp                38 lib/flex_proportions.c int fprop_global_init(struct fprop_global *p, gfp_t gfp)
gfp                44 lib/flex_proportions.c 	err = percpu_counter_init(&p->events, 1, gfp);
gfp               172 lib/flex_proportions.c int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
gfp               176 lib/flex_proportions.c 	err = percpu_counter_init(&pl->events, 0, gfp);
gfp                34 lib/idr.c      			unsigned long max, gfp_t gfp)
gfp                46 lib/idr.c      	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
gfp                79 lib/idr.c      int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
gfp                87 lib/idr.c      	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
gfp               117 lib/idr.c      int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
gfp               125 lib/idr.c      	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
gfp               128 lib/idr.c      		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
gfp               380 lib/idr.c      			gfp_t gfp)
gfp               453 lib/idr.c      	if (xas_nomem(&xas, gfp)) {
gfp               465 lib/idr.c      	alloc = kzalloc(sizeof(*bitmap), gfp);
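The idr code above threads the caller's gfp mask down to the node allocations (xas_nomem() and the bitmap kzalloc()). A minimal sketch of the public API; example_idr and register_obj are illustrative names:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(example_idr);         /* illustrative */

static int register_obj(void *obj)
{
        /* Returns the new ID in [1, INT_MAX] or a negative errno; an end
         * argument of 0 means "no upper bound", as the listing shows. */
        return idr_alloc(&example_idr, obj, 1, 0, GFP_KERNEL);
}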
gfp                15 lib/kasprintf.c char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
gfp                25 lib/kasprintf.c 	p = kmalloc_track_caller(first+1, gfp);
gfp                43 lib/kasprintf.c const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list ap)
gfp                46 lib/kasprintf.c 		return kstrdup_const(fmt, gfp);
gfp                48 lib/kasprintf.c 		return kstrdup_const(va_arg(ap, const char*), gfp);
gfp                49 lib/kasprintf.c 	return kvasprintf(gfp, fmt, ap);
gfp                53 lib/kasprintf.c char *kasprintf(gfp_t gfp, const char *fmt, ...)
gfp                59 lib/kasprintf.c 	p = kvasprintf(gfp, fmt, ap);
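kvasprintf()/kasprintf() above size the result with a first vsnprintf pass and then allocate it with the caller's gfp mask. A hedged usage sketch (make_label is illustrative):

#include <linux/kernel.h>
#include <linux/slab.h>

static char *make_label(int id)
{
        /* Result is kmalloc'ed with the given mask; release with kfree(). */
        return kasprintf(GFP_KERNEL, "queue-%d", id);
}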
gfp                61 lib/percpu-refcount.c 		    unsigned int flags, gfp_t gfp)
gfp                68 lib/percpu-refcount.c 		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
gfp               122 lib/percpu_counter.c int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
gfp               130 lib/percpu_counter.c 	fbc->counters = alloc_percpu_gfp(s32, gfp);
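In __percpu_counter_init() above the gfp mask only governs the per-CPU array allocation. A minimal sketch of the usual init-time call; the counter name is illustrative:

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static struct percpu_counter example_events;    /* illustrative */

static int example_counters_init(void)
{
        return percpu_counter_init(&example_events, 0, GFP_KERNEL);
}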
gfp               417 lib/radix-tree.c static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
gfp               434 lib/radix-tree.c 		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
gfp               616 lib/radix-tree.c 	gfp_t gfp = root_gfp_mask(root);
gfp               622 lib/radix-tree.c 		int error = radix_tree_extend(root, gfp, max, shift);
gfp               633 lib/radix-tree.c 			child = radix_tree_node_alloc(gfp, node, root, shift,
gfp              1486 lib/radix-tree.c 			      struct radix_tree_iter *iter, gfp_t gfp,
gfp              1502 lib/radix-tree.c 		int error = radix_tree_extend(root, gfp, start, shift);
gfp              1515 lib/radix-tree.c 			child = radix_tree_node_alloc(gfp, node, root, shift,
gfp                71 lib/reed_solomon/reed_solomon.c 				   int fcr, int prim, int nroots, gfp_t gfp)
gfp                76 lib/reed_solomon/reed_solomon.c 	rs = kzalloc(sizeof(*rs), gfp);
gfp                91 lib/reed_solomon/reed_solomon.c 	rs->alpha_to = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
gfp                95 lib/reed_solomon/reed_solomon.c 	rs->index_of = kmalloc_array(rs->nn + 1, sizeof(uint16_t), gfp);
gfp                99 lib/reed_solomon/reed_solomon.c 	rs->genpoly = kmalloc_array(rs->nroots + 1, sizeof(uint16_t), gfp);
gfp               215 lib/reed_solomon/reed_solomon.c 					   int prim, int nroots, gfp_t gfp)
gfp               237 lib/reed_solomon/reed_solomon.c 	rs = kzalloc(sizeof(*rs) + bsize, gfp);
gfp               266 lib/reed_solomon/reed_solomon.c 	rs->codec = codec_init(symsize, gfpoly, gffunc, fcr, prim, nroots, gfp);
gfp               289 lib/reed_solomon/reed_solomon.c 			       int nroots, gfp_t gfp)
gfp               291 lib/reed_solomon/reed_solomon.c 	return init_rs_internal(symsize, gfpoly, NULL, fcr, prim, nroots, gfp);
gfp               140 lib/rhashtable.c 						      gfp_t gfp)
gfp               151 lib/rhashtable.c 	tbl = kzalloc(size, gfp);
gfp               168 lib/rhashtable.c 					       gfp_t gfp)
gfp               175 lib/rhashtable.c 	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
gfp               179 lib/rhashtable.c 	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
gfp               180 lib/rhashtable.c 		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
gfp               488 lib/scatterlist.c 				    gfp_t gfp, unsigned int *nent_p)
gfp               507 lib/scatterlist.c 			    (gfp & ~GFP_DMA) | __GFP_ZERO);
gfp               515 lib/scatterlist.c 		page = alloc_pages(gfp, order);
gfp               540 lib/scatterlist.c struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
gfp               543 lib/scatterlist.c 	return sgl_alloc_order(length, 0, false, gfp, nent_p);
gfp               569 lib/string_helpers.c char *kstrdup_quotable(const char *src, gfp_t gfp)
gfp               581 lib/string_helpers.c 	dst = kmalloc(dlen + 1, gfp);
gfp               597 lib/string_helpers.c char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp)
gfp               619 lib/string_helpers.c 	quoted = kstrdup_quotable(buffer, gfp);
gfp               630 lib/string_helpers.c char *kstrdup_quotable_file(struct file *file, gfp_t gfp)
gfp               635 lib/string_helpers.c 		return kstrdup("<unknown>", gfp);
gfp               640 lib/string_helpers.c 		return kstrdup("<no_memory>", gfp);
gfp               644 lib/string_helpers.c 		pathname = kstrdup("<too_long>", gfp);
gfp               646 lib/string_helpers.c 		pathname = kstrdup_quotable(pathname, gfp);
gfp               165 lib/test_firmware.c static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
gfp               167 lib/test_firmware.c 	*dst = kstrndup(name, count, gfp);
gfp               604 lib/test_kmod.c static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
gfp               606 lib/test_kmod.c 	*dst = kstrndup(name, count, gfp);
gfp               567 lib/test_printf.c 	gfp_t gfp;
gfp               586 lib/test_printf.c 	gfp = GFP_TRANSHUGE;
gfp               587 lib/test_printf.c 	test("GFP_TRANSHUGE", "%pGg", &gfp);
gfp               589 lib/test_printf.c 	gfp = GFP_ATOMIC|__GFP_DMA;
gfp               590 lib/test_printf.c 	test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp);
gfp               592 lib/test_printf.c 	gfp = __GFP_ATOMIC;
gfp               593 lib/test_printf.c 	test("__GFP_ATOMIC", "%pGg", &gfp);
gfp               600 lib/test_printf.c 	gfp = ~__GFP_BITS_MASK;
gfp               601 lib/test_printf.c 	snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp);
gfp               602 lib/test_printf.c 	test(cmp_buffer, "%pGg", &gfp);
gfp               605 lib/test_printf.c 							(unsigned long) gfp);
gfp               606 lib/test_printf.c 	gfp |= __GFP_ATOMIC;
gfp               607 lib/test_printf.c 	test(cmp_buffer, "%pGg", &gfp);
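The test_printf cases above exercise the %pGg printk specifier, which decodes a gfp mask symbolically. A small hedged sketch of using it for debugging (report_mask is illustrative):

#include <linux/gfp.h>
#include <linux/printk.h>

static void report_mask(gfp_t gfp)
{
        /* Prints e.g. "GFP_KERNEL|__GFP_NOWARN"; %pGg takes a pointer. */
        pr_info("allocating with %pGg\n", &gfp);
}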
gfp                40 lib/test_xarray.c static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
gfp                42 lib/test_xarray.c 	return xa_store(xa, index, xa_mk_index(index), gfp);
gfp                51 lib/test_xarray.c static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
gfp                56 lib/test_xarray.c 				gfp) != 0);
gfp                72 lib/test_xarray.c 		unsigned order, void *entry, gfp_t gfp)
gfp                81 lib/test_xarray.c 	} while (xas_nomem(&xas, gfp));
gfp               296 lib/xarray.c   bool xas_nomem(struct xa_state *xas, gfp_t gfp)
gfp               303 lib/xarray.c   		gfp |= __GFP_ACCOUNT;
gfp               304 lib/xarray.c   	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
gfp               322 lib/xarray.c   static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
gfp               332 lib/xarray.c   		gfp |= __GFP_ACCOUNT;
gfp               333 lib/xarray.c   	if (gfpflags_allow_blocking(gfp)) {
gfp               335 lib/xarray.c   		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
gfp               338 lib/xarray.c   		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
gfp               366 lib/xarray.c   		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
gfp               369 lib/xarray.c   			gfp |= __GFP_ACCOUNT;
gfp               371 lib/xarray.c   		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
gfp              1382 lib/xarray.c   void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
gfp              1396 lib/xarray.c   	} while (__xas_nomem(&xas, gfp));
gfp              1419 lib/xarray.c   void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
gfp              1424 lib/xarray.c   	curr = __xa_store(xa, index, entry, gfp);
gfp              1448 lib/xarray.c   			void *old, void *entry, gfp_t gfp)
gfp              1463 lib/xarray.c   	} while (__xas_nomem(&xas, gfp));
gfp              1485 lib/xarray.c   int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
gfp              1504 lib/xarray.c   	} while (__xas_nomem(&xas, gfp));
gfp              1561 lib/xarray.c   		unsigned long last, void *entry, gfp_t gfp)
gfp              1590 lib/xarray.c   	} while (xas_nomem(&xas, gfp));
gfp              1615 lib/xarray.c   		struct xa_limit limit, gfp_t gfp)
gfp              1636 lib/xarray.c   	} while (__xas_nomem(&xas, gfp));
gfp              1664 lib/xarray.c   		struct xa_limit limit, u32 *next, gfp_t gfp)
gfp              1670 lib/xarray.c   	ret = __xa_alloc(xa, id, entry, limit, gfp);
gfp              1678 lib/xarray.c   		ret = __xa_alloc(xa, id, entry, limit, gfp);
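The xarray paths above consume the caller's gfp mask in xas_nomem()/__xas_nomem() whenever a new node must be allocated. A minimal sketch of the public entry point; example_xa and cache_obj are illustrative names:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);       /* illustrative */

static int cache_obj(unsigned long index, void *obj)
{
        /* xa_err() turns a returned error entry into a negative errno,
         * and 0 on success. */
        return xa_err(xa_store(&example_xa, index, obj, GFP_KERNEL));
}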
gfp               285 mm/backing-dev.c 		   int blkcg_id, gfp_t gfp)
gfp               312 mm/backing-dev.c 	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
gfp               318 mm/backing-dev.c 	err = fprop_local_init_percpu(&wb->completions, gfp);
gfp               323 mm/backing-dev.c 		err = percpu_counter_init(&wb->stat[i], 0, gfp);
gfp               406 mm/backing-dev.c wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
gfp               441 mm/backing-dev.c 	new_congested = kzalloc(sizeof(*new_congested), gfp);
gfp               528 mm/backing-dev.c 		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
gfp               556 mm/backing-dev.c 	wb = kmalloc(sizeof(*wb), gfp);
gfp               562 mm/backing-dev.c 	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
gfp               566 mm/backing-dev.c 	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
gfp               570 mm/backing-dev.c 	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
gfp               679 mm/backing-dev.c 				    gfp_t gfp)
gfp               683 mm/backing-dev.c 	might_sleep_if(gfpflags_allow_blocking(gfp));
gfp               690 mm/backing-dev.c 	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));
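wb_get_create() above uses might_sleep_if(gfpflags_allow_blocking(gfp)) so one entry point can serve both atomic and sleepable callers. A minimal sketch of that idiom; the allocation size is illustrative:

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static void *alloc_ctx(gfp_t gfp)
{
        /* Only complain about atomic callers if the mask allows sleeping. */
        might_sleep_if(gfpflags_allow_blocking(gfp));
        return kzalloc(128, gfp);
}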
gfp               966 mm/filemap.c   struct page *__page_cache_alloc(gfp_t gfp)
gfp               976 mm/filemap.c   			page = __alloc_pages_node(n, gfp, 0);
gfp               981 mm/filemap.c   	return alloc_pages(gfp, 0);
gfp              2760 mm/filemap.c   				gfp_t gfp)
gfp              2767 mm/filemap.c   		page = __page_cache_alloc(gfp);
gfp              2770 mm/filemap.c   		err = add_to_page_cache_lru(page, mapping, index, gfp);
gfp              2894 mm/filemap.c   				gfp_t gfp)
gfp              2896 mm/filemap.c   	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
gfp               576 mm/huge_memory.c 			struct page *page, gfp_t gfp)
gfp               586 mm/huge_memory.c 	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
gfp               710 mm/huge_memory.c 	gfp_t gfp;
gfp               759 mm/huge_memory.c 	gfp = alloc_hugepage_direct_gfpmask(vma);
gfp               760 mm/huge_memory.c 	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
gfp               766 mm/huge_memory.c 	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
gfp               781 mm/khugepaged.c khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
gfp               785 mm/khugepaged.c 	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
gfp               845 mm/khugepaged.c khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
gfp               960 mm/khugepaged.c 	gfp_t gfp;
gfp               965 mm/khugepaged.c 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
gfp               974 mm/khugepaged.c 	new_page = khugepaged_alloc_page(hpage, gfp, node);
gfp               980 mm/khugepaged.c 	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
gfp              1497 mm/khugepaged.c 	gfp_t gfp;
gfp              1510 mm/khugepaged.c 	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
gfp              1512 mm/khugepaged.c 	new_page = khugepaged_alloc_page(hpage, gfp, node);
gfp              1518 mm/khugepaged.c 	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
gfp               115 mm/kmemleak.c  #define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
gfp               416 mm/kmemleak.c  static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
gfp               423 mm/kmemleak.c  		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
gfp               571 mm/kmemleak.c  					     int min_count, gfp_t gfp)
gfp               578 mm/kmemleak.c  	object = mem_pool_alloc(gfp);
gfp               785 mm/kmemleak.c  static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
gfp               799 mm/kmemleak.c  		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
gfp               888 mm/kmemleak.c  			  gfp_t gfp)
gfp               893 mm/kmemleak.c  		create_object((unsigned long)ptr, size, min_count, gfp);
gfp               907 mm/kmemleak.c  				 gfp_t gfp)
gfp               920 mm/kmemleak.c  				      size, 0, gfp);
gfp               933 mm/kmemleak.c  void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
gfp               942 mm/kmemleak.c  		create_object((unsigned long)area->addr, size, 2, gfp);
gfp              1082 mm/kmemleak.c  void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
gfp              1087 mm/kmemleak.c  		add_scan_area((unsigned long)ptr, size, gfp);
gfp              1119 mm/kmemleak.c  			       gfp_t gfp)
gfp              1122 mm/kmemleak.c  		kmemleak_alloc(__va(phys), size, min_count, gfp);
gfp              3013 mm/memcontrol.c int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
gfp              3020 mm/memcontrol.c 	ret = try_charge(memcg, gfp, nr_pages);
gfp              3032 mm/memcontrol.c 		if (gfp & __GFP_NOFAIL) {
gfp              3050 mm/memcontrol.c int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
gfp              3060 mm/memcontrol.c 		ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
gfp              4379 mm/memcontrol.c static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
gfp              4381 mm/memcontrol.c 	return wb_domain_init(&memcg->cgwb_domain, gfp);
gfp              4588 mm/memcontrol.c static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
gfp              1796 mm/mempolicy.c static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
gfp              1800 mm/mempolicy.c 			apply_policy_zone(policy, gfp_zone(gfp)) &&
gfp              1808 mm/mempolicy.c static int policy_node(gfp_t gfp, struct mempolicy *policy,
gfp              1819 mm/mempolicy.c 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
gfp              2057 mm/mempolicy.c static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
gfp              2062 mm/mempolicy.c 	page = __alloc_pages(gfp, order, nid);
gfp              2098 mm/mempolicy.c alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
gfp              2113 mm/mempolicy.c 		page = alloc_page_interleave(gfp, order, nid);
gfp              2133 mm/mempolicy.c 		nmask = policy_nodemask(gfp, pol);
gfp              2137 mm/mempolicy.c 						gfp | __GFP_THISNODE, order);
gfp              2145 mm/mempolicy.c 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
gfp              2147 mm/mempolicy.c 						gfp | __GFP_NORETRY, order);
gfp              2153 mm/mempolicy.c 	nmask = policy_nodemask(gfp, pol);
gfp              2154 mm/mempolicy.c 	preferred_nid = policy_node(gfp, pol, node);
gfp              2155 mm/mempolicy.c 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
gfp              2177 mm/mempolicy.c struct page *alloc_pages_current(gfp_t gfp, unsigned order)
gfp              2182 mm/mempolicy.c 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
gfp              2190 mm/mempolicy.c 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
gfp              2192 mm/mempolicy.c 		page = __alloc_pages_nodemask(gfp, order,
gfp              2193 mm/mempolicy.c 				policy_node(gfp, pol, numa_node_id()),
gfp              2194 mm/mempolicy.c 				policy_nodemask(gfp, pol));
gfp               647 mm/page-writeback.c int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
gfp               657 mm/page-writeback.c 	return fprop_global_init(&dom->completions, gfp);
gfp              4851 mm/page_alloc.c 	gfp_t gfp = gfp_mask;
gfp              4861 mm/page_alloc.c 		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
gfp                36 mm/percpu-km.c 			       int page_start, int page_end, gfp_t gfp)
gfp                47 mm/percpu-km.c static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
gfp                55 mm/percpu-km.c 	chunk = pcpu_alloc_chunk(gfp);
gfp                59 mm/percpu-km.c 	pages = alloc_pages(gfp, order_base_2(nr_pages));
gfp                83 mm/percpu-vm.c 			    gfp_t gfp)
gfp                88 mm/percpu-vm.c 	gfp |= __GFP_HIGHMEM;
gfp                94 mm/percpu-vm.c 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
gfp               276 mm/percpu-vm.c 			       int page_start, int page_end, gfp_t gfp)
gfp               284 mm/percpu-vm.c 	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
gfp               331 mm/percpu-vm.c static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
gfp               336 mm/percpu-vm.c 	chunk = pcpu_alloc_chunk(gfp);
gfp               503 mm/percpu.c    static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
gfp               509 mm/percpu.c    		return kzalloc(size, gfp);
gfp               511 mm/percpu.c    		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
gfp              1404 mm/percpu.c    static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
gfp              1409 mm/percpu.c    	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
gfp              1418 mm/percpu.c    					   sizeof(chunk->alloc_map[0]), gfp);
gfp              1423 mm/percpu.c    					   sizeof(chunk->bound_map[0]), gfp);
gfp              1428 mm/percpu.c    					   sizeof(chunk->md_blocks[0]), gfp);
gfp              1526 mm/percpu.c    			       int page_start, int page_end, gfp_t gfp);
gfp              1529 mm/percpu.c    static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
gfp              1587 mm/percpu.c    				 gfp_t gfp)
gfp              1590 mm/percpu.c    	gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
gfp              1591 mm/percpu.c    	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
gfp              1592 mm/percpu.c    	bool do_warn = !(gfp & __GFP_NOWARN);
gfp              1627 mm/percpu.c    		if (gfp & __GFP_NOFAIL)
gfp              1737 mm/percpu.c    	kmemleak_alloc_percpu(ptr, size, gfp);
gfp              1781 mm/percpu.c    void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
gfp              1783 mm/percpu.c    	return pcpu_alloc(size, align, false, gfp);
gfp              1835 mm/percpu.c    	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
gfp              1917 mm/percpu.c    			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
gfp              1934 mm/percpu.c    		chunk = pcpu_create_chunk(gfp);
gfp               117 mm/readahead.c 		struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
gfp               135 mm/readahead.c 		if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
gfp               128 mm/rmap.c      static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
gfp               130 mm/rmap.c      	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
gfp               139 mm/shmem.c     static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
gfp               140 mm/shmem.c     static int shmem_replace_page(struct page **pagep, gfp_t gfp,
gfp               144 mm/shmem.c     			     gfp_t gfp, struct vm_area_struct *vma,
gfp               148 mm/shmem.c     		gfp_t gfp, struct vm_area_struct *vma,
gfp               608 mm/shmem.c     				   pgoff_t index, void *expected, gfp_t gfp)
gfp               648 mm/shmem.c     	} while (xas_nomem(&xas, gfp));
gfp              1450 mm/shmem.c     static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
gfp              1460 mm/shmem.c     	page = swap_cluster_readahead(swap, gfp, &vmf);
gfp              1466 mm/shmem.c     static struct page *shmem_alloc_hugepage(gfp_t gfp,
gfp              1483 mm/shmem.c     	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
gfp              1491 mm/shmem.c     static struct page *shmem_alloc_page(gfp_t gfp,
gfp              1498 mm/shmem.c     	page = alloc_page_vma(gfp, &pvma, 0);
gfp              1504 mm/shmem.c     static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
gfp              1521 mm/shmem.c     		page = shmem_alloc_hugepage(gfp, info, index);
gfp              1523 mm/shmem.c     		page = shmem_alloc_page(gfp, info, index);
gfp              1548 mm/shmem.c     static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
gfp              1550 mm/shmem.c     	return page_zonenum(page) > gfp_zone(gfp);
gfp              1553 mm/shmem.c     static int shmem_replace_page(struct page **pagep, gfp_t gfp,
gfp              1571 mm/shmem.c     	gfp &= ~GFP_CONSTRAINT_MASK;
gfp              1572 mm/shmem.c     	newpage = shmem_alloc_page(gfp, info, index);
gfp              1628 mm/shmem.c     			     gfp_t gfp, struct vm_area_struct *vma,
gfp              1653 mm/shmem.c     		page = shmem_swapin(swap, gfp, info, index);
gfp              1673 mm/shmem.c     	if (shmem_should_replace_page(page, gfp)) {
gfp              1674 mm/shmem.c     		error = shmem_replace_page(&page, gfp, info, index);
gfp              1679 mm/shmem.c     	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
gfp              1683 mm/shmem.c     						swp_to_radix_entry(swap), gfp);
gfp              1741 mm/shmem.c     	struct page **pagep, enum sgp_type sgp, gfp_t gfp,
gfp              1773 mm/shmem.c     					  sgp, gfp, vma, fault_type);
gfp              1834 mm/shmem.c     	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
gfp              1837 mm/shmem.c     		page = shmem_alloc_and_acct_page(gfp, inode,
gfp              1871 mm/shmem.c     	error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg,
gfp              1876 mm/shmem.c     					NULL, gfp & GFP_RECLAIM_MASK);
gfp              1994 mm/shmem.c     	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
gfp              2067 mm/shmem.c     				  gfp, vma, vmf, &ret);
gfp              2319 mm/shmem.c     	gfp_t gfp = mapping_gfp_mask(mapping);
gfp              2334 mm/shmem.c     		page = shmem_alloc_page(gfp, info, pgoff);
gfp              2371 mm/shmem.c     	ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
gfp              2376 mm/shmem.c     						gfp & GFP_RECLAIM_MASK);
gfp              4218 mm/shmem.c     					 pgoff_t index, gfp_t gfp)
gfp              4227 mm/shmem.c     				  gfp, NULL, NULL, NULL);
gfp              4237 mm/shmem.c     	return read_cache_page_gfp(mapping, index, gfp);
gfp               214 mm/slab.c      static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
gfp               533 mm/slab.c      					    int batchcount, gfp_t gfp)
gfp               538 mm/slab.c      	ac = kmalloc_node(memsize, gfp, node);
gfp               597 mm/slab.c      						int limit, gfp_t gfp)
gfp               634 mm/slab.c      						int batch, gfp_t gfp)
gfp               639 mm/slab.c      	alc = kmalloc_node(memsize, gfp, node);
gfp               648 mm/slab.c      static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
gfp               655 mm/slab.c      	alc_ptr = kcalloc_node(nr_node_ids, sizeof(void *), gfp, node);
gfp               662 mm/slab.c      		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
gfp               807 mm/slab.c      static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
gfp               826 mm/slab.c      	n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
gfp               873 mm/slab.c      				int node, gfp_t gfp, bool force_change)
gfp               883 mm/slab.c      		new_alien = alloc_alien_cache(node, cachep->limit, gfp);
gfp               890 mm/slab.c      			cachep->shared * cachep->batchcount, 0xbaadf00d, gfp);
gfp               895 mm/slab.c      	ret = init_cache_node(cachep, node, gfp);
gfp              1742 mm/slab.c      static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
gfp              1745 mm/slab.c      		return enable_cpucache(cachep, gfp);
gfp              1762 mm/slab.c      				sizeof(struct kmem_cache_node), gfp, node);
gfp              1914 mm/slab.c      	gfp_t gfp;
gfp              1965 mm/slab.c      		gfp = GFP_KERNEL;
gfp              1967 mm/slab.c      		gfp = GFP_NOWAIT;
gfp              2070 mm/slab.c      	err = setup_cpu_cache(cachep, gfp);
gfp              3764 mm/slab.c      static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp)
gfp              3771 mm/slab.c      		ret = setup_kmem_cache_node(cachep, node, gfp, true);
gfp              3799 mm/slab.c      				int batchcount, int shared, gfp_t gfp)
gfp              3841 mm/slab.c      	return setup_kmem_cache_nodes(cachep, gfp);
gfp              3845 mm/slab.c      				int batchcount, int shared, gfp_t gfp)
gfp              3850 mm/slab.c      	ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
gfp              3861 mm/slab.c      		__do_tune_cpucache(c, limit, batchcount, shared, gfp);
gfp              3868 mm/slab.c      static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
gfp              3875 mm/slab.c      	err = cache_random_seq_create(cachep, cachep->num, gfp);
gfp              3931 mm/slab.c      	err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
gfp               348 mm/slab.h      					     gfp_t gfp, int order,
gfp               368 mm/slab.h      	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
gfp               446 mm/slab.h      static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
gfp               480 mm/slab.h      					    gfp_t gfp, int order,
gfp               489 mm/slab.h      	return memcg_charge_slab(page, gfp, order, s);
gfp               663 mm/slab.h      			gfp_t gfp);
gfp               667 mm/slab.h      					unsigned int count, gfp_t gfp)
gfp              1361 mm/slab_common.c 				    gfp_t gfp)
gfp              1368 mm/slab_common.c 	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
gfp               191 mm/slob.c      static void *slob_new_pages(gfp_t gfp, int order, int node)
gfp               197 mm/slob.c      		page = __alloc_pages_node(node, gfp, order);
gfp               200 mm/slob.c      		page = alloc_pages(gfp, order);
gfp               301 mm/slob.c      static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
gfp               358 mm/slob.c      		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
gfp               374 mm/slob.c      	if (unlikely(gfp & __GFP_ZERO))
gfp               469 mm/slob.c      __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
gfp               475 mm/slob.c      	gfp &= gfp_allowed_mask;
gfp               477 mm/slob.c      	fs_reclaim_acquire(gfp);
gfp               478 mm/slob.c      	fs_reclaim_release(gfp);
gfp               493 mm/slob.c      		m = slob_alloc(size + minalign, gfp, align, node, minalign);
gfp               501 mm/slob.c      				   size, size + minalign, gfp, node);
gfp               506 mm/slob.c      			gfp |= __GFP_COMP;
gfp               507 mm/slob.c      		ret = slob_new_pages(gfp, order, node);
gfp               510 mm/slob.c      				   size, PAGE_SIZE << order, gfp, node);
gfp               513 mm/slob.c      	kmemleak_alloc(ret, size, 1, gfp);
gfp               517 mm/slob.c      void *__kmalloc(size_t size, gfp_t gfp)
gfp               519 mm/slob.c      	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
gfp               523 mm/slob.c      void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
gfp               525 mm/slob.c      	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
gfp               529 mm/slob.c      void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
gfp               532 mm/slob.c      	return __do_kmalloc_node(size, gfp, node, caller);
gfp               629 mm/slob.c      void *__kmalloc_node(size_t size, gfp_t gfp, int node)
gfp               631 mm/slob.c      	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
gfp               635 mm/slob.c      void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
gfp               637 mm/slob.c      	return slob_alloc_node(cachep, gfp, node);
gfp               114 mm/swap_state.c int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
gfp               144 mm/swap_state.c 	} while (xas_nomem(&xas, gfp));
gfp                51 mm/util.c      char *kstrdup(const char *s, gfp_t gfp)
gfp                60 mm/util.c      	buf = kmalloc_track_caller(len, gfp);
gfp                77 mm/util.c      const char *kstrdup_const(const char *s, gfp_t gfp)
gfp                82 mm/util.c      	return kstrdup(s, gfp);
gfp                96 mm/util.c      char *kstrndup(const char *s, size_t max, gfp_t gfp)
gfp               105 mm/util.c      	buf = kmalloc_track_caller(len+1, gfp);
gfp               123 mm/util.c      void *kmemdup(const void *src, size_t len, gfp_t gfp)
gfp               127 mm/util.c      	p = kmalloc_track_caller(len, gfp);
gfp               143 mm/util.c      char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
gfp               150 mm/util.c      	buf = kmalloc_track_caller(len + 1, gfp);
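The duplication helpers above (kstrdup, kstrdup_const, kstrndup, kmemdup, kmemdup_nul) all forward the caller's gfp mask to kmalloc_track_caller(). A hedged sketch (dup_name_atomic is an illustrative wrapper):

#include <linux/slab.h>
#include <linux/string.h>

static char *dup_name_atomic(const char *name)
{
        /* GFP_ATOMIC because this sketch assumes it may run under a spinlock;
         * kstrdup_const() would additionally avoid copying .rodata strings. */
        return kstrdup(name, GFP_ATOMIC);
}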
gfp               240 mm/vmpressure.c void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
gfp               256 mm/vmpressure.c 	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
gfp               323 mm/vmpressure.c void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
gfp               339 mm/vmpressure.c 	vmpressure(gfp, memcg, true, vmpressure_win, 0);
gfp               197 mm/z3fold.c    							gfp_t gfp)
gfp               202 mm/z3fold.c    				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
gfp               299 mm/z3fold.c    					struct z3fold_pool *pool, gfp_t gfp)
gfp               313 mm/z3fold.c    	slots = alloc_slots(pool, gfp);
gfp               765 mm/z3fold.c    static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
gfp               771 mm/z3fold.c    	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
gfp               864 mm/z3fold.c    static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
gfp               871 mm/z3fold.c    	bool can_sleep = gfpflags_allow_blocking(gfp);
gfp               931 mm/z3fold.c    		page = alloc_page(gfp);
gfp               936 mm/z3fold.c    	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
gfp              1481 mm/z3fold.c    static void *z3fold_zpool_create(const char *name, gfp_t gfp,
gfp              1487 mm/z3fold.c    	pool = z3fold_create_pool(name, gfp,
gfp              1501 mm/z3fold.c    static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
gfp              1504 mm/z3fold.c    	return z3fold_alloc(pool, size, gfp, handle);
gfp               141 mm/zbud.c      static void *zbud_zpool_create(const char *name, gfp_t gfp,
gfp               147 mm/zbud.c      	pool = zbud_create_pool(gfp, zpool_ops ? &zbud_zpool_ops : NULL);
gfp               160 mm/zbud.c      static int zbud_zpool_malloc(void *pool, size_t size, gfp_t gfp,
gfp               163 mm/zbud.c      	return zbud_alloc(pool, size, gfp, handle);
gfp               306 mm/zbud.c      struct zbud_pool *zbud_create_pool(gfp_t gfp, const struct zbud_ops *ops)
gfp               311 mm/zbud.c      	pool = kzalloc(sizeof(struct zbud_pool), gfp);
gfp               354 mm/zbud.c      int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
gfp               362 mm/zbud.c      	if (!size || (gfp & __GFP_HIGHMEM))
gfp               386 mm/zbud.c      	page = alloc_page(gfp);
gfp               155 mm/zpool.c     struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
gfp               175 mm/zpool.c     	zpool = kmalloc(sizeof(*zpool), gfp);
gfp               183 mm/zpool.c     	zpool->pool = driver->create(name, gfp, ops, zpool);
gfp               273 mm/zpool.c     int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
gfp               276 mm/zpool.c     	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
gfp               351 mm/zsmalloc.c  static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
gfp               354 mm/zsmalloc.c  			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
gfp               387 mm/zsmalloc.c  static void *zs_zpool_create(const char *name, gfp_t gfp,
gfp               404 mm/zsmalloc.c  static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
gfp               407 mm/zsmalloc.c  	*handle = zs_malloc(pool, size, gfp);
gfp              1065 mm/zsmalloc.c  					gfp_t gfp)
gfp              1069 mm/zsmalloc.c  	struct zspage *zspage = cache_alloc_zspage(pool, gfp);
gfp              1081 mm/zsmalloc.c  		page = alloc_page(gfp);
gfp              1459 mm/zsmalloc.c  unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
gfp              1469 mm/zsmalloc.c  	handle = cache_alloc_handle(pool, gfp);
gfp              1491 mm/zsmalloc.c  	zspage = alloc_zspage(pool, class, gfp);
gfp               248 mm/zswap.c     static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
gfp               251 mm/zswap.c     	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
gfp               508 mm/zswap.c     	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
gfp               529 mm/zswap.c     	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
gfp               999 mm/zswap.c     	gfp_t gfp;
gfp              1073 mm/zswap.c     	gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
gfp              1075 mm/zswap.c     		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
gfp              1076 mm/zswap.c     	ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
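zswap above builds an "opportunistic" mask, __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM, so the store path never enters direct reclaim. A hedged sketch of the same style of allocation (try_alloc_buffer is illustrative):

#include <linux/gfp.h>
#include <linux/slab.h>

static void *try_alloc_buffer(size_t len)
{
        /* No __GFP_DIRECT_RECLAIM bit: failure is cheap and silent, but
         * kswapd may still be woken to replenish free pages. */
        return kmalloc(len, __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM);
}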
gfp                12 net/ceph/buffer.c struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
gfp                16 net/ceph/buffer.c 	b = kmalloc(sizeof(*b), gfp);
gfp                20 net/ceph/buffer.c 	b->vec.iov_base = ceph_kvmalloc(len, gfp);
gfp               537 net/ceph/mon_client.c alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
gfp               541 net/ceph/mon_client.c 	req = kzalloc(sizeof(*req), gfp);
gfp               612 net/ceph/osd_client.c static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
gfp               646 net/ceph/osd_client.c 				    num_request_data_items, gfp, true);
gfp               663 net/ceph/osd_client.c 				    num_reply_data_items, gfp, true);
gfp               732 net/ceph/osd_client.c int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
gfp               737 net/ceph/osd_client.c 	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
gfp              1969 net/ceph/osdmap.c int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
gfp              1982 net/ceph/osdmap.c 		external_name = kmalloc(len + 1, gfp);
gfp              1997 net/ceph/osdmap.c int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
gfp              2004 net/ceph/osdmap.c 	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
gfp               140 net/core/dst_cache.c int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp)
gfp               143 net/core/dst_cache.c 					    gfp | __GFP_ZERO);
gfp                23 net/core/hwbm.c int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
gfp                31 net/core/hwbm.c 		buf = kmalloc(frag_size, gfp);
gfp               249 net/core/net_namespace.c 			      struct nlmsghdr *nlh, gfp_t gfp);
gfp               253 net/core/net_namespace.c int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
gfp               272 net/core/net_namespace.c 		rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
gfp              1059 net/core/net_namespace.c 			      struct nlmsghdr *nlh, gfp_t gfp)
gfp              1070 net/core/net_namespace.c 	msg = nlmsg_new(rtnl_net_get_size(), gfp);
gfp              1078 net/core/net_namespace.c 	rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
gfp               124 net/core/page_pool.c 	gfp_t gfp = _gfp;
gfp               131 net/core/page_pool.c 		gfp |= __GFP_COMP;
gfp               141 net/core/page_pool.c 	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
gfp               175 net/core/page_pool.c struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
gfp               185 net/core/page_pool.c 	page = __page_pool_alloc_pages_slow(pool, gfp);
gfp              1526 net/core/rtnetlink.c 				  struct net *src_net, gfp_t gfp)
gfp              1534 net/core/rtnetlink.c 			int id = peernet2id_alloc(src_net, link_net, gfp);
gfp              1592 net/core/rtnetlink.c 			    int tgt_netnsid, gfp_t gfp)
gfp              1684 net/core/rtnetlink.c 	if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
gfp               128 net/core/skbuff.c #define kmalloc_reserve(size, gfp, node, pfmemalloc) \
gfp               129 net/core/skbuff.c 	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
gfp              5770 net/core/skbuff.c static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
gfp              5911 net/core/skbuff.c static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
gfp              5916 net/core/skbuff.c 		return pskb_carve_inside_header(skb, len, headlen, gfp);
gfp              5918 net/core/skbuff.c 		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
gfp              5925 net/core/skbuff.c 			     int to_copy, gfp_t gfp)
gfp              5927 net/core/skbuff.c 	struct sk_buff  *clone = skb_clone(skb, gfp);
gfp              5932 net/core/skbuff.c 	if (pskb_carve(clone, off, gfp) < 0 ||
gfp              2363 net/core/sock.c bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
gfp              2379 net/core/sock.c 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
gfp              2388 net/core/sock.c 	pfrag->page = alloc_page(gfp);
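skb_page_frag_refill() above first tries a compound page with direct reclaim masked off, then falls back to a single page with the original mask. A hedged sketch of that fallback pattern (alloc_frag_page is an illustrative name):

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_frag_page(gfp_t gfp, unsigned int order)
{
        struct page *page;

        /* High-order attempt: no direct reclaim, quiet and cheap on failure. */
        page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
                           __GFP_NOWARN | __GFP_NORETRY, order);
        if (page)
                return page;
        /* Fall back to an order-0 page with the caller's original mask. */
        return alloc_page(gfp);
}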
gfp               256 net/core/xdp.c static int __mem_id_cyclic_get(gfp_t gfp)
gfp               262 net/core/xdp.c 	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
gfp               293 net/core/xdp.c 	gfp_t gfp = GFP_KERNEL;
gfp               324 net/core/xdp.c 	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
gfp               329 net/core/xdp.c 	id = __mem_id_cyclic_get(gfp);
gfp               464 net/decnet/af_decnet.c static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
gfp               467 net/decnet/af_decnet.c 	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
gfp               481 net/decnet/af_decnet.c 	sk->sk_allocation  = gfp;
gfp               204 net/decnet/dn_nsp_out.c 					     gfp_t gfp)
gfp               210 net/decnet/dn_nsp_out.c 	if ((skb2 = skb_clone(skb, gfp)) != NULL) {
gfp               341 net/decnet/dn_nsp_out.c 			gfp_t gfp, int oth)
gfp               365 net/decnet/dn_nsp_out.c 	dn_nsp_clone_and_send(skb, gfp);
gfp               493 net/decnet/dn_nsp_out.c void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
gfp               500 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
gfp               525 net/decnet/dn_nsp_out.c 			unsigned short reason, gfp_t gfp,
gfp               539 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
gfp               568 net/decnet/dn_nsp_out.c 			unsigned short reason, gfp_t gfp)
gfp               579 net/decnet/dn_nsp_out.c 	dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl,
gfp               589 net/decnet/dn_nsp_out.c 	gfp_t gfp = GFP_ATOMIC;
gfp               591 net/decnet/dn_nsp_out.c 	dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl,
gfp               601 net/decnet/dn_nsp_out.c 	gfp_t gfp = GFP_ATOMIC;
gfp               603 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
gfp               612 net/decnet/dn_nsp_out.c 	dn_nsp_queue_xmit(sk, skb, gfp, 1);
gfp               159 net/ipv4/igmp.c 			      gfp_t gfp);
gfp              1165 net/ipv4/igmp.c 			      gfp_t gfp)
gfp              1176 net/ipv4/igmp.c 	pmc = kzalloc(sizeof(*pmc), gfp);
gfp              1278 net/ipv4/igmp.c static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
gfp              1309 net/ipv4/igmp.c 		igmpv3_add_delrec(in_dev, im, gfp);
gfp              1423 net/ipv4/igmp.c 				unsigned int mode, gfp_t gfp)
gfp              1437 net/ipv4/igmp.c 	im = kzalloc(sizeof(*im), gfp);
gfp              1470 net/ipv4/igmp.c void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
gfp              1472 net/ipv4/igmp.c 	____ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE, gfp);
gfp              1661 net/ipv4/igmp.c void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp)
gfp              1676 net/ipv4/igmp.c 				__igmp_group_dropped(i, gfp);
gfp               862 net/ipv4/tcp.c struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
gfp               885 net/ipv4/tcp.c 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
gfp              1071 net/ipv4/tcp_ipv4.c 		   gfp_t gfp)
gfp              1089 net/ipv4/tcp_ipv4.c 		md5sig = kmalloc(sizeof(*md5sig), gfp);
gfp              1098 net/ipv4/tcp_ipv4.c 	key = sock_kmalloc(sk, sizeof(*key), gfp);
gfp                61 net/ipv4/tcp_output.c 			   int push_one, gfp_t gfp);
gfp              1305 net/ipv4/tcp_output.c 		 unsigned int mss_now, gfp_t gfp)
gfp              1335 net/ipv4/tcp_output.c 	if (skb_unclone(skb, gfp))
gfp              1339 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
gfp              1883 net/ipv4/tcp_output.c 			unsigned int mss_now, gfp_t gfp)
gfp              1892 net/ipv4/tcp_output.c 				    skb, len, mss_now, gfp);
gfp              1894 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
gfp              2366 net/ipv4/tcp_output.c 			   int push_one, gfp_t gfp)
gfp              2442 net/ipv4/tcp_output.c 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
gfp              2456 net/ipv4/tcp_output.c 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
gfp              1200 net/ipv6/ip6_output.c 					       gfp_t gfp)
gfp              1202 net/ipv6/ip6_output.c 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
gfp              1206 net/ipv6/ip6_output.c 						gfp_t gfp)
gfp              1208 net/ipv6/ip6_output.c 	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
gfp               468 net/key/af_key.c 								     gfp_t gfp)
gfp               473 net/key/af_key.c 	uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
gfp              3441 net/mac80211/cfg.c 			     u64 *cookie, gfp_t gfp)
gfp              3447 net/mac80211/cfg.c 	ack_skb = skb_copy(skb, gfp);
gfp              3778 net/mac80211/cfg.c 				   gfp_t gfp)
gfp              3803 net/mac80211/cfg.c 				     reason, cookie, gfp);
gfp              3809 net/mac80211/cfg.c 			      gfp_t gfp)
gfp              3828 net/mac80211/cfg.c 	cfg80211_nan_match(ieee80211_vif_to_wdev(vif), match, gfp);
gfp              1603 net/mac80211/ieee80211_i.h 			     u64 *cookie, gfp_t gfp);
gfp              1092 net/mac80211/key.c 				const u8 *replay_ctr, gfp_t gfp)
gfp              1098 net/mac80211/key.c 	cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp);
gfp              5672 net/mac80211/mlme.c 			       gfp_t gfp)
gfp              5678 net/mac80211/mlme.c 	cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp);
gfp              5682 net/mac80211/mlme.c void ieee80211_cqm_beacon_loss_notify(struct ieee80211_vif *vif, gfp_t gfp)
gfp              5688 net/mac80211/mlme.c 	cfg80211_cqm_beacon_loss_notify(sdata->dev, gfp);
gfp               202 net/mac80211/pm.c 				    gfp_t gfp)
gfp               206 net/mac80211/pm.c 	cfg80211_report_wowlan_wakeup(&sdata->wdev, wakeup, gfp);
gfp                38 net/mac80211/rate.h 					   struct sta_info *sta, gfp_t gfp)
gfp                41 net/mac80211/rate.h 	return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp);
gfp              1556 net/mac80211/rc80211_minstrel_ht.c minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
gfp              1571 net/mac80211/rc80211_minstrel_ht.c 	msp = kzalloc(sizeof(*msp), gfp);
gfp              1575 net/mac80211/rc80211_minstrel_ht.c 	msp->ratelist = kcalloc(max_rates, sizeof(struct minstrel_rate), gfp);
gfp              1579 net/mac80211/rc80211_minstrel_ht.c 	msp->sample_table = kmalloc_array(max_rates, SAMPLE_COLUMNS, gfp);
gfp               290 net/mac80211/sta_info.c 				    struct sta_info *sta, gfp_t gfp)
gfp               297 net/mac80211/sta_info.c 						     sta, gfp);
gfp               305 net/mac80211/sta_info.c 				const u8 *addr, gfp_t gfp)
gfp               312 net/mac80211/sta_info.c 	sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
gfp               318 net/mac80211/sta_info.c 			alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
gfp               330 net/mac80211/sta_info.c 		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
gfp               379 net/mac80211/sta_info.c 		txq_data = kcalloc(ARRAY_SIZE(sta->sta.txq), size, gfp);
gfp               391 net/mac80211/sta_info.c 	if (sta_prepare_rate_control(local, sta, gfp))
gfp               743 net/mac80211/sta_info.h 				const u8 *addr, gfp_t gfp);
gfp              1444 net/mac80211/tdls.c 				 u16 reason_code, gfp_t gfp)
gfp              1454 net/mac80211/tdls.c 	cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp);
gfp              1349 net/netfilter/nf_conntrack_core.c 		     gfp_t gfp, u32 hash)
gfp              1371 net/netfilter/nf_conntrack_core.c 	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
gfp              1404 net/netfilter/nf_conntrack_core.c 				   gfp_t gfp)
gfp              1406 net/netfilter/nf_conntrack_core.c 	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
gfp                40 net/netfilter/nf_conntrack_extend.c void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
gfp                72 net/netfilter/nf_conntrack_extend.c 	new = __krealloc(old, alloc, gfp);
gfp               199 net/netfilter/nf_conntrack_helper.c nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
gfp               203 net/netfilter/nf_conntrack_helper.c 	help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
gfp               108 net/netfilter/nf_tables_api.c 					     int msg_type, u32 size, gfp_t gfp)
gfp               112 net/netfilter/nf_tables_api.c 	trans = kzalloc(sizeof(struct nft_trans) + size, gfp);
gfp              4424 net/netfilter/nf_tables_api.c 			u64 timeout, u64 expiration, gfp_t gfp)
gfp              4429 net/netfilter/nf_tables_api.c 	elem = kzalloc(set->ops->elemsize + tmpl->len, gfp);
gfp              4975 net/netfilter/nf_tables_api.c 						gfp_t gfp)
gfp              4979 net/netfilter/nf_tables_api.c 	gcb = kzalloc(sizeof(*gcb), gfp);
gfp              5568 net/netfilter/nf_tables_api.c 		    int family, int report, gfp_t gfp)
gfp              5577 net/netfilter/nf_tables_api.c 	skb = nlmsg_new(NLMSG_GOODSIZE, gfp);
gfp              5588 net/netfilter/nf_tables_api.c 	nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp);
gfp               732 net/nfc/core.c struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp)
gfp               738 net/nfc/core.c 	skb = alloc_skb(total_size, gfp);
gfp               216 net/nfc/llcp.h struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern);
gfp               950 net/nfc/llcp_sock.c struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern)
gfp               955 net/nfc/llcp_sock.c 	sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern);
gfp              1597 net/nfc/netlink.c 			   u32 oui, u32 subcmd, gfp_t gfp)
gfp              1602 net/nfc/netlink.c 	skb = nlmsg_new(approxlen + 100, gfp);
gfp              1895 net/openvswitch/datapath.c 				   u32 flags, u8 cmd, gfp_t gfp)
gfp              1916 net/openvswitch/datapath.c 		int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
gfp               164 net/rds/connection.c 						gfp_t gfp, u8 tos,
gfp               193 net/rds/connection.c 	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
gfp               198 net/rds/connection.c 	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
gfp               339 net/rds/connection.c 				       gfp_t gfp, int dev_if)
gfp               341 net/rds/connection.c 	return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 0, dev_if);
gfp               349 net/rds/connection.c 						u8 tos, gfp_t gfp, int dev_if)
gfp               351 net/rds/connection.c 	return __rds_conn_create(net, laddr, faddr, trans, gfp, tos, 1, dev_if);
gfp               370 net/rds/ib.h   int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
gfp               401 net/rds/ib.h   int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
gfp               403 net/rds/ib.h   void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
gfp              1124 net/rds/ib_cm.c int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
gfp              1131 net/rds/ib_cm.c 	ic = kzalloc(sizeof(struct rds_ib_connection), gfp);
gfp              1135 net/rds/ib_cm.c 	ret = rds_ib_recv_alloc_caches(ic, gfp);
gfp               101 net/rds/ib_recv.c static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
gfp               106 net/rds/ib_recv.c 	cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
gfp               121 net/rds/ib_recv.c int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
gfp               125 net/rds/ib_recv.c 	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
gfp               127 net/rds/ib_recv.c 		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
gfp               308 net/rds/ib_recv.c 				  struct rds_ib_recv_work *recv, gfp_t gfp)
gfp               316 net/rds/ib_recv.c 	if (gfp & __GFP_DIRECT_RECLAIM) {
gfp               381 net/rds/ib_recv.c void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
gfp               387 net/rds/ib_recv.c 	bool can_wait = !!(gfp & __GFP_DIRECT_RECLAIM);
gfp               407 net/rds/ib_recv.c 		ret = rds_ib_recv_refill_one(conn, recv, gfp);
gfp               135 net/rds/loop.c static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
gfp               140 net/rds/loop.c 	lc = kzalloc(sizeof(struct rds_loop_connection), gfp);
gfp               284 net/rds/message.c struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
gfp               291 net/rds/message.c 	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
gfp                69 net/rds/page.c 			     gfp_t gfp)
gfp                76 net/rds/page.c 	gfp |= __GFP_HIGHMEM;
gfp                80 net/rds/page.c 		page = alloc_page(gfp);
gfp               122 net/rds/page.c 		page = alloc_page(gfp);
gfp               551 net/rds/rds.h  	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
gfp               773 net/rds/rds.h  				       u8 tos, gfp_t gfp,
gfp               779 net/rds/rds.h  						u8 tos, gfp_t gfp, int dev_if);
gfp               851 net/rds/rds.h  struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
gfp               885 net/rds/rds.h  			     gfp_t gfp);
gfp               896 net/rds/rds.h  		       struct rds_incoming *inc, gfp_t gfp);
gfp               284 net/rds/recv.c 		       struct rds_incoming *inc, gfp_t gfp)
gfp               376 net/rds/tcp.c  static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
gfp               383 net/rds/tcp.c  		tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
gfp               151 net/rds/tcp_recv.c 	gfp_t gfp;
gfp               174 net/rds/tcp_recv.c 						arg->gfp);
gfp               218 net/rds/tcp_recv.c 			clone = pskb_extract(skb, offset, to_copy, arg->gfp);
gfp               245 net/rds/tcp_recv.c 						  arg->gfp);
gfp               262 net/rds/tcp_recv.c static int rds_tcp_read_sock(struct rds_conn_path *cp, gfp_t gfp)
gfp               271 net/rds/tcp_recv.c 	arg.gfp = gfp;
gfp               277 net/rds/tcp_recv.c 	rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
gfp               285 net/rxrpc/af_rxrpc.c 					   gfp_t gfp,
gfp               322 net/rxrpc/af_rxrpc.c 	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
gfp                33 net/rxrpc/call_accept.c 				      unsigned long user_call_ID, gfp_t gfp,
gfp                67 net/rxrpc/call_accept.c 		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
gfp                80 net/rxrpc/call_accept.c 		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
gfp                94 net/rxrpc/call_accept.c 	call = rxrpc_alloc_call(rx, gfp, debug_id);
gfp               161 net/rxrpc/call_accept.c int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
gfp               166 net/rxrpc/call_accept.c 		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
gfp               175 net/rxrpc/call_accept.c 	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
gfp               657 net/rxrpc/call_accept.c 			       unsigned long user_call_ID, gfp_t gfp,
gfp               668 net/rxrpc/call_accept.c 					  gfp, debug_id);
gfp                98 net/rxrpc/call_object.c struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
gfp               104 net/rxrpc/call_object.c 	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
gfp               110 net/rxrpc/call_object.c 				    gfp);
gfp               114 net/rxrpc/call_object.c 	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
gfp               171 net/rxrpc/call_object.c 						  gfp_t gfp,
gfp               179 net/rxrpc/call_object.c 	call = rxrpc_alloc_call(rx, gfp, debug_id);
gfp               220 net/rxrpc/call_object.c 					 gfp_t gfp,
gfp               233 net/rxrpc/call_object.c 	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
gfp               289 net/rxrpc/call_object.c 	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
gfp               101 net/rxrpc/conn_client.c 					  gfp_t gfp)
gfp               108 net/rxrpc/conn_client.c 	idr_preload(gfp);
gfp               168 net/rxrpc/conn_client.c rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
gfp               176 net/rxrpc/conn_client.c 	conn = rxrpc_alloc_connection(gfp);
gfp               193 net/rxrpc/conn_client.c 	ret = rxrpc_get_client_connection_id(conn, gfp);
gfp               279 net/rxrpc/conn_client.c 				 gfp_t gfp)
gfp               289 net/rxrpc/conn_client.c 	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
gfp               340 net/rxrpc/conn_client.c 	candidate = rxrpc_alloc_client_connection(cp, gfp);
gfp               642 net/rxrpc/conn_client.c static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
gfp               651 net/rxrpc/conn_client.c 		if (!gfpflags_allow_blocking(gfp)) {
gfp               698 net/rxrpc/conn_client.c 		       gfp_t gfp)
gfp               708 net/rxrpc/conn_client.c 	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
gfp               715 net/rxrpc/conn_client.c 	ret = rxrpc_wait_for_channel(call, gfp);
gfp                35 net/rxrpc/conn_object.c struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
gfp                41 net/rxrpc/conn_object.c 	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
gfp               120 net/rxrpc/conn_service.c 							   gfp_t gfp)
gfp               122 net/rxrpc/conn_service.c 	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);
gfp               210 net/rxrpc/peer_object.c struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
gfp               216 net/rxrpc/peer_object.c 	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
gfp               284 net/rxrpc/peer_object.c 					    gfp_t gfp)
gfp               290 net/rxrpc/peer_object.c 	peer = rxrpc_alloc_peer(local, gfp);
gfp               325 net/rxrpc/peer_object.c 				     struct sockaddr_rxrpc *srx, gfp_t gfp)
gfp               344 net/rxrpc/peer_object.c 		candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
gfp               114 net/sched/act_ife.c int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
gfp               116 net/sched/act_ife.c 	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
gfp               124 net/sched/act_ife.c int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
gfp               126 net/sched/act_ife.c 	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
gfp                55 net/sctp/associola.c 					enum sctp_scope scope, gfp_t gfp)
gfp               228 net/sctp/associola.c 			     0, gfp))
gfp               252 net/sctp/associola.c 	if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
gfp               285 net/sctp/associola.c 					      enum sctp_scope scope, gfp_t gfp)
gfp               289 net/sctp/associola.c 	asoc = kzalloc(sizeof(*asoc), gfp);
gfp               293 net/sctp/associola.c 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
gfp               579 net/sctp/associola.c 					   const gfp_t gfp,
gfp               612 net/sctp/associola.c 	peer = sctp_transport_new(net, addr, gfp);
gfp              1570 net/sctp/associola.c 				     enum sctp_scope scope, gfp_t gfp)
gfp              1586 net/sctp/associola.c 				   scope, gfp, flags);
gfp              1592 net/sctp/associola.c 					 gfp_t gfp)
gfp              1599 net/sctp/associola.c 				      asoc->ep->base.bind_addr.port, gfp);
gfp              1617 net/sctp/associola.c int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
gfp              1619 net/sctp/associola.c 	bool preload = gfpflags_allow_blocking(gfp);
gfp              1627 net/sctp/associola.c 		idr_preload(gfp);
gfp                58 net/sctp/auth.c static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
gfp                67 net/sctp/auth.c 	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
gfp                79 net/sctp/auth.c struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
gfp                84 net/sctp/auth.c 	new = kzalloc(sizeof(struct sctp_shared_key), gfp);
gfp               188 net/sctp/auth.c 			gfp_t gfp)
gfp               202 net/sctp/auth.c 	new = sctp_auth_create_key(len, gfp);
gfp               223 net/sctp/auth.c 				    gfp_t gfp)
gfp               228 net/sctp/auth.c 			(struct sctp_hmac_algo_param *)asoc->c.auth_hmacs, gfp);
gfp               234 net/sctp/auth.c 				    gfp_t gfp)
gfp               239 net/sctp/auth.c 					 gfp);
gfp               256 net/sctp/auth.c 			gfp_t gfp)
gfp               266 net/sctp/auth.c 	secret = sctp_auth_create_key(auth_len, gfp);
gfp               289 net/sctp/auth.c 				 gfp_t gfp)
gfp               311 net/sctp/auth.c 	local_key_vector = sctp_auth_make_local_vector(asoc, gfp);
gfp               312 net/sctp/auth.c 	peer_key_vector = sctp_auth_make_peer_vector(asoc, gfp);
gfp               341 net/sctp/auth.c 					    gfp);
gfp               355 net/sctp/auth.c 				gfp_t gfp)
gfp               363 net/sctp/auth.c 		new = sctp_auth_shkey_create(sh_key->key_id, gfp);
gfp               383 net/sctp/auth.c int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
gfp               403 net/sctp/auth.c 	secret = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
gfp               454 net/sctp/auth.c int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
gfp               466 net/sctp/auth.c 				 gfp);
gfp               708 net/sctp/auth.c 			      struct sctp_shared_key *ep_key, gfp_t gfp)
gfp               728 net/sctp/auth.c 		asoc_key = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
gfp              1011 net/sctp/auth.c int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp)
gfp              1023 net/sctp/auth.c 						 SCTP_AUTH_NUM_HMACS), gfp);
gfp              1042 net/sctp/auth.c 				      SCTP_NUM_CHUNK_TYPES, gfp);
gfp              1055 net/sctp/auth.c 	err = sctp_auth_init_hmacs(ep, gfp);
gfp                35 net/sctp/bind_addr.c 			      gfp_t gfp, int flags);
gfp                45 net/sctp/bind_addr.c 			enum sctp_scope scope, gfp_t gfp,
gfp                57 net/sctp/bind_addr.c 					   gfp, flags);
gfp                69 net/sctp/bind_addr.c 						   SCTP_SCOPE_LINK, gfp,
gfp                90 net/sctp/bind_addr.c 			gfp_t gfp)
gfp               100 net/sctp/bind_addr.c 					   1, gfp);
gfp               139 net/sctp/bind_addr.c 		       int new_size, __u8 addr_state, gfp_t gfp)
gfp               144 net/sctp/bind_addr.c 	addr = kzalloc(sizeof(*addr), gfp);
gfp               207 net/sctp/bind_addr.c 					 gfp_t gfp)
gfp               234 net/sctp/bind_addr.c 	retval.v = kmalloc(len, gfp);
gfp               258 net/sctp/bind_addr.c 			   int addrs_len, __u16 port, gfp_t gfp)
gfp               283 net/sctp/bind_addr.c 					    SCTP_ADDR_SRC, gfp);
gfp               452 net/sctp/bind_addr.c 			      gfp_t gfp, int flags)
gfp               457 net/sctp/bind_addr.c 		error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags);
gfp               469 net/sctp/bind_addr.c 						   SCTP_ADDR_SRC, gfp);
gfp                47 net/sctp/chunk.c static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
gfp                50 net/sctp/chunk.c 	msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
gfp                43 net/sctp/endpointola.c 						gfp_t gfp)
gfp                48 net/sctp/endpointola.c 	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
gfp                55 net/sctp/endpointola.c 		if (sctp_auth_init(ep, gfp))
gfp                98 net/sctp/endpointola.c 	null_key = sctp_auth_shkey_create(0, gfp);
gfp               129 net/sctp/endpointola.c struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
gfp               134 net/sctp/endpointola.c 	ep = kzalloc(sizeof(*ep), gfp);
gfp               138 net/sctp/endpointola.c 	if (!sctp_endpoint_init(ep, sk, gfp))
gfp               180 net/sctp/output.c 					  int one_packet, gfp_t gfp)
gfp               192 net/sctp/output.c 			error = sctp_packet_transmit(packet, gfp);
gfp               407 net/sctp/output.c 			    struct sk_buff *head, int gso, gfp_t gfp)
gfp               442 net/sctp/output.c 		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
gfp               491 net/sctp/output.c 						 packet->auth->shkey, gfp);
gfp               545 net/sctp/output.c int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
gfp               573 net/sctp/output.c 			 MAX_HEADER, gfp);
gfp               597 net/sctp/output.c 	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
gfp                55 net/sctp/outqueue.c static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
gfp               280 net/sctp/outqueue.c void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
gfp               312 net/sctp/outqueue.c 		sctp_outq_flush(q, 0, gfp);
gfp               592 net/sctp/outqueue.c 				 int rtx_timeout, int *start_timer, gfp_t gfp)
gfp               668 net/sctp/outqueue.c 				sctp_packet_transmit(pkt, gfp);
gfp               673 net/sctp/outqueue.c 			error = sctp_packet_transmit(pkt, gfp);
gfp               689 net/sctp/outqueue.c 			error = sctp_packet_transmit(pkt, gfp);
gfp               699 net/sctp/outqueue.c 			error = sctp_packet_transmit(pkt, gfp);
gfp               752 net/sctp/outqueue.c void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
gfp               757 net/sctp/outqueue.c 	sctp_outq_flush(q, 0, gfp);
gfp               761 net/sctp/outqueue.c 				 struct sctp_chunk *chunk, gfp_t gfp)
gfp               772 net/sctp/outqueue.c 	return sctp_packet_transmit(&singleton, gfp);
gfp               785 net/sctp/outqueue.c 	gfp_t gfp;
gfp               904 net/sctp/outqueue.c 						      ctx->gfp);
gfp               940 net/sctp/outqueue.c 							    one_packet, ctx->gfp);
gfp               993 net/sctp/outqueue.c 				      &start_timer, ctx->gfp);
gfp              1092 net/sctp/outqueue.c 						    ctx->gfp);
gfp              1146 net/sctp/outqueue.c 			error = sctp_packet_transmit(packet, ctx->gfp);
gfp              1165 net/sctp/outqueue.c static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
gfp              1173 net/sctp/outqueue.c 		.gfp = gfp,
gfp               133 net/sctp/protocol.c 			      enum sctp_scope scope, gfp_t gfp, int copy_flags)
gfp                51 net/sctp/sm_make_chunk.c 					    gfp_t gfp);
gfp                53 net/sctp/sm_make_chunk.c 					 __u8 flags, int paylen, gfp_t gfp);
gfp                56 net/sctp/sm_make_chunk.c 					   gfp_t gfp);
gfp                66 net/sctp/sm_make_chunk.c 			      gfp_t gfp);
gfp               208 net/sctp/sm_make_chunk.c 				  gfp_t gfp, int vparam_len)
gfp               233 net/sctp/sm_make_chunk.c 	addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp);
gfp               319 net/sctp/sm_make_chunk.c 	retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize, gfp);
gfp               381 net/sctp/sm_make_chunk.c 				      gfp_t gfp, int unkparam_len)
gfp               400 net/sctp/sm_make_chunk.c 	addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);
gfp               472 net/sctp/sm_make_chunk.c 	retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp);
gfp               724 net/sctp/sm_make_chunk.c 					    int len, __u8 flags, gfp_t gfp)
gfp               740 net/sctp/sm_make_chunk.c 	retval = sctp_make_data(asoc, flags, sizeof(dp) + len, gfp);
gfp              1332 net/sctp/sm_make_chunk.c 				 struct sock *sk, gfp_t gfp)
gfp              1336 net/sctp/sm_make_chunk.c 	retval = kmem_cache_zalloc(sctp_chunk_cachep, gfp);
gfp              1385 net/sctp/sm_make_chunk.c 					   gfp_t gfp)
gfp              1398 net/sctp/sm_make_chunk.c 	skb = alloc_skb(chunklen, gfp);
gfp              1409 net/sctp/sm_make_chunk.c 	retval = sctp_chunkify(skb, asoc, sk, gfp);
gfp              1428 net/sctp/sm_make_chunk.c 					 __u8 flags, int paylen, gfp_t gfp)
gfp              1430 net/sctp/sm_make_chunk.c 	return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen, gfp);
gfp              1434 net/sctp/sm_make_chunk.c 				   __u8 flags, int paylen, gfp_t gfp)
gfp              1436 net/sctp/sm_make_chunk.c 	return _sctp_make_chunk(asoc, SCTP_CID_I_DATA, flags, paylen, gfp);
gfp              1441 net/sctp/sm_make_chunk.c 					    gfp_t gfp)
gfp              1445 net/sctp/sm_make_chunk.c 	chunk = _sctp_make_chunk(asoc, type, flags, paylen, gfp);
gfp              1585 net/sctp/sm_make_chunk.c 					     gfp_t gfp)
gfp              1593 net/sctp/sm_make_chunk.c 	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
gfp              1701 net/sctp/sm_make_chunk.c 					struct sctp_chunk *chunk, gfp_t gfp,
gfp              1823 net/sctp/sm_make_chunk.c 	retval = sctp_association_new(ep, ep->base.sk, scope, gfp);
gfp              2312 net/sctp/sm_make_chunk.c 		      struct sctp_init_chunk *peer_init, gfp_t gfp)
gfp              2332 net/sctp/sm_make_chunk.c 	if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
gfp              2349 net/sctp/sm_make_chunk.c 		if (!sctp_process_param(asoc, param, peer_addr, gfp))
gfp              2435 net/sctp/sm_make_chunk.c 				asoc->peer.i.initial_tsn, gfp))
gfp              2447 net/sctp/sm_make_chunk.c 			     asoc->c.sinit_max_instreams, gfp))
gfp              2453 net/sctp/sm_make_chunk.c 	if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
gfp              2496 net/sctp/sm_make_chunk.c 			      gfp_t gfp)
gfp              2528 net/sctp/sm_make_chunk.c 			if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED))
gfp              2593 net/sctp/sm_make_chunk.c 		asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
gfp              2664 net/sctp/sm_make_chunk.c 					    ntohs(param.p->length), gfp);
gfp              2678 net/sctp/sm_make_chunk.c 					    ntohs(param.p->length), gfp);
gfp              2694 net/sctp/sm_make_chunk.c 					    ntohs(param.p->length), gfp);
gfp                48 net/sctp/sm_sideeffect.c 				gfp_t gfp);
gfp                57 net/sctp/sm_sideeffect.c 			     gfp_t gfp);
gfp               658 net/sctp/sm_sideeffect.c 				 gfp_t gfp)
gfp               667 net/sctp/sm_sideeffect.c 	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
gfp              1094 net/sctp/sm_sideeffect.c 			      struct sctp_datamsg *msg, gfp_t gfp)
gfp              1099 net/sctp/sm_sideeffect.c 		sctp_outq_tail(&asoc->outqueue, chunk, gfp);
gfp              1132 net/sctp/sm_sideeffect.c 	       void *event_arg, gfp_t gfp)
gfp              1157 net/sctp/sm_sideeffect.c 				  &commands, gfp);
gfp              1174 net/sctp/sm_sideeffect.c 			     gfp_t gfp)
gfp              1187 net/sctp/sm_sideeffect.c 					       commands, gfp)))
gfp              1260 net/sctp/sm_sideeffect.c 				gfp_t gfp)
gfp              1293 net/sctp/sm_sideeffect.c 				sctp_outq_uncork(&asoc->outqueue, gfp);
gfp              1313 net/sctp/sm_sideeffect.c 				sctp_outq_uncork(&asoc->outqueue, gfp);
gfp              1377 net/sctp/sm_sideeffect.c 						      cmd->obj.init, gfp);
gfp              1468 net/sctp/sm_sideeffect.c 			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
gfp              1474 net/sctp/sm_sideeffect.c 			sctp_packet_transmit(packet, gfp);
gfp              1692 net/sctp/sm_sideeffect.c 				sctp_outq_uncork(&asoc->outqueue, gfp);
gfp              1731 net/sctp/sm_sideeffect.c 			sctp_outq_uncork(&asoc->outqueue, gfp);
gfp              1761 net/sctp/sm_sideeffect.c 			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
gfp              1769 net/sctp/sm_sideeffect.c 				sctp_outq_uncork(&asoc->outqueue, gfp);
gfp              1798 net/sctp/sm_sideeffect.c 			sctp_outq_uncork(&asoc->outqueue, gfp);
gfp              1800 net/sctp/sm_sideeffect.c 		sctp_outq_uncork(&asoc->outqueue, gfp);
gfp                79 net/sctp/stream.c 				 gfp_t gfp)
gfp                86 net/sctp/stream.c 	ret = genradix_prealloc(&stream->out, outcnt, gfp);
gfp                95 net/sctp/stream.c 				gfp_t gfp)
gfp               102 net/sctp/stream.c 	ret = genradix_prealloc(&stream->in, incnt, gfp);
gfp               111 net/sctp/stream.c 		     gfp_t gfp)
gfp               116 net/sctp/stream.c 	gfp |= __GFP_NOWARN;
gfp               129 net/sctp/stream.c 	ret = sctp_stream_alloc_out(stream, outcnt, gfp);
gfp               141 net/sctp/stream.c 	ret = sctp_stream_alloc_in(stream, incnt, gfp);
gfp                27 net/sctp/stream_interleave.c 					int len, __u8 flags, gfp_t gfp)
gfp                38 net/sctp/stream_interleave.c 	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
gfp               829 net/sctp/stream_interleave.c 			       struct sctp_chunk *chunk, gfp_t gfp)
gfp               835 net/sctp/stream_interleave.c 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
gfp               935 net/sctp/stream_interleave.c static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
gfp               964 net/sctp/stream_interleave.c 			       gfp_t gfp)
gfp               983 net/sctp/stream_interleave.c 	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
gfp               984 net/sctp/stream_interleave.c 		sctp_intl_start_pd(ulpq, gfp);
gfp               990 net/sctp/stream_interleave.c 				      __u32 mid, __u16 flags, gfp_t gfp)
gfp              1000 net/sctp/stream_interleave.c 				      sid, mid, flags, gfp);
gfp              1064 net/sctp/stream_interleave.c static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
gfp              1077 net/sctp/stream_interleave.c 			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
gfp              1084 net/sctp/stream_interleave.c 			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
gfp                26 net/sctp/stream_sched.c 			       __u16 value, gfp_t gfp)
gfp                44 net/sctp/stream_sched.c 				    gfp_t gfp)
gfp               200 net/sctp/stream_sched.c 			 __u16 value, gfp_t gfp)
gfp               213 net/sctp/stream_sched.c 	return asoc->outqueue.sched->set(&asoc->stream, sid, value, gfp);
gfp               257 net/sctp/stream_sched.c int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp)
gfp               263 net/sctp/stream_sched.c 	return sched->init_sid(stream, sid, gfp);
gfp                29 net/sctp/stream_sched_prio.c 			struct sctp_stream *stream, int prio, gfp_t gfp)
gfp                33 net/sctp/stream_sched_prio.c 	p = kmalloc(sizeof(*p), gfp);
gfp                46 net/sctp/stream_sched_prio.c 			struct sctp_stream *stream, int prio, gfp_t gfp)
gfp                77 net/sctp/stream_sched_prio.c 	return sctp_sched_prio_new_head(stream, prio, gfp);
gfp               151 net/sctp/stream_sched_prio.c 			       __u16 prio, gfp_t gfp)
gfp               159 net/sctp/stream_sched_prio.c 	prio_head = sctp_sched_prio_get_head(stream, prio, gfp);
gfp               201 net/sctp/stream_sched_prio.c 				    gfp_t gfp)
gfp               204 net/sctp/stream_sched_prio.c 	return sctp_sched_prio_set(stream, sid, 0, gfp);
gfp                66 net/sctp/stream_sched_rr.c 			     __u16 prio, gfp_t gfp)
gfp                86 net/sctp/stream_sched_rr.c 				  gfp_t gfp)
gfp                43 net/sctp/transport.c 						  gfp_t gfp)
gfp                92 net/sctp/transport.c 					  gfp_t gfp)
gfp                96 net/sctp/transport.c 	transport = kzalloc(sizeof(*transport), gfp);
gfp               100 net/sctp/transport.c 	if (!sctp_transport_init(net, transport, addr, gfp))
gfp                36 net/sctp/tsnmap.c 				     __u32 initial_tsn, gfp_t gfp)
gfp                39 net/sctp/tsnmap.c 		map->tsn_map = kzalloc(len>>3, gfp);
gfp                49 net/sctp/ulpevent.c 					       gfp_t gfp)
gfp                54 net/sctp/ulpevent.c 	skb = alloc_skb(size, gfp);
gfp               117 net/sctp/ulpevent.c 	__u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)
gfp               131 net/sctp/ulpevent.c 				      sizeof(struct sctp_assoc_change), 0, gfp);
gfp               149 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               244 net/sctp/ulpevent.c 	int flags, int state, int error, gfp_t gfp)
gfp               251 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               357 net/sctp/ulpevent.c 				gfp_t gfp)
gfp               376 net/sctp/ulpevent.c 	skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
gfp               413 net/sctp/ulpevent.c 	__u16 flags, __u32 error, gfp_t gfp)
gfp               426 net/sctp/ulpevent.c 			      gfp);
gfp               521 net/sctp/ulpevent.c 	__u16 flags, gfp_t gfp)
gfp               528 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               583 net/sctp/ulpevent.c 	const struct sctp_association *asoc, gfp_t gfp)
gfp               590 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               619 net/sctp/ulpevent.c 						gfp_t gfp)
gfp               643 net/sctp/ulpevent.c 	skb = skb_clone(chunk->skb, gfp);
gfp               716 net/sctp/ulpevent.c 					__u32 flags, gfp_t gfp)
gfp               723 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               769 net/sctp/ulpevent.c 	__u32 indication, gfp_t gfp)
gfp               776 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               807 net/sctp/ulpevent.c 	const struct sctp_association *asoc, gfp_t gfp)
gfp               814 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               832 net/sctp/ulpevent.c 	__be16 *stream_list, gfp_t gfp)
gfp               840 net/sctp/ulpevent.c 	event = sctp_ulpevent_new(length, MSG_NOTIFICATION, gfp);
gfp               861 net/sctp/ulpevent.c 	__u32 remote_tsn, gfp_t gfp)
gfp               868 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp               888 net/sctp/ulpevent.c 	__u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp)
gfp               895 net/sctp/ulpevent.c 				  MSG_NOTIFICATION, gfp);
gfp                86 net/sctp/ulpqueue.c 			gfp_t gfp)
gfp                93 net/sctp/ulpqueue.c 	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
gfp              1027 net/sctp/ulpqueue.c 				gfp_t gfp)
gfp              1077 net/sctp/ulpqueue.c 		      gfp_t gfp)
gfp              1094 net/sctp/ulpqueue.c 		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
gfp              1100 net/sctp/ulpqueue.c 			sctp_ulpq_partial_delivery(ulpq, gfp);
gfp              1113 net/sctp/ulpqueue.c void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
gfp              1128 net/sctp/ulpqueue.c 					      0, 0, 0, gfp);
gfp               567 net/sunrpc/auth.c 		int flags, gfp_t gfp)
gfp               590 net/sunrpc/auth.c 	new = auth->au_ops->crcreate(auth, acred, flags, gfp);
gfp              1374 net/sunrpc/auth_gss/auth_gss.c gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
gfp              1380 net/sunrpc/auth_gss/auth_gss.c 	if (!(cred = kzalloc(sizeof(*cred), gfp)))
gfp              1014 net/sunrpc/sched.c 	gfp_t gfp = GFP_NOFS;
gfp              1017 net/sunrpc/sched.c 		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
gfp              1021 net/sunrpc/sched.c 		buf = mempool_alloc(rpc_buffer_mempool, gfp);
gfp              1023 net/sunrpc/sched.c 		buf = kmalloc(size, gfp);
gfp               143 net/sunrpc/xdr.c xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
gfp               148 net/sunrpc/xdr.c 		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
gfp               327 net/sunrpc/xprtsock.c xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
gfp               337 net/sunrpc/xprtsock.c 		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
gfp                61 net/tipc/msg.c struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
gfp                66 net/tipc/msg.c 	skb = alloc_skb_fclone(buf_size, gfp);
gfp              1049 net/tipc/msg.h struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
gfp              1206 net/wireless/core.c 			 gfp_t gfp)
gfp              1214 net/wireless/core.c 	ev = kzalloc(sizeof(*ev), gfp);
gfp                60 net/wireless/ibss.c 			  struct ieee80211_channel *channel, gfp_t gfp)
gfp                72 net/wireless/ibss.c 	ev = kzalloc(sizeof(*ev), gfp);
gfp               195 net/wireless/mlme.c 				  const u8 *tsc, gfp_t gfp)
gfp               201 net/wireless/mlme.c 	char *buf = kmalloc(128, gfp);
gfp               216 net/wireless/mlme.c 	nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp);
gfp               847 net/wireless/mlme.c 			  gfp_t gfp)
gfp               861 net/wireless/mlme.c 	nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
gfp               870 net/wireless/mlme.c 			enum nl80211_radar_event event, gfp_t gfp)
gfp               907 net/wireless/mlme.c 	nl80211_radar_notify(rdev, chandef, event, netdev, gfp);
gfp              9635 net/wireless/nl80211.c 			    gfp_t gfp)
gfp              9641 net/wireless/nl80211.c 	skb = nlmsg_new(approxlen + 100, gfp);
gfp              9694 net/wireless/nl80211.c 					   int approxlen, gfp_t gfp)
gfp              9717 net/wireless/nl80211.c 					   cmd, attr, info, gfp);
gfp              9721 net/wireless/nl80211.c void __cfg80211_send_event_skb(struct sk_buff *skb, gfp_t gfp)
gfp              9743 net/wireless/nl80211.c 					skb, 0, mcgrp, gfp);
gfp              12635 net/wireless/nl80211.c 			struct cfg80211_nan_match_params *match, gfp_t gfp)
gfp              12646 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              12702 net/wireless/nl80211.c 					msg, 0, NL80211_MCGRP_NAN, gfp);
gfp              12717 net/wireless/nl80211.c 				  u64 cookie, gfp_t gfp)
gfp              12728 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              12762 net/wireless/nl80211.c 					msg, 0, NL80211_MCGRP_NAN, gfp);
gfp              15069 net/wireless/nl80211.c 				    enum nl80211_commands cmd, gfp_t gfp,
gfp              15076 net/wireless/nl80211.c 	msg = nlmsg_new(100 + len + req_ies_len, gfp);
gfp              15109 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15118 net/wireless/nl80211.c 			  size_t len, gfp_t gfp)
gfp              15121 net/wireless/nl80211.c 				NL80211_CMD_AUTHENTICATE, gfp, -1, NULL, 0);
gfp              15126 net/wireless/nl80211.c 			   size_t len, gfp_t gfp, int uapsd_queues,
gfp              15130 net/wireless/nl80211.c 				NL80211_CMD_ASSOCIATE, gfp, uapsd_queues,
gfp              15136 net/wireless/nl80211.c 			 size_t len, gfp_t gfp)
gfp              15139 net/wireless/nl80211.c 				NL80211_CMD_DEAUTHENTICATE, gfp, -1, NULL, 0);
gfp              15144 net/wireless/nl80211.c 			   size_t len, gfp_t gfp)
gfp              15147 net/wireless/nl80211.c 				NL80211_CMD_DISASSOCIATE, gfp, -1, NULL, 0);
gfp              15175 net/wireless/nl80211.c 				      const u8 *addr, gfp_t gfp)
gfp              15180 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              15199 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15208 net/wireless/nl80211.c 			       gfp_t gfp)
gfp              15211 net/wireless/nl80211.c 				  addr, gfp);
gfp              15216 net/wireless/nl80211.c 				gfp_t gfp)
gfp              15219 net/wireless/nl80211.c 				  addr, gfp);
gfp              15225 net/wireless/nl80211.c 				 gfp_t gfp)
gfp              15232 net/wireless/nl80211.c 			(cr->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
gfp              15274 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15283 net/wireless/nl80211.c 			 struct cfg80211_roam_info *info, gfp_t gfp)
gfp              15291 net/wireless/nl80211.c 			(info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
gfp              15325 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15401 net/wireless/nl80211.c 			     gfp_t gfp)
gfp              15406 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              15424 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15433 net/wireless/nl80211.c 					int sig_dbm, gfp_t gfp)
gfp              15445 net/wireless/nl80211.c 	msg = nlmsg_new(100 + ie_len, gfp);
gfp              15467 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15478 net/wireless/nl80211.c 				 const u8 *tsc, gfp_t gfp)
gfp              15483 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              15505 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15572 net/wireless/nl80211.c 	unsigned int duration, gfp_t gfp)
gfp              15577 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              15606 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15615 net/wireless/nl80211.c 			       unsigned int duration, gfp_t gfp)
gfp              15623 net/wireless/nl80211.c 					  duration, gfp);
gfp              15629 net/wireless/nl80211.c 					gfp_t gfp)
gfp              15636 net/wireless/nl80211.c 					  rdev, wdev, cookie, chan, 0, gfp);
gfp              15642 net/wireless/nl80211.c 					gfp_t gfp)
gfp              15649 net/wireless/nl80211.c 					  rdev, wdev, cookie, chan, 0, gfp);
gfp              15654 net/wireless/nl80211.c 		      struct station_info *sinfo, gfp_t gfp)
gfp              15662 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              15673 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15678 net/wireless/nl80211.c 			    struct station_info *sinfo, gfp_t gfp)
gfp              15690 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              15703 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15709 net/wireless/nl80211.c 			  gfp_t gfp)
gfp              15716 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
gfp              15734 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15743 net/wireless/nl80211.c 				       const u8 *addr, gfp_t gfp)
gfp              15754 net/wireless/nl80211.c 	msg = nlmsg_new(100, gfp);
gfp              15779 net/wireless/nl80211.c 				const u8 *addr, gfp_t gfp)
gfp              15792 net/wireless/nl80211.c 					 addr, gfp);
gfp              15799 net/wireless/nl80211.c 					const u8 *addr, gfp_t gfp)
gfp              15814 net/wireless/nl80211.c 					 addr, gfp);
gfp              15823 net/wireless/nl80211.c 		      const u8 *buf, size_t len, u32 flags, gfp_t gfp)
gfp              15829 net/wireless/nl80211.c 	msg = nlmsg_new(100 + len, gfp);
gfp              15862 net/wireless/nl80211.c 			     const u8 *buf, size_t len, bool ack, gfp_t gfp)
gfp              15872 net/wireless/nl80211.c 	msg = nlmsg_new(100 + len, gfp);
gfp              15896 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              15906 net/wireless/nl80211.c 				     bool unencrypted, gfp_t gfp)
gfp              15922 net/wireless/nl80211.c 	msg = nlmsg_new(100 + skb->len, gfp);
gfp              15969 net/wireless/nl80211.c 					    const char *mac, gfp_t gfp)
gfp              15973 net/wireless/nl80211.c 	struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16006 net/wireless/nl80211.c static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp)
gfp              16017 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16022 net/wireless/nl80211.c 			      s32 rssi_level, gfp_t gfp)
gfp              16043 net/wireless/nl80211.c 	msg = cfg80211_prepare_cqm(dev, NULL, gfp);
gfp              16055 net/wireless/nl80211.c 	cfg80211_send_cqm(msg, gfp);
gfp              16066 net/wireless/nl80211.c 			     u32 rate, u32 intvl, gfp_t gfp)
gfp              16070 net/wireless/nl80211.c 	msg = cfg80211_prepare_cqm(dev, peer, gfp);
gfp              16083 net/wireless/nl80211.c 	cfg80211_send_cqm(msg, gfp);
gfp              16092 net/wireless/nl80211.c 				 const u8 *peer, u32 num_packets, gfp_t gfp)
gfp              16098 net/wireless/nl80211.c 	msg = cfg80211_prepare_cqm(dev, peer, gfp);
gfp              16105 net/wireless/nl80211.c 	cfg80211_send_cqm(msg, gfp);
gfp              16113 net/wireless/nl80211.c void cfg80211_cqm_beacon_loss_notify(struct net_device *dev, gfp_t gfp)
gfp              16117 net/wireless/nl80211.c 	msg = cfg80211_prepare_cqm(dev, NULL, gfp);
gfp              16124 net/wireless/nl80211.c 	cfg80211_send_cqm(msg, gfp);
gfp              16134 net/wireless/nl80211.c 				     const u8 *replay_ctr, gfp_t gfp)
gfp              16140 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16168 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16176 net/wireless/nl80211.c 			       const u8 *replay_ctr, gfp_t gfp)
gfp              16183 net/wireless/nl80211.c 	nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
gfp              16190 net/wireless/nl80211.c 			       const u8 *bssid, bool preauth, gfp_t gfp)
gfp              16196 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16225 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16233 net/wireless/nl80211.c 				     const u8 *bssid, bool preauth, gfp_t gfp)
gfp              16240 net/wireless/nl80211.c 	nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
gfp              16247 net/wireless/nl80211.c 				     gfp_t gfp,
gfp              16254 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16277 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16328 net/wireless/nl80211.c 		     struct net_device *netdev, gfp_t gfp)
gfp              16333 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16365 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16374 net/wireless/nl80211.c 				       gfp_t gfp)
gfp              16384 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16418 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16429 net/wireless/nl80211.c 			   bool is_valid_ack_signal, gfp_t gfp)
gfp              16438 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16462 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16576 net/wireless/nl80211.c 				   gfp_t gfp)
gfp              16588 net/wireless/nl80211.c 	msg = nlmsg_new(size, gfp);
gfp              16680 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16691 net/wireless/nl80211.c 				u16 reason_code, gfp_t gfp)
gfp              16701 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16722 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp              16840 net/wireless/nl80211.c void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
gfp              16854 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16909 net/wireless/nl80211.c 				   gfp_t gfp)
gfp              16919 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16950 net/wireless/nl80211.c 				    gfp_t gfp)
gfp              16959 net/wireless/nl80211.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              16979 net/wireless/nl80211.c 				NL80211_MCGRP_MLME, gfp);
gfp                66 net/wireless/nl80211.h 			  const u8 *buf, size_t len, gfp_t gfp);
gfp                69 net/wireless/nl80211.h 			   const u8 *buf, size_t len, gfp_t gfp,
gfp                74 net/wireless/nl80211.h 			 const u8 *buf, size_t len, gfp_t gfp);
gfp                77 net/wireless/nl80211.h 			   const u8 *buf, size_t len, gfp_t gfp);
gfp                80 net/wireless/nl80211.h 			       const u8 *addr, gfp_t gfp);
gfp                83 net/wireless/nl80211.h 				const u8 *addr, gfp_t gfp);
gfp                87 net/wireless/nl80211.h 				 gfp_t gfp);
gfp                90 net/wireless/nl80211.h 			 struct cfg80211_roam_info *info, gfp_t gfp);
gfp               101 net/wireless/nl80211.h 			    int key_id, const u8 *tsc, gfp_t gfp);
gfp               110 net/wireless/nl80211.h 			     gfp_t gfp);
gfp               115 net/wireless/nl80211.h 		      const u8 *buf, size_t len, u32 flags, gfp_t gfp);
gfp               121 net/wireless/nl80211.h 		     struct net_device *netdev, gfp_t gfp);
gfp               293 net/wireless/pmsr.c 			    gfp_t gfp)
gfp               301 net/wireless/pmsr.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp               489 net/wireless/pmsr.c 			  gfp_t gfp)
gfp               504 net/wireless/pmsr.c 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
gfp              3346 net/wireless/reg.c 				 gfp_t gfp)
gfp              3364 net/wireless/reg.c 	reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp);
gfp                88 net/wireless/reg.h 				 gfp_t gfp);
gfp               234 net/wireless/scan.c 				  u8 *new_ie, gfp_t gfp)
gfp               244 net/wireless/scan.c 	sub_copy = kmemdup(subelement, subie_len, gfp);
gfp              1370 net/wireless/scan.c 				gfp_t gfp)
gfp              1416 net/wireless/scan.c 	ies = kzalloc(sizeof(*ies) + ielen, gfp);
gfp              1447 net/wireless/scan.c 			regulatory_hint_found_beacon(wiphy, channel, gfp);
gfp              1450 net/wireless/scan.c 			regulatory_hint_found_beacon(wiphy, channel, gfp);
gfp              1548 net/wireless/scan.c 				       gfp_t gfp)
gfp              1569 net/wireless/scan.c 	new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
gfp              1573 net/wireless/scan.c 	profile = kmalloc(ielen, gfp);
gfp              1633 net/wireless/scan.c 							 gfp);
gfp              1646 net/wireless/scan.c 							      gfp);
gfp              1664 net/wireless/scan.c 			 gfp_t gfp)
gfp              1671 net/wireless/scan.c 					      ielen, NULL, gfp);
gfp              1677 net/wireless/scan.c 				   gfp);
gfp              1687 net/wireless/scan.c 				 gfp_t gfp)
gfp              1700 net/wireless/scan.c 				   ie, ielen, non_tx_data, gfp);
gfp              1796 net/wireless/scan.c 				      gfp_t gfp)
gfp              1829 net/wireless/scan.c 	ies = kzalloc(sizeof(*ies) + ielen, gfp);
gfp              1866 net/wireless/scan.c 			regulatory_hint_found_beacon(wiphy, channel, gfp);
gfp              1869 net/wireless/scan.c 			regulatory_hint_found_beacon(wiphy, channel, gfp);
gfp              1881 net/wireless/scan.c 			       gfp_t gfp)
gfp              1891 net/wireless/scan.c 						    len, gfp);
gfp              1902 net/wireless/scan.c 					 &non_tx_data, gfp);
gfp               790 net/wireless/sme.c 			   gfp_t gfp)
gfp               835 net/wireless/sme.c 		     (params->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
gfp               957 net/wireless/sme.c 		     gfp_t gfp)
gfp               978 net/wireless/sme.c 		     (info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
gfp              1047 net/wireless/sme.c 			      gfp_t gfp)
gfp              1057 net/wireless/sme.c 	ev = kzalloc(sizeof(*ev), gfp);
gfp              1132 net/wireless/sme.c 			   bool locally_generated, gfp_t gfp)
gfp              1139 net/wireless/sme.c 	ev = kzalloc(sizeof(*ev) + ie_len, gfp);
gfp              1958 net/wireless/util.c int cfg80211_sinfo_alloc_tid_stats(struct station_info *sinfo, gfp_t gfp)
gfp              1962 net/wireless/util.c 				gfp);
gfp               384 net/xfrm/xfrm_policy.c struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
gfp               388 net/xfrm/xfrm_policy.c 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
gfp                55 security/apparmor/include/file.h 						    gfp_t gfp)
gfp                59 security/apparmor/include/file.h 	ctx = kzalloc(sizeof(struct aa_file_ctx), gfp);
gfp                60 security/apparmor/include/label.h 					     gfp_t gfp);
gfp               280 security/apparmor/include/label.h bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
gfp               281 security/apparmor/include/label.h struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
gfp               300 security/apparmor/include/label.h 				gfp_t gfp);
gfp               303 security/apparmor/include/label.h bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
gfp               313 security/apparmor/include/label.h 		      int flags, gfp_t gfp);
gfp               315 security/apparmor/include/label.h 			 struct aa_label *label, int flags, gfp_t gfp);
gfp               317 security/apparmor/include/label.h 		     struct aa_label *label, int flags, gfp_t gfp);
gfp               319 security/apparmor/include/label.h 			 struct aa_label *label, int flags, gfp_t gfp);
gfp               321 security/apparmor/include/label.h 		      gfp_t gfp);
gfp               322 security/apparmor/include/label.h void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp);
gfp               323 security/apparmor/include/label.h void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp);
gfp               324 security/apparmor/include/label.h void aa_label_printk(struct aa_label *label, gfp_t gfp);
gfp               327 security/apparmor/include/label.h 				     size_t n, gfp_t gfp, bool create,
gfp               330 security/apparmor/include/label.h 				gfp_t gfp, bool create, bool force_stack);
gfp               447 security/apparmor/include/label.h struct aa_proxy *aa_alloc_proxy(struct aa_label *l, gfp_t gfp);
gfp               104 security/apparmor/include/lib.h char *aa_str_alloc(int size, gfp_t gfp);
gfp               200 security/apparmor/include/lib.h 		    const char *name, gfp_t gfp);
gfp               179 security/apparmor/include/policy.h 				    gfp_t gfp);
gfp               181 security/apparmor/include/policy.h 				       const char *base, gfp_t gfp);
gfp                30 security/apparmor/include/secid.h int aa_alloc_secid(struct aa_label *label, gfp_t gfp);
gfp                60 security/apparmor/label.c struct aa_proxy *aa_alloc_proxy(struct aa_label *label, gfp_t gfp)
gfp                64 security/apparmor/label.c 	new = kzalloc(sizeof(struct aa_proxy), gfp);
gfp               401 security/apparmor/label.c bool aa_label_init(struct aa_label *label, int size, gfp_t gfp)
gfp               406 security/apparmor/label.c 	if (aa_alloc_secid(label, gfp) < 0)
gfp               426 security/apparmor/label.c struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp)
gfp               434 security/apparmor/label.c 			gfp);
gfp               439 security/apparmor/label.c 	if (!aa_label_init(new, size, gfp))
gfp               443 security/apparmor/label.c 		proxy = aa_alloc_proxy(new, gfp);
gfp               826 security/apparmor/label.c 						    int len, gfp_t gfp)
gfp               844 security/apparmor/label.c 	new = aa_label_alloc(len, NULL, gfp);
gfp               860 security/apparmor/label.c 					     gfp_t gfp)
gfp               867 security/apparmor/label.c 	return vec_create_and_insert_label(vec, len, gfp);
gfp              1191 security/apparmor/label.c 				gfp_t gfp)
gfp              1216 security/apparmor/label.c 		new = aa_label_alloc(a->size + b->size, NULL, gfp);
gfp              1416 security/apparmor/label.c bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp)
gfp              1429 security/apparmor/label.c 	if (aa_label_acntsxprint(&name, ns, label, FLAGS_NONE, gfp) == -1)
gfp              1660 security/apparmor/label.c 		      int flags, gfp_t gfp)
gfp              1671 security/apparmor/label.c 	*strp = kmalloc(size + 1, gfp);
gfp              1689 security/apparmor/label.c 			 struct aa_label *label, int flags, gfp_t gfp)
gfp              1700 security/apparmor/label.c 	*strp = aa_str_alloc(size + 1, gfp);
gfp              1708 security/apparmor/label.c 		     struct aa_label *label, int flags, gfp_t gfp)
gfp              1719 security/apparmor/label.c 		len  = aa_label_asxprint(&name, ns, label, flags, gfp);
gfp              1738 security/apparmor/label.c 			 struct aa_label *label, int flags, gfp_t gfp)
gfp              1747 security/apparmor/label.c 		len = aa_label_asxprint(&str, ns, label, flags, gfp);
gfp              1762 security/apparmor/label.c 		      gfp_t gfp)
gfp              1770 security/apparmor/label.c 		len = aa_label_asxprint(&str, ns, label, flags, gfp);
gfp              1784 security/apparmor/label.c void aa_label_audit(struct audit_buffer *ab, struct aa_label *label, gfp_t gfp)
gfp              1788 security/apparmor/label.c 	aa_label_xaudit(ab, ns, label, FLAG_VIEW_SUBNS, gfp);
gfp              1792 security/apparmor/label.c void aa_label_seq_print(struct seq_file *f, struct aa_label *label, gfp_t gfp)
gfp              1796 security/apparmor/label.c 	aa_label_seq_xprint(f, ns, label, FLAG_VIEW_SUBNS, gfp);
gfp              1800 security/apparmor/label.c void aa_label_printk(struct aa_label *label, gfp_t gfp)
gfp              1804 security/apparmor/label.c 	aa_label_xprintk(ns, label, FLAG_VIEW_SUBNS, gfp);
gfp              1858 security/apparmor/label.c 				     size_t n, gfp_t gfp, bool create,
gfp              1883 security/apparmor/label.c 	error = vec_setup(profile, vec, len, gfp);
gfp              1922 security/apparmor/label.c 		label = aa_vec_find_or_create_label(vec, len, gfp);
gfp              1939 security/apparmor/label.c 				gfp_t gfp, bool create, bool force_stack)
gfp              1941 security/apparmor/label.c 	return aa_label_strn_parse(base, str, strlen(str), gfp, create,
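The label.c entries above all take a caller-supplied gfp_t, so the same allocation and name-printing paths serve both sleeping and atomic callers. A minimal caller sketch (illustrative only, built on the signatures listed above and assuming process context where GFP_KERNEL may sleep):

    /* hypothetical helper, not code from the tree */
    static struct aa_label *example_alloc_label(int vec_size)
    {
            /* a NULL proxy lets aa_label_alloc() create one itself via
             * aa_alloc_proxy() (cf. label.c:443 above) */
            struct aa_label *label = aa_label_alloc(vec_size, NULL, GFP_KERNEL);

            if (!label)
                    return NULL;    /* allocation failure */
            return label;
    }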
gfp               135 security/apparmor/lib.c __counted char *aa_str_alloc(int size, gfp_t gfp)
gfp               139 security/apparmor/lib.c 	str = kmalloc(sizeof(struct counted_str) + size, gfp);
gfp               489 security/apparmor/lib.c 		    const char *name, gfp_t gfp)
gfp               495 security/apparmor/lib.c 		hname = aa_str_alloc(strlen(prefix) + strlen(name) + 3, gfp);
gfp               499 security/apparmor/lib.c 		hname = aa_str_alloc(strlen(name) + 1, gfp);
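lib.c:135 above carves a refcounted string out of a single kmalloc: a counted_str header followed by the character payload, all charged against the caller's gfp. A self-contained userspace illustration of that header-plus-flexible-array layout (the struct and function names here are hypothetical, not AppArmor's):

    #include <stdlib.h>

    struct counted {
            long refs;
            char str[];     /* flexible array member: the string lives in
                             * the same allocation as the header */
    };

    static char *counted_str_alloc(size_t size)
    {
            struct counted *c = malloc(sizeof(*c) + size);  /* one block */

            if (!c)
                    return NULL;
            c->refs = 1;
            return c->str;  /* callers only ever see the string part */
    }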
gfp                65 security/apparmor/lsm.c static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
gfp                75 security/apparmor/lsm.c 				 gfp_t gfp)
gfp               257 security/apparmor/policy.c 				    gfp_t gfp)
gfp               263 security/apparmor/policy.c 			  gfp);
gfp               267 security/apparmor/policy.c 	if (!aa_policy_init(&profile->base, NULL, hname, gfp))
gfp               269 security/apparmor/policy.c 	if (!aa_label_init(&profile->label, 1, gfp))
gfp               274 security/apparmor/policy.c 		proxy = aa_alloc_proxy(&profile->label, gfp);
gfp               503 security/apparmor/policy.c 				       const char *base, gfp_t gfp)
gfp               513 security/apparmor/policy.c 			       gfp);
gfp               521 security/apparmor/policy.c 	name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, gfp);
gfp               534 security/apparmor/policy.c 	profile = aa_alloc_profile(name, NULL, gfp);
gfp               124 security/apparmor/secid.c int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
gfp               129 security/apparmor/secid.c 	idr_preload(gfp);
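secid.c:129 above preloads the IDR with the caller's gfp so that the ID allocation itself can later run in a section that must not sleep. The usual shape of that kernel pattern (a generic sketch with a hypothetical lock and idr, not the exact AppArmor code):

    /* gfp is only honoured by the preload; inside the locked region the
     * allocation must not sleep, hence GFP_ATOMIC for idr_alloc() */
    idr_preload(gfp);
    spin_lock(&my_lock);                                    /* hypothetical lock */
    id = idr_alloc(&my_idr, ptr, 1, 0, GFP_ATOMIC);         /* end = 0: no upper bound */
    spin_unlock(&my_lock);
    idr_preload_end();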
gfp               493 security/security.c static int lsm_cred_alloc(struct cred *cred, gfp_t gfp)
gfp               500 security/security.c 	cred->security = kzalloc(blob_sizes.lbs_cred, gfp);
gfp              1524 security/security.c int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
gfp              1526 security/security.c 	int rc = lsm_cred_alloc(cred, gfp);
gfp              1531 security/security.c 	rc = call_int_hook(cred_alloc_blank, 0, cred, gfp);
gfp              1552 security/security.c int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
gfp              1554 security/security.c 	int rc = lsm_cred_alloc(new, gfp);
gfp              1559 security/security.c 	rc = call_int_hook(cred_prepare, 0, new, old, gfp);
gfp              2229 security/security.c 			       gfp_t gfp)
gfp              2231 security/security.c 	return call_int_hook(xfrm_policy_alloc_security, 0, ctxp, sec_ctx, gfp);
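security.c:493-1559 above shows how the cred hooks thread gfp through two layers: lsm_cred_alloc() first kzallocs the shared LSM blob (blob_sizes.lbs_cred bytes), then every module's hook is called with the same flags. A condensed sketch pieced together from those lines (error unwinding on hook failure is omitted here):

    int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
    {
            /* blob first: cred->security = kzalloc(blob_sizes.lbs_cred, gfp) */
            int rc = lsm_cred_alloc(cred, gfp);

            if (rc)
                    return rc;
            /* then each registered LSM sees the same gfp */
            return call_int_hook(cred_alloc_blank, 0, cred, gfp);
    }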
gfp              3896 security/selinux/hooks.c 				gfp_t gfp)
gfp               264 security/selinux/include/security.h 			    u32 *out_sid, gfp_t gfp);
gfp               267 security/selinux/include/security.h 				const char *scontext, u32 *out_sid, gfp_t gfp);
gfp                15 security/selinux/include/xfrm.h 			      gfp_t gfp);
gfp              1528 security/selinux/ss/services.c 			    gfp_t gfp)
gfp              1531 security/selinux/ss/services.c 					    sid, SECSID_NULL, gfp, 0);
gfp              1535 security/selinux/ss/services.c 				const char *scontext, u32 *sid, gfp_t gfp)
gfp              1538 security/selinux/ss/services.c 				       sid, gfp);
gfp                76 security/selinux/xfrm.c 				   gfp_t gfp)
gfp                92 security/selinux/xfrm.c 	ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
gfp               102 security/selinux/xfrm.c 				     &ctx->ctx_sid, gfp);
gfp               286 security/selinux/xfrm.c 			      gfp_t gfp)
gfp               288 security/selinux/xfrm.c 	return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
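xfrm.c:76-102 above sizes the SELinux security context as a single kmalloc (the xfrm_sec_ctx header plus the context string and its NUL) and reuses the caller's gfp for the string-to-SID conversion, so an atomic caller never hits a sleeping allocation halfway through. A reduced sketch of that allocation shape (the SELinux state argument and error unwinding are omitted, and str/str_len stand in for the user-supplied context string):

    ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);         /* header + string, one block */
    if (!ctx)
            return -ENOMEM;
    memcpy(ctx->ctx_str, str, str_len);
    ctx->ctx_str[str_len] = '\0';
    rc = security_context_to_sid(ctx->ctx_str, str_len,     /* args abridged */
                                 &ctx->ctx_sid, gfp);       /* same gfp reused */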
gfp               347 security/smack/smack_lsm.c 				gfp_t gfp)
gfp               354 security/smack/smack_lsm.c 		nrp = kmem_cache_zalloc(smack_rule_cache, gfp);
gfp               374 security/smack/smack_lsm.c 				gfp_t gfp)
gfp               380 security/smack/smack_lsm.c 		nklep = kzalloc(sizeof(struct smack_known_list_elem), gfp);
gfp              1907 security/smack/smack_lsm.c static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
gfp              1944 security/smack/smack_lsm.c 			      gfp_t gfp)
gfp              1952 security/smack/smack_lsm.c 	rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
gfp              1957 security/smack/smack_lsm.c 				gfp);
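smack_lsm.c:347-380 above copies the per-task rule and relabel lists element by element, allocating each new node with the gfp that the cred hook was given. A generic sketch of that copy-a-list-under-gfp pattern (the struct and helper names here are hypothetical, not Smack's):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* hypothetical node type standing in for Smack's rule element */
    struct example_rule {
            struct list_head list;
            int data;
    };

    static int example_copy_rules(struct list_head *dst, struct list_head *src,
                                  gfp_t gfp)
    {
            struct example_rule *orp, *nrp;

            list_for_each_entry(orp, src, list) {
                    nrp = kzalloc(sizeof(*nrp), gfp);       /* per-element alloc */
                    if (!nrp)
                            return -ENOMEM;                 /* caller frees the partial copy */
                    nrp->data = orp->data;
                    list_add_tail(&nrp->list, dst);
            }
            return 0;
    }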
gfp                37 security/tomoyo/tomoyo.c 			       gfp_t gfp)
gfp               726 tools/perf/builtin-kmem.c 	struct gfp_flag *gfp;
gfp               728 tools/perf/builtin-kmem.c 	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
gfp               729 tools/perf/builtin-kmem.c 	if (gfp)
gfp               730 tools/perf/builtin-kmem.c 		return gfp->compact_str;
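builtin-kmem.c:726-730 above resolves a raw gfp bitmask to a short printable name by binary-searching a sorted gfps[] table. A self-contained userspace illustration of the same lookup (the table entries and values below are made up for the example):

    #include <stdlib.h>

    struct gfp_flag {
            unsigned long flags;            /* key: the raw gfp value */
            const char *compact_str;        /* value: short printable form */
    };

    /* table must stay sorted by .flags for bsearch() to work */
    static struct gfp_flag gfps[] = {
            { 0x000400ul, "A"   },
            { 0x0014c0ul, "K"   },
            { 0x006200ul, "GFP" },
    };

    static int gfpcmp(const void *a, const void *b)
    {
            const struct gfp_flag *fa = a, *fb = b;

            return fa->flags < fb->flags ? -1 : fa->flags > fb->flags;
    }

    static const char *compact_gfp_string(unsigned long gfp_flags)
    {
            struct gfp_flag key = { .flags = gfp_flags };
            struct gfp_flag *gfp = bsearch(&key, gfps,
                                           sizeof(gfps) / sizeof(gfps[0]),
                                           sizeof(gfps[0]), gfpcmp);

            return gfp ? gfp->compact_str : NULL;
    }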
gfp                74 tools/testing/radix-tree/linux.c void *kmalloc(size_t size, gfp_t gfp)
gfp                78 tools/testing/radix-tree/linux.c 	if (!(gfp & __GFP_DIRECT_RECLAIM))
gfp                85 tools/testing/radix-tree/linux.c 	if (gfp & __GFP_ZERO)
gfp                15 tools/testing/radix-tree/linux/slab.h static inline void *kzalloc(size_t size, gfp_t gfp)
gfp                17 tools/testing/radix-tree/linux/slab.h         return kmalloc(size, gfp | __GFP_ZERO);
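The radix-tree test harness above replaces kmalloc() with a userspace shim: it can refuse allocations whose flags do not allow direct reclaim, and it honours __GFP_ZERO, which is also how its kzalloc() wrapper is built on top of kmalloc(). A simplified, self-contained version of such a shim (the flag bit values and the failure knob are made up for the example):

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned gfp_t;
    #define __GFP_DIRECT_RECLAIM    0x400u  /* illustrative bit values */
    #define __GFP_ZERO              0x100u

    static int fail_nonblocking_allocs;     /* test knob: fail GFP_ATOMIC-style calls */

    static void *test_kmalloc(size_t size, gfp_t gfp)
    {
            void *p;

            if (!(gfp & __GFP_DIRECT_RECLAIM) && fail_nonblocking_allocs)
                    return NULL;            /* simulate atomic-allocation failure */

            p = malloc(size);
            if (p && (gfp & __GFP_ZERO))
                    memset(p, 0, size);     /* kzalloc() is kmalloc() | __GFP_ZERO */
            return p;
    }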
gfp                53 tools/virtio/linux/kernel.h static inline void *kmalloc(size_t s, gfp_t gfp)
gfp                59 tools/virtio/linux/kernel.h static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
gfp                61 tools/virtio/linux/kernel.h 	return kmalloc(n * s, gfp);
gfp                64 tools/virtio/linux/kernel.h static inline void *kzalloc(size_t s, gfp_t gfp)
gfp                66 tools/virtio/linux/kernel.h 	void *p = kmalloc(s, gfp);
gfp                72 tools/virtio/linux/kernel.h static inline void *alloc_pages_exact(size_t s, gfp_t gfp)
gfp                74 tools/virtio/linux/kernel.h 	return kmalloc(s, gfp);
gfp                89 tools/virtio/linux/kernel.h static inline void *krealloc(void *p, size_t s, gfp_t gfp)
gfp                95 tools/virtio/linux/kernel.h static inline unsigned long __get_free_page(gfp_t gfp)
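The tools/virtio stubs above show how little of gfp the userspace harness needs: kmalloc() forwards to malloc, kmalloc_array() multiplies and forwards, kzalloc() zeroes, and alloc_pages_exact() falls back to the same path. A compact self-contained rendering of those wrappers (simplified relative to the real header, which the lines above only show in part):

    #include <stdlib.h>
    #include <string.h>

    typedef unsigned gfp_t;                 /* flags are accepted but ignored here */

    static inline void *kmalloc(size_t s, gfp_t gfp)
    {
            (void)gfp;
            return malloc(s);
    }

    static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp)
    {
            return kmalloc(n * s, gfp);     /* as in kernel.h:61 above */
    }

    static inline void *kzalloc(size_t s, gfp_t gfp)
    {
            void *p = kmalloc(s, gfp);

            if (p)
                    memset(p, 0, s);        /* the zeroing kzalloc() promises */
            return p;
    }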
gfp                34 tools/virtio/linux/virtio.h 		      gfp_t gfp);
gfp                39 tools/virtio/linux/virtio.h 			 gfp_t gfp);
gfp                44 tools/virtio/linux/virtio.h 			gfp_t gfp);
gfp                27 tools/virtio/ringtest/ptr_ring.c static void *kmalloc(unsigned size, gfp_t gfp)
gfp                33 tools/virtio/ringtest/ptr_ring.c 	if (gfp & __GFP_ZERO)