/linux-4.4.14/include/linux/ |
D | gfp.h | 408 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 412 __alloc_pages(gfp_t gfp_mask, unsigned int order, in __alloc_pages() argument 415 return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL); in __alloc_pages() 423 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument 428 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); in __alloc_pages_node() 436 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() argument 442 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node() 446 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order); 449 alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages() argument 451 return alloc_pages_current(gfp_mask, order); in alloc_pages() [all …]
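The gfp.h wrappers above all funnel into __alloc_pages_nodemask(). A minimal caller-side sketch (the helper name is hypothetical; kernel-module context assumed):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Four contiguous pages (order 2); GFP_KERNEL may sleep and reclaim. */
    static struct page *grab_buffer(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 2);

            if (!page)
                    return NULL;
            return page;    /* map with page_address(), free with __free_pages(page, 2) */
    }

On NUMA systems, alloc_pages_node(nid, gfp_mask, order) pins the allocation to a node, as the excerpt shows.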
|
D | mempool.h | 11 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); 30 gfp_t gfp_mask, int nid); 34 extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); 42 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); 55 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); 67 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
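These prototypes pair an allocation callback with a guaranteed reserve. A minimal setup sketch using the stock slab callbacks declared above (cache name and sizes are hypothetical):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static struct kmem_cache *obj_cache;
    static mempool_t *obj_pool;

    static int obj_pool_init(void)
    {
            obj_cache = kmem_cache_create("obj_cache", 128, 0, 0, NULL);
            if (!obj_cache)
                    return -ENOMEM;
            /* Keep 16 preallocated elements so low-memory paths make progress. */
            obj_pool = mempool_create(16, mempool_alloc_slab,
                                      mempool_free_slab, obj_cache);
            if (!obj_pool) {
                    kmem_cache_destroy(obj_cache);
                    return -ENOMEM;
            }
            return 0;
    }

mempool_alloc(obj_pool, GFP_NOIO) then falls back to the reserve instead of failing, which is why mempool_alloc() in mm/mempool.c below ORs __GFP_NOMEMALLOC, __GFP_NORETRY, and __GFP_NOWARN into the caller's gfp_mask.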
|
D | cpuset.h | 51 extern int __cpuset_node_allowed(int node, gfp_t gfp_mask); 53 static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument 55 return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask); in cpuset_node_allowed() 58 static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument 60 return cpuset_node_allowed(zone_to_nid(z), gfp_mask); in cpuset_zone_allowed() 175 static inline int cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument 180 static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
|
D | page_owner.h | 10 unsigned int order, gfp_t gfp_mask); 22 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument 27 __set_page_owner(page, order, gfp_mask); in set_page_owner() 42 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
|
D | idr.h | 80 void idr_preload(gfp_t gfp_mask); 81 int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); 82 int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); 162 int ida_pre_get(struct ida *ida, gfp_t gfp_mask); 169 gfp_t gfp_mask);
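idr_preload()/idr_alloc() split ID allocation into a sleeping preload and an atomic insert. A minimal sketch (my_idr and my_lock are hypothetical):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_lock);

    static int register_obj(void *obj)
    {
            int id;

            idr_preload(GFP_KERNEL);        /* may sleep; fills per-cpu cache */
            spin_lock(&my_lock);
            id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT); /* end = 0: no upper bound */
            spin_unlock(&my_lock);
            idr_preload_end();
            return id;      /* >= 1 on success, -ENOMEM/-ENOSPC on failure */
    }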
|
D | pagemap.h | 74 gfp_t gfp_mask) in mapping_gfp_constraint() argument 76 return mapping_gfp_mask(mapping) & gfp_mask; in mapping_gfp_constraint() 325 pgoff_t offset, gfp_t gfp_mask) in find_or_create_page() argument 329 gfp_mask); in find_or_create_page() 380 pgoff_t index, gfp_t gfp_mask); 657 pgoff_t index, gfp_t gfp_mask); 659 pgoff_t index, gfp_t gfp_mask); 663 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); 670 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache() argument 675 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); in add_to_page_cache()
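mapping_gfp_constraint() exists so page-cache lookups honor both the caller's context and the mapping's own mask; fs/buffer.c below uses exactly this pattern. A minimal sketch (the helper is hypothetical):

    #include <linux/pagemap.h>

    static struct page *get_cached(struct address_space *mapping, pgoff_t index)
    {
            /* Drop __GFP_FS from the mapping's mask so reclaim cannot
             * recurse into the filesystem; page is returned locked. */
            return find_or_create_page(mapping, index,
                            mapping_gfp_constraint(mapping, ~__GFP_FS));
    }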
|
D | netlink.h | 74 gfp_t gfp_mask); 77 gfp_t gfp_mask) in netlink_alloc_skb() argument 79 return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask); in netlink_alloc_skb() 101 netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in netlink_skb_clone() argument 105 nskb = skb_clone(skb, gfp_mask); in netlink_skb_clone()
|
D | radix-tree.h | 109 gfp_t gfp_mask; member 115 .gfp_mask = (mask), \ 125 (root)->gfp_mask = (mask); \ 280 int radix_tree_preload(gfp_t gfp_mask); 281 int radix_tree_maybe_preload(gfp_t gfp_mask);
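The root's gfp_mask member records how nodes may be allocated, so inserts under a spinlock use the preload pattern above. A minimal sketch (tree and lock names hypothetical):

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    static RADIX_TREE(my_tree, GFP_ATOMIC);
    static DEFINE_SPINLOCK(my_tree_lock);

    static int stash(unsigned long index, void *item)
    {
            int err = radix_tree_preload(GFP_KERNEL);       /* may sleep here... */

            if (err)
                    return err;
            spin_lock(&my_tree_lock);
            err = radix_tree_insert(&my_tree, index, item); /* ...but not here */
            spin_unlock(&my_tree_lock);
            radix_tree_preload_end();
            return err;
    }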
|
D | swap.h | 320 gfp_t gfp_mask, nodemask_t *mask); 324 gfp_t gfp_mask, 327 gfp_t gfp_mask, bool noswap, 475 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) in add_swap_count_continuation() argument 497 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument 519 gfp_t gfp_mask) in add_to_swap_cache() argument
|
D | mISDNif.h | 538 mI_alloc_skb(unsigned int len, gfp_t gfp_mask) in mI_alloc_skb() argument 542 skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); in mI_alloc_skb() 549 _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) in _alloc_mISDN_skb() argument 551 struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); in _alloc_mISDN_skb() 566 u_int id, u_int len, void *dp, gfp_t gfp_mask) in _queue_data() argument 572 skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask); in _queue_data()
|
D | bio.h | 444 static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) in bio_alloc() argument 446 return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); in bio_alloc() 449 static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) in bio_clone() argument 451 return bio_clone_bioset(bio, gfp_mask, fs_bio_set); in bio_clone() 454 static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) in bio_kmalloc() argument 456 return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL); in bio_kmalloc() 459 static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask) in bio_clone_kmalloc() argument 461 return bio_clone_bioset(bio, gfp_mask, NULL); in bio_clone_kmalloc() 803 gfp_t gfp_mask) in bio_integrity_clone() argument
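bio_alloc() draws from fs_bio_set so forward progress is guaranteed on the writeback path. A minimal sketch of building a one-segment bio (hypothetical helper; 4.4 field names):

    #include <linux/bio.h>

    static struct bio *build_bio(struct block_device *bdev, sector_t sector,
                                 struct page *page)
    {
            /* GFP_NOIO: we may already hold I/O-path locks. */
            struct bio *bio = bio_alloc(GFP_NOIO, 1);

            if (!bio)
                    return NULL;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
                    bio_put(bio);
                    return NULL;
            }
            return bio;     /* caller submits with submit_bio(READ, bio) */
    }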
|
D | connector.h | 74 int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask); 75 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
|
D | textsearch.h | 162 gfp_t gfp_mask) in alloc_ts_config() argument 166 conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask); in alloc_ts_config()
|
D | compaction.h | 41 extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 56 static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, in try_to_compact_pages() argument
|
D | skbuff.h | 822 gfp_t gfp_mask); 866 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); 870 gfp_t gfp_mask, bool fclone); 872 gfp_t gfp_mask) in __pskb_copy() argument 874 return __pskb_copy_fclone(skb, headroom, gfp_mask, false); in __pskb_copy() 877 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); 2255 static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) in skb_orphan_frags() argument 2259 return skb_copy_ubufs(skb, gfp_mask); in skb_orphan_frags() 2281 gfp_t gfp_mask); 2304 gfp_t gfp_mask) in __dev_alloc_skb() argument [all …]
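Most skb allocations happen in softirq context, which is why GFP_ATOMIC dominates these call sites. A minimal receive-side sketch (hypothetical helper):

    #include <linux/skbuff.h>

    static struct sk_buff *rx_alloc(unsigned int len)
    {
            struct sk_buff *skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);

            if (!skb)
                    return NULL;
            skb_reserve(skb, NET_IP_ALIGN); /* align the IP header */
            return skb;
    }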
|
D | vmalloc.h | 77 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); 79 unsigned long start, unsigned long end, gfp_t gfp_mask,
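__vmalloc() takes the gfp_mask explicitly, letting callers such as the ntfs helper further down add __GFP_ZERO or strip flags. A minimal sketch of a zeroed, virtually contiguous allocation:

    #include <linux/vmalloc.h>

    /* Large table that needs only virtual, not physical, contiguity. */
    static void *big_table(unsigned long size)
    {
            return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
    }

The buffer is released with vfree().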
|
D | shrinker.h | 12 gfp_t gfp_mask; member
|
D | page_ext.h | 46 gfp_t gfp_mask; member
|
D | audit.h | 453 void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, 456 extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); 504 void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, in audit_log() argument 508 gfp_t gfp_mask, int type) in audit_log_start() argument
|
D | memcontrol.h | 297 gfp_t gfp_mask, struct mem_cgroup **memcgp); 466 gfp_t gfp_mask, 515 gfp_t gfp_mask, in mem_cgroup_try_charge() argument 658 gfp_t gfp_mask, in mem_cgroup_soft_limit_reclaim() argument
|
D | blkdev.h | 779 struct bio_set *bs, gfp_t gfp_mask, 1122 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); 1124 sector_t nr_sects, gfp_t gfp_mask, struct page *page); 1126 sector_t nr_sects, gfp_t gfp_mask, bool discard); 1128 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) in sb_issue_discard() argument 1132 gfp_mask, flags); in sb_issue_discard() 1135 sector_t nr_blocks, gfp_t gfp_mask) in sb_issue_zeroout() argument 1140 gfp_mask, true); in sb_issue_zeroout() 1698 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, in blkdev_issue_flush() argument
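The discard/zeroout helpers thread gfp_mask down to bio_alloc() so filesystems can choose a reclaim-safe context. A minimal sketch (the wrapper is hypothetical):

    #include <linux/blkdev.h>

    static int trim_range(struct block_device *bdev, sector_t start,
                          sector_t nr_sects)
    {
            /* GFP_NOFS: a filesystem may call this inside a transaction. */
            return blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
    }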
|
D | scatterlist.h | 254 gfp_t gfp_mask); 267 gfp_t gfp_mask);
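sg_alloc_table() allocates the (possibly chained) scatterlist with the caller's gfp_mask. A minimal sketch mapping an existing page array (hypothetical helper):

    #include <linux/scatterlist.h>

    static int map_pages(struct page **pages, unsigned int n,
                         struct sg_table *sgt)
    {
            struct scatterlist *sg;
            unsigned int i;
            int err = sg_alloc_table(sgt, n, GFP_KERNEL);

            if (err)
                    return err;
            for_each_sg(sgt->sgl, sg, n, i)
                    sg_set_page(sg, pages[i], PAGE_SIZE, 0);
            return 0;       /* release with sg_free_table(sgt) */
    }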
|
D | kmod.h | 73 call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask,
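call_usermodehelper_setup() takes a gfp_mask because UMH_NO_WAIT callers may be atomic (kernel/kmod.c below picks GFP_ATOMIC versus GFP_KERNEL on exactly that basis); the common wrapper hides it. A minimal sketch (the helper path is hypothetical):

    #include <linux/kmod.h>

    static int run_helper(void)
    {
            char *argv[] = { "/sbin/myhelper", "start", NULL };
            char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/bin", NULL };

            /* UMH_WAIT_PROC sleeps, so GFP_KERNEL is used internally. */
            return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
    }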
|
D | oom.h | 27 const gfp_t gfp_mask; member
|
D | shmem_fs.h | 56 pgoff_t index, gfp_t gfp_mask);
|
D | btree.h | 48 void *btree_alloc(gfp_t gfp_mask, void *pool_data);
|
D | kfifo.h | 332 #define kfifo_alloc(fifo, size, gfp_mask) \ argument 338 __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ 772 size_t esize, gfp_t gfp_mask);
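kfifo_alloc() is a macro so it can size elements from the fifo's declared type before calling __kfifo_alloc() with the gfp_mask. A minimal sketch (hypothetical byte fifo):

    #include <linux/kfifo.h>

    static struct kfifo log_fifo;

    static int log_fifo_init(void)
    {
            /* The size is rounded up to a power of two internally. */
            return kfifo_alloc(&log_fifo, 4096, GFP_KERNEL);
    }

Data then moves with kfifo_in()/kfifo_out(), and kfifo_free() releases the buffer.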
|
D | elevator.h | 140 struct bio *bio, gfp_t gfp_mask);
|
D | suspend.h | 379 extern unsigned long get_safe_page(gfp_t gfp_mask);
|
D | writeback.h | 322 void throttle_vm_writeout(gfp_t gfp_mask);
|
D | jbd2.h | 1224 gfp_t gfp_mask, unsigned int type, 1227 extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
|
D | workqueue.h | 423 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
|
D | lockdep.h | 354 extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
|
D | mm.h | 1262 extern int try_to_release_page(struct page * page, gfp_t gfp_mask); 1815 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
|
D | device.h | 675 gfp_t gfp_mask, unsigned int order);
|
/linux-4.4.14/mm/ |
D | mempool.c | 189 gfp_t gfp_mask, int node_id) in mempool_create_node() argument 192 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node() 196 gfp_mask, node_id); in mempool_create_node() 214 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_create_node() 315 void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument 322 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc() 323 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in mempool_alloc() 325 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc() 326 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ in mempool_alloc() 327 gfp_mask |= __GFP_NOWARN; /* failures are OK */ in mempool_alloc() [all …]
|
D | swap_state.c | 119 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) in add_to_swap_cache() argument 123 error = radix_tree_maybe_preload(gfp_mask); in add_to_swap_cache() 291 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument 314 new_page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async() 322 err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL); in __read_swap_cache_async() 389 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument 393 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async() 465 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead() argument 489 gfp_mask, vma, addr); in swapin_readahead() 500 return read_swap_cache_async(entry, gfp_mask, vma, addr); in swapin_readahead()
|
D | page_alloc.c | 2115 gfp_t gfp_mask; in split_page() local 2129 gfp_mask = get_page_owner_gfp(page); in split_page() 2130 set_page_owner(page, 0, gfp_mask); in split_page() 2133 set_page_owner(page + i, 0, gfp_mask); in split_page() 2310 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument 2314 if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page() 2316 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page() 2319 (gfp_mask & __GFP_DIRECT_RECLAIM)) in should_fail_alloc_page() 2360 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument 2500 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument [all …]
|
D | vmscan.c | 66 gfp_t gfp_mask; member 406 static unsigned long shrink_slab(gfp_t gfp_mask, int nid, in shrink_slab() argument 433 .gfp_mask = gfp_mask, in shrink_slab() 933 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list() 934 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list() 1048 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list() 1157 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list() 1238 .gfp_mask = GFP_KERNEL, in reclaim_clean_pages_from_list() 1479 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated() 2296 throttle_vm_writeout(sc->gfp_mask); in shrink_lruvec() [all …]
|
D | page_owner.c | 60 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) in __set_page_owner() argument 73 page_ext->gfp_mask = gfp_mask; in __set_page_owner() 83 return page_ext->gfp_mask; in __get_page_owner_gfp() 104 page_ext->order, page_ext->gfp_mask); in print_page_owner() 111 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); in print_page_owner()
|
D | vmalloc.c | 354 int node, gfp_t gfp_mask) in alloc_vmap_area() argument 367 gfp_mask & GFP_RECLAIM_MASK, node); in alloc_vmap_area() 375 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area() 818 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument 830 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block() 836 node, gfp_mask); in new_vmap_block() 842 err = radix_tree_preload(gfp_mask); in new_vmap_block() 934 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument 982 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc() 1332 unsigned long end, int node, gfp_t gfp_mask, const void *caller) in __get_vm_area_node() argument [all …]
|
D | page_isolation.c | 278 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; in alloc_migrate_target() local 294 gfp_mask |= __GFP_HIGHMEM; in alloc_migrate_target() 296 return alloc_page(gfp_mask); in alloc_migrate_target()
|
D | oom_kill.c | 213 enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask); in constrained_alloc() 227 if (oc->gfp_mask & __GFP_THISNODE) in constrained_alloc() 246 if (!cpuset_zone_allowed(zone, oc->gfp_mask)) in constrained_alloc() 391 current->comm, oc->gfp_mask, oc->order, in dump_header() 749 .gfp_mask = 0, in pagefault_out_of_memory()
|
D | filemap.c | 139 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete() 516 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page() argument 524 error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); in replace_page_cache_page() 609 pgoff_t offset, gfp_t gfp_mask, in __add_to_page_cache_locked() argument 621 gfp_mask, &memcg); in __add_to_page_cache_locked() 626 error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); in __add_to_page_cache_locked() 672 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_locked() argument 675 gfp_mask, NULL); in add_to_page_cache_locked() 680 pgoff_t offset, gfp_t gfp_mask) in add_to_page_cache_lru() argument 687 gfp_mask, &shadow); in add_to_page_cache_lru() [all …]
|
D | compaction.c | 1336 const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); in compact_zone() 1479 gfp_t gfp_mask, enum migrate_mode mode, int *contended, in compact_zone_order() argument 1487 .gfp_mask = gfp_mask, in compact_zone_order() 1519 unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument 1523 int may_enter_fs = gfp_mask & __GFP_FS; in try_to_compact_pages() 1524 int may_perform_io = gfp_mask & __GFP_IO; in try_to_compact_pages() 1536 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); in try_to_compact_pages() 1547 status = compact_zone_order(zone, order, gfp_mask, mode, in try_to_compact_pages()
|
D | memcontrol.c | 1336 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument 1342 .gfp_mask = gfp_mask, in mem_cgroup_out_of_memory() 1516 gfp_t gfp_mask, in mem_cgroup_soft_reclaim() argument 1555 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, in mem_cgroup_soft_reclaim() 2027 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument 2075 if (!gfpflags_allow_blocking(gfp_mask)) in try_charge() 2081 gfp_mask, may_swap); in try_charge() 2092 if (gfp_mask & __GFP_NORETRY) in try_charge() 2115 if (gfp_mask & __GFP_NOFAIL) in try_charge() 2123 mem_cgroup_oom(mem_over_limit, gfp_mask, in try_charge() [all …]
|
D | internal.h | 209 const gfp_t gfp_mask; /* gfp mask of a direct compactor */ member
|
D | nommu.c | 264 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) in __vmalloc() argument 270 return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); in __vmalloc()
|
D | page-writeback.c | 1925 void throttle_vm_writeout(gfp_t gfp_mask) in throttle_vm_writeout() argument 1950 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) in throttle_vm_writeout()
|
D | vmstat.c | 1098 page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); in pagetypeinfo_showmixedcount_print()
|
D | swapfile.c | 2772 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) in add_swap_count_continuation() argument 2785 page = alloc_page(gfp_mask | __GFP_HIGHMEM); in add_swap_count_continuation()
|
/linux-4.4.14/fs/nfs/blocklayout/ |
D | dev.c | 189 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask); 194 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument 199 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple() 221 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument 226 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice() 237 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument 250 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat() 266 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument 279 volumes, v->stripe.volumes[i], gfp_mask); in bl_parse_stripe() 295 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_deviceid() argument [all …]
|
D | blocklayout.h | 166 struct pnfs_device *pdev, gfp_t gfp_mask); 183 struct pnfs_block_volume *b, gfp_t gfp_mask);
|
D | blocklayout.c | 542 gfp_t gfp_mask) in bl_alloc_extent() argument 562 lo->plh_lc_cred, gfp_mask); in bl_alloc_extent() 596 gfp_t gfp_mask) in bl_alloc_lseg() argument 616 lseg = kzalloc(sizeof(*lseg), gfp_mask); in bl_alloc_lseg() 621 scratch = alloc_page(gfp_mask); in bl_alloc_lseg() 642 status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask); in bl_alloc_lseg()
|
D | rpc_pipefs.c | 54 gfp_t gfp_mask) in bl_resolve_deviceid() argument 77 msg->data = kzalloc(msg->len, gfp_mask); in bl_resolve_deviceid()
|
/linux-4.4.14/fs/btrfs/ |
D | ulist.h | 55 struct ulist *ulist_alloc(gfp_t gfp_mask); 57 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask); 59 u64 *old_aux, gfp_t gfp_mask); 64 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument 68 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr() 72 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
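btrfs ulists are throwaway sets built during backref walks, so every mutation takes a gfp_mask (GFP_NOFS in filesystem context). A minimal sketch under those assumptions (values are hypothetical):

    #include "ulist.h"      /* fs/btrfs */

    static int walk_refs(void)
    {
            struct ulist_iterator uiter;
            struct ulist_node *node;
            struct ulist *ul = ulist_alloc(GFP_NOFS);

            if (!ul)
                    return -ENOMEM;
            ulist_add(ul, 4096, 0, GFP_NOFS);       /* val, aux */
            ULIST_ITER_INIT(&uiter);
            while ((node = ulist_next(ul, &uiter)))
                    ;       /* consume node->val / node->aux */
            ulist_free(ul);
            return 0;
    }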
|
D | ulist.c | 92 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument 94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc() 186 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument 188 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add() 192 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument 203 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
|
D | extent_io.h | 350 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); 351 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
|
D | backref.c | 197 gfp_t gfp_mask) in __add_prelim_ref() argument 204 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask); in __add_prelim_ref()
|
D | extent_io.c | 2778 struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) in btrfs_bio_clone() argument 2783 new = bio_clone_bioset(bio, gfp_mask, btrfs_bioset); in btrfs_bio_clone() 2800 struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) in btrfs_io_bio_alloc() argument 2805 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset); in btrfs_io_bio_alloc()
|
/linux-4.4.14/block/ |
D | blk-lib.c | 41 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) in blkdev_issue_discard() argument 78 bio = bio_alloc(gfp_mask, 1); in blkdev_issue_discard() 145 sector_t nr_sects, gfp_t gfp_mask, in blkdev_issue_write_same() argument 166 bio = bio_alloc(gfp_mask, 1); in blkdev_issue_write_same() 216 sector_t nr_sects, gfp_t gfp_mask) in __blkdev_issue_zeroout() argument 230 bio = bio_alloc(gfp_mask, in __blkdev_issue_zeroout() 286 sector_t nr_sects, gfp_t gfp_mask, bool discard) in blkdev_issue_zeroout() argument 291 blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0) in blkdev_issue_zeroout() 295 blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, in blkdev_issue_zeroout() 299 return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask); in blkdev_issue_zeroout()
|
D | blk-map.c | 83 const struct iov_iter *iter, gfp_t gfp_mask) in blk_rq_map_user_iov() argument 111 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in blk_rq_map_user_iov() 113 bio = bio_map_user_iov(q, iter, gfp_mask); in blk_rq_map_user_iov() 145 unsigned long len, gfp_t gfp_mask) in blk_rq_map_user() argument 154 return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); in blk_rq_map_user() 204 unsigned int len, gfp_t gfp_mask) in blk_rq_map_kern() argument 219 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern() 221 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
|
D | bio.c | 174 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, in bvec_alloc() argument 211 bvl = mempool_alloc(pool, gfp_mask); in bvec_alloc() 214 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO); in bvec_alloc() 228 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) { in bvec_alloc() 423 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) in bio_alloc_bioset() argument 425 gfp_t saved_gfp = gfp_mask; in bio_alloc_bioset() 439 gfp_mask); in bio_alloc_bioset() 468 gfp_mask &= ~__GFP_DIRECT_RECLAIM; in bio_alloc_bioset() 470 p = mempool_alloc(bs->bio_pool, gfp_mask); in bio_alloc_bioset() 471 if (!p && gfp_mask != saved_gfp) { in bio_alloc_bioset() [all …]
|
D | blk.h | 62 gfp_t gfp_mask); 267 gfp_t gfp_mask); 270 int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node); 284 static inline struct io_context *create_io_context(gfp_t gfp_mask, int node) in create_io_context() argument 288 create_task_io_context(current, gfp_mask, node); in create_io_context()
|
D | bio-integrity.c | 51 gfp_t gfp_mask, in bio_integrity_alloc() argument 61 sizeof(struct bio_vec) * nr_vecs, gfp_mask); in bio_integrity_alloc() 64 bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); in bio_integrity_alloc() 74 bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, in bio_integrity_alloc() 460 gfp_t gfp_mask) in bio_integrity_clone() argument 467 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt); in bio_integrity_clone()
|
D | blk-core.c | 604 static void *alloc_request_struct(gfp_t gfp_mask, void *data) in alloc_request_struct() argument 607 return kmem_cache_alloc_node(request_cachep, gfp_mask, nid); in alloc_request_struct() 616 gfp_t gfp_mask) in blk_init_rl() argument 629 (void *)(long)q->node, gfp_mask, in blk_init_rl() 643 struct request_queue *blk_alloc_queue(gfp_t gfp_mask) in blk_alloc_queue() argument 645 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); in blk_alloc_queue() 683 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) in blk_alloc_queue_node() argument 689 gfp_mask | __GFP_ZERO, node_id); in blk_alloc_queue_node() 693 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask); in blk_alloc_queue_node() 1061 struct bio *bio, gfp_t gfp_mask) in __get_request() argument [all …]
|
D | blk-ioc.c | 358 gfp_t gfp_mask) in ioc_create_icq() argument 364 icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO, in ioc_create_icq() 369 if (radix_tree_maybe_preload(gfp_mask) < 0) { in ioc_create_icq()
|
D | bounce.c | 75 static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) in mempool_alloc_pages_isa() argument 77 return mempool_alloc_pages(gfp_mask | GFP_DMA, data); in mempool_alloc_pages_isa()
|
D | blk-cgroup.c | 92 gfp_t gfp_mask) in blkg_alloc() argument 98 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc() 102 if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) || in blkg_alloc() 103 blkg_rwstat_init(&blkg->stat_ios, gfp_mask)) in blkg_alloc() 113 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc() 126 pd = pol->pd_alloc_fn(gfp_mask, q->node); in blkg_alloc()
|
D | blk-flush.c | 462 int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, in blkdev_issue_flush() argument 485 bio = bio_alloc(gfp_mask, 0); in blkdev_issue_flush()
|
D | elevator.c | 703 struct bio *bio, gfp_t gfp_mask) in elv_set_request() argument 708 return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask); in elv_set_request()
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2_alloc.c | 39 static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, in c2_alloc_mqsp_chunk() argument 47 &dma_addr, gfp_mask); in c2_alloc_mqsp_chunk() 71 int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, in c2_init_mqsp_pool() argument 74 return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root); in c2_init_mqsp_pool() 90 dma_addr_t *dma_addr, gfp_t gfp_mask) in c2_alloc_mqsp() argument 100 if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) == in c2_alloc_mqsp()
|
D | c2.h | 541 extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask, 545 dma_addr_t *dma_addr, gfp_t gfp_mask);
|
/linux-4.4.14/lib/ |
D | idr.c | 94 static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) in idr_layer_alloc() argument 109 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN); in idr_layer_alloc() 134 return kmem_cache_zalloc(idr_layer_cache, gfp_mask); in idr_layer_alloc() 192 static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask) in __idr_pre_get() argument 196 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); in __idr_pre_get() 221 gfp_t gfp_mask, struct idr *layer_idr) in sub_alloc() argument 273 new = idr_layer_alloc(gfp_mask, layer_idr); in sub_alloc() 290 struct idr_layer **pa, gfp_t gfp_mask, in idr_get_empty_slot() argument 302 if (!(p = idr_layer_alloc(gfp_mask, layer_idr))) in idr_get_empty_slot() 322 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) { in idr_get_empty_slot() [all …]
|
D | scatterlist.c | 164 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) in sg_kmalloc() argument 176 void *ptr = (void *) __get_free_page(gfp_mask); in sg_kmalloc() 177 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); in sg_kmalloc() 180 return kmalloc(nents * sizeof(struct scatterlist), gfp_mask); in sg_kmalloc() 277 gfp_t gfp_mask, sg_alloc_fn *alloc_fn) in __sg_alloc_table() argument 308 sg = alloc_fn(alloc_size, gfp_mask); in __sg_alloc_table() 359 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) in sg_alloc_table() argument 364 NULL, gfp_mask, sg_kmalloc); in sg_alloc_table() 395 gfp_t gfp_mask) in sg_alloc_table_from_pages() argument 409 ret = sg_alloc_table(sgt, chunks, gfp_mask); in sg_alloc_table_from_pages()
|
D | radix-tree.c | 85 return root->gfp_mask & __GFP_BITS_MASK; in root_gfp_mask() 108 root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); in root_tag_set() 113 root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); in root_tag_clear() 118 root->gfp_mask &= __GFP_BITS_MASK; in root_tag_clear_all() 123 return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); in root_tag_get() 184 gfp_t gfp_mask = root_gfp_mask(root); in radix_tree_node_alloc() local 191 if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { in radix_tree_node_alloc() 213 ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); in radix_tree_node_alloc() 254 static int __radix_tree_preload(gfp_t gfp_mask) in __radix_tree_preload() argument 264 node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); in __radix_tree_preload() [all …]
|
D | sg_split.c | 154 gfp_t gfp_mask) in sg_split() argument 159 splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask); in sg_split() 172 gfp_mask); in sg_split()
|
D | textsearch.c | 262 unsigned int len, gfp_t gfp_mask, int flags) in textsearch_prepare() argument 287 conf = ops->init(pattern, len, gfp_mask, flags); in textsearch_prepare()
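textsearch_prepare() forwards the gfp_mask to the algorithm's init hook (kmp/bm/fsm below), which sizes its private state via alloc_ts_config(). A minimal caller sketch (the pattern is hypothetical):

    #include <linux/textsearch.h>
    #include <linux/err.h>
    #include <linux/kernel.h>

    static int find_needle(const void *buf, unsigned int len)
    {
            struct ts_state state;
            unsigned int pos;
            struct ts_config *conf = textsearch_prepare("kmp", "needle", 6,
                                                        GFP_KERNEL, TS_AUTOLOAD);

            if (IS_ERR(conf))
                    return PTR_ERR(conf);
            pos = textsearch_find_continuous(conf, &state, buf, len);
            textsearch_destroy(conf);
            return pos == UINT_MAX ? -ENOENT : pos; /* UINT_MAX: no match */
    }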
|
D | ts_kmp.c | 96 gfp_t gfp_mask, int flags) in kmp_init() argument 104 conf = alloc_ts_config(priv_size, gfp_mask); in kmp_init()
|
D | ts_bm.c | 146 gfp_t gfp_mask, int flags) in bm_init() argument 154 conf = alloc_ts_config(priv_size, gfp_mask); in bm_init()
|
D | ts_fsm.c | 260 gfp_t gfp_mask, int flags) in fsm_init() argument 286 conf = alloc_ts_config(priv_size, gfp_mask); in fsm_init()
|
D | kfifo.c | 39 size_t esize, gfp_t gfp_mask) in __kfifo_alloc() argument 57 fifo->data = kmalloc(size * esize, gfp_mask); in __kfifo_alloc()
|
D | kobject.c | 146 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) in kobject_get_path() argument 154 path = kzalloc(len, gfp_mask); in kobject_get_path()
|
D | btree.c | 81 void *btree_alloc(gfp_t gfp_mask, void *pool_data) in btree_alloc() argument 83 return kmem_cache_alloc(btree_cachep, gfp_mask); in btree_alloc()
|
/linux-4.4.14/arch/tile/include/asm/ |
D | homecache.h | 92 extern struct page *homecache_alloc_pages(gfp_t gfp_mask, 94 extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, 96 #define homecache_alloc_page(gfp_mask, home) \ argument 97 homecache_alloc_pages(gfp_mask, 0, home)
|
D | kexec.h | 50 struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order);
|
/linux-4.4.14/drivers/staging/android/ion/ |
D | ion_page_pool.c | 29 struct page *page = alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages() 116 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, in ion_page_pool_shrink() argument 125 high = !!(gfp_mask & __GFP_HIGHMEM); in ion_page_pool_shrink() 150 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) in ion_page_pool_create() argument 160 pool->gfp_mask = gfp_mask | __GFP_COMP; in ion_page_pool_create()
|
D | ion_priv.h | 123 int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); 375 gfp_t gfp_mask; member 380 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); 392 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
|
D | ion_heap.c | 277 total += heap->ops->shrink(heap, sc->gfp_mask, 0); in ion_heap_shrink_count() 305 freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); in ion_heap_shrink_scan()
|
D | ion_system_heap.c | 213 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, in ion_system_heap_shrink() argument 229 nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); in ion_system_heap_shrink()
|
D | ion.c | 1477 sc.gfp_mask = -1; in debug_shrink_set() 1495 sc.gfp_mask = -1; in debug_shrink_get()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c | 98 gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument 102 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages() 104 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages() 114 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument 117 &sg_dma_address(mem), gfp_mask); in mlx4_alloc_icm_coherent() 128 gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument 136 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm() 139 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm() 143 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm() 156 gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm() [all …]
|
D | icm.h | 71 gfp_t gfp_mask, int coherent);
|
/linux-4.4.14/fs/ntfs/ |
D | malloc.h | 42 static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) in __ntfs_malloc() argument 47 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); in __ntfs_malloc() 51 return __vmalloc(size, gfp_mask, PAGE_KERNEL); in __ntfs_malloc()
|
/linux-4.4.14/net/sunrpc/auth_gss/ |
D | gss_krb5_mech.c | 378 context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_des3() argument 405 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_des3() 496 context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_new() argument 512 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 527 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 542 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 552 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 562 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 572 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new() 608 gfp_t gfp_mask) in gss_import_v2_context() argument [all …]
|
D | gss_krb5_keys.c | 145 gfp_t gfp_mask) in krb5_derive_key() argument 170 inblockdata = kmalloc(blocksize, gfp_mask); in krb5_derive_key() 174 outblockdata = kmalloc(blocksize, gfp_mask); in krb5_derive_key() 178 rawkey = kmalloc(keybytes, gfp_mask); in krb5_derive_key()
|
D | gss_mech_switch.c | 391 gfp_t gfp_mask) in gss_import_sec_context() argument 393 if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) in gss_import_sec_context() 398 *ctx_id, endtime, gfp_mask); in gss_import_sec_context()
|
/linux-4.4.14/drivers/connector/ |
D | connector.c | 74 gfp_t gfp_mask) in cn_netlink_send_mult() argument 108 skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult() 126 gfp_mask); in cn_netlink_send_mult() 128 !gfpflags_allow_blocking(gfp_mask)); in cn_netlink_send_mult() 134 gfp_t gfp_mask) in cn_netlink_send() argument 136 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); in cn_netlink_send()
|
/linux-4.4.14/drivers/scsi/ |
D | scsi.c | 108 gfp_t gfp_mask; member 121 .gfp_mask = __GFP_DMA, 154 scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask) in scsi_host_alloc_command() argument 159 cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); in scsi_host_alloc_command() 164 gfp_mask | pool->gfp_mask); in scsi_host_alloc_command() 169 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); in scsi_host_alloc_command() 193 __scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask) in __scsi_get_command() argument 195 struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask); in __scsi_get_command() 231 struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) in scsi_get_command() argument 233 struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask); in scsi_get_command() [all …]
|
D | hosts.c | 390 gfp_t gfp_mask = GFP_KERNEL; in scsi_host_alloc() local 393 gfp_mask |= __GFP_DMA; in scsi_host_alloc() 395 shost = kzalloc(sizeof(struct Scsi_Host) + privsize, gfp_mask); in scsi_host_alloc()
|
D | sg.c | 1830 gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; in sg_build_indirect() local 1857 gfp_mask |= GFP_DMA; in sg_build_indirect() 1860 gfp_mask |= __GFP_ZERO; in sg_build_indirect() 1872 schp->pages[k] = alloc_pages(gfp_mask, order); in sg_build_indirect()
|
D | eata.c | 1347 gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC; in port_detect() local 1348 ha->cp[i].sglist = kmalloc(sz, gfp_mask); in port_detect()
|
D | scsi_lib.c | 577 static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask) in scsi_sg_alloc() argument 582 return mempool_alloc(sgp->pool, gfp_mask); in scsi_sg_alloc()
|
/linux-4.4.14/drivers/infiniband/core/ |
D | sa_query.c | 515 static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) in ib_nl_send_msg() argument 529 skb = nlmsg_new(len, gfp_mask); in ib_nl_send_msg() 547 ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask); in ib_nl_send_msg() 556 static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask) in ib_nl_make_request() argument 575 ret = ib_nl_send_msg(query, gfp_mask); in ib_nl_make_request() 1039 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask) in alloc_mad() argument 1055 gfp_mask, in alloc_mad() 1089 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask) in send_mad() argument 1091 bool preload = gfpflags_allow_blocking(gfp_mask); in send_mad() 1096 idr_preload(gfp_mask); in send_mad() [all …]
|
D | sa.h | 56 int timeout_ms, gfp_t gfp_mask,
|
D | multicast.c | 563 union ib_gid *mgid, gfp_t gfp_mask) in acquire_group() argument 578 group = kzalloc(sizeof *group, gfp_mask); in acquire_group() 615 ib_sa_comp_mask comp_mask, gfp_t gfp_mask, in ib_sa_join_multicast() argument 629 member = kmalloc(sizeof *member, gfp_mask); in ib_sa_join_multicast() 644 &rec->mgid, gfp_mask); in ib_sa_join_multicast()
|
D | mad.c | 936 size_t mad_size, gfp_t gfp_mask) in alloc_send_rmpp_list() argument 950 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask); in alloc_send_rmpp_list() 954 sizeof (*seg) + seg_size, gfp_mask); in alloc_send_rmpp_list() 987 gfp_t gfp_mask, in ib_create_send_mad() argument 1018 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask); in ib_create_send_mad() 1052 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask); in ib_create_send_mad()
|
/linux-4.4.14/kernel/power/ |
D | snapshot.c | 97 static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page() argument 101 res = (void *)get_zeroed_page(gfp_mask); in get_image_page() 107 res = (void *)get_zeroed_page(gfp_mask); in get_image_page() 116 unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page() argument 118 return (unsigned long)get_image_page(gfp_mask, PG_SAFE); in get_safe_page() 121 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() argument 125 page = alloc_page(gfp_mask); in alloc_image_page() 191 gfp_t gfp_mask; /* mask for allocating pages */ member 196 chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed) in chain_init() argument 200 ca->gfp_mask = gfp_mask; in chain_init() [all …]
|
/linux-4.4.14/include/trace/events/ |
D | compaction.h | 227 gfp_t gfp_mask, 230 TP_ARGS(order, gfp_mask, mode), 234 __field(gfp_t, gfp_mask) 240 __entry->gfp_mask = gfp_mask; 246 __entry->gfp_mask,
|
D | vmscan.h | 209 __entry->gfp_flags = sc->gfp_mask;
|
/linux-4.4.14/arch/tile/mm/ |
D | homecache.c | 384 struct page *homecache_alloc_pages(gfp_t gfp_mask, in homecache_alloc_pages() argument 388 BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ in homecache_alloc_pages() 389 page = alloc_pages(gfp_mask, order); in homecache_alloc_pages() 396 struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, in homecache_alloc_pages_node() argument 400 BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ in homecache_alloc_pages_node() 401 page = alloc_pages_node(nid, gfp_mask, order); in homecache_alloc_pages_node()
|
/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/ |
D | ldlm_pool.c | 339 int nr, gfp_t gfp_mask) in ldlm_cli_pool_shrink() argument 425 static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask) in ldlm_pool_shrink() argument 430 cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask); in ldlm_pool_shrink() 813 static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask) in ldlm_pools_count() argument 820 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) in ldlm_pools_count() 858 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask); in ldlm_pools_count() 866 static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask) in ldlm_pools_scan() argument 873 if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS)) in ldlm_pools_scan() 904 freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask); in ldlm_pools_scan() 918 return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask); in ldlm_pools_cli_count() [all …]
|
/linux-4.4.14/drivers/staging/android/ |
D | lowmemorykiller.c | 108 sc->nr_to_scan, sc->gfp_mask, other_free, in lowmem_scan() 113 sc->nr_to_scan, sc->gfp_mask); in lowmem_scan() 178 sc->nr_to_scan, sc->gfp_mask, rem); in lowmem_scan()
|
D | ashmem.c | 441 if (!(sc->gfp_mask & __GFP_FS)) in ashmem_shrink_scan() 784 .gfp_mask = GFP_KERNEL, in ashmem_ioctl()
|
/linux-4.4.14/kernel/ |
D | audit.c | 182 gfp_t gfp_mask; member 451 static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask) in kauditd_send_multicast_skb() argument 470 copy = skb_copy(skb, gfp_mask); in kauditd_send_multicast_skb() 474 nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask); in kauditd_send_multicast_skb() 1249 gfp_t gfp_mask, int type) in audit_buffer_alloc() argument 1265 ab = kmalloc(sizeof(*ab), gfp_mask); in audit_buffer_alloc() 1271 ab->gfp_mask = gfp_mask; in audit_buffer_alloc() 1273 ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask); in audit_buffer_alloc() 1358 struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, in audit_log_start() argument 1374 if (gfp_mask & __GFP_DIRECT_RECLAIM) { in audit_log_start() [all …]
|
D | kmod.c | 520 char **envp, gfp_t gfp_mask, in call_usermodehelper_setup() argument 526 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); in call_usermodehelper_setup() 619 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; in call_usermodehelper() local 621 info = call_usermodehelper_setup(path, argv, envp, gfp_mask, in call_usermodehelper()
|
D | kexec_core.c | 143 gfp_t gfp_mask, 282 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages() argument 286 pages = alloc_pages(gfp_mask, order); in kimage_alloc_pages() 608 gfp_t gfp_mask, in kimage_alloc_page() argument 648 page = kimage_alloc_pages(gfp_mask, 0); in kimage_alloc_page() 688 if (!(gfp_mask & __GFP_HIGHMEM) && in kimage_alloc_page()
|
D | cpuset.c | 2527 int __cpuset_node_allowed(int node, gfp_t gfp_mask) in __cpuset_node_allowed() argument 2543 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ in __cpuset_node_allowed()
|
D | workqueue.c | 3042 struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) in alloc_workqueue_attrs() argument 3046 attrs = kzalloc(sizeof(*attrs), gfp_mask); in alloc_workqueue_attrs() 3049 if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask)) in alloc_workqueue_attrs()
|
D | cgroup.c | 302 gfp_t gfp_mask) in cgroup_idr_alloc() argument 306 idr_preload(gfp_mask); in cgroup_idr_alloc() 308 ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM); in cgroup_idr_alloc()
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_memfree.c | 107 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) in mthca_alloc_icm_pages() argument 115 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in mthca_alloc_icm_pages() 124 int order, gfp_t gfp_mask) in mthca_alloc_icm_coherent() argument 127 gfp_mask); in mthca_alloc_icm_coherent() 138 gfp_t gfp_mask, int coherent) in mthca_alloc_icm() argument 146 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mthca_alloc_icm() 148 icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mthca_alloc_icm() 160 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mthca_alloc_icm() 176 cur_order, gfp_mask); in mthca_alloc_icm() 179 cur_order, gfp_mask); in mthca_alloc_icm()
|
D | mthca_memfree.h | 83 gfp_t gfp_mask, int coherent);
|
D | mthca_cmd.h | 252 gfp_t gfp_mask);
|
D | mthca_cmd.c | 608 gfp_t gfp_mask) in mthca_alloc_mailbox() argument 612 mailbox = kmalloc(sizeof *mailbox, gfp_mask); in mthca_alloc_mailbox() 616 mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); in mthca_alloc_mailbox()
|
/linux-4.4.14/drivers/net/wireless/ath/ |
D | main.c | 31 gfp_t gfp_mask) in ath_rxbuf_alloc() argument 49 skb = __dev_alloc_skb(len + common->cachelsz - 1, gfp_mask); in ath_rxbuf_alloc()
|
D | ath.h | 198 gfp_t gfp_mask);
|
/linux-4.4.14/arch/tile/kernel/ |
D | machine_kexec.c | 215 struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages_arch() argument 217 gfp_mask |= __GFP_THISNODE | __GFP_NORETRY; in kimage_alloc_pages_arch() 218 return alloc_pages_node(0, gfp_mask, order); in kimage_alloc_pages_arch()
|
/linux-4.4.14/net/core/ |
D | skbuff.c | 160 struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) in __alloc_skb_head() argument 166 gfp_mask & ~__GFP_DMA, node); in __alloc_skb_head() 202 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, in __alloc_skb() argument 215 gfp_mask |= __GFP_MEMALLOC; in __alloc_skb() 218 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb() 230 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc); in __alloc_skb() 355 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) in __netdev_alloc_frag() argument 363 data = __alloc_page_frag(nc, fragsz, gfp_mask); in __netdev_alloc_frag() 381 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) in __napi_alloc_frag() argument 385 return __alloc_page_frag(nc, fragsz, gfp_mask); in __napi_alloc_frag() [all …]
|
/linux-4.4.14/net/ceph/ |
D | msgpool.c | 10 static void *msgpool_alloc(gfp_t gfp_mask, void *arg) in msgpool_alloc() argument 15 msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true); in msgpool_alloc()
|
/linux-4.4.14/include/rdma/ |
D | ib_sa.h | 311 int timeout_ms, gfp_t gfp_mask, 323 int timeout_ms, gfp_t gfp_mask, 369 ib_sa_comp_mask comp_mask, gfp_t gfp_mask, 432 int timeout_ms, gfp_t gfp_mask,
|
D | ib_mad.h | 746 gfp_t gfp_mask,
|
/linux-4.4.14/drivers/md/ |
D | dm-bufio.c | 369 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, in alloc_buffer_data() argument 377 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); in alloc_buffer_data() 381 gfp_mask & __GFP_NORETRY) { in alloc_buffer_data() 383 return (void *)__get_free_pages(gfp_mask, in alloc_buffer_data() 399 if (gfp_mask & __GFP_NORETRY) in alloc_buffer_data() 402 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL); in alloc_buffer_data() 404 if (gfp_mask & __GFP_NORETRY) in alloc_buffer_data() 439 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask) in alloc_buffer() argument 442 gfp_mask); in alloc_buffer() 449 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode); in alloc_buffer() [all …]
|
D | dm.c | 646 gfp_t gfp_mask) in alloc_rq_tio() argument 648 return mempool_alloc(md->io_pool, gfp_mask); in alloc_rq_tio() 657 gfp_t gfp_mask) in alloc_clone_request() argument 659 return mempool_alloc(md->rq_pool, gfp_mask); in alloc_clone_request() 1821 struct dm_rq_target_io *tio, gfp_t gfp_mask) in setup_clone() argument 1825 r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, in setup_clone() 1842 struct dm_rq_target_io *tio, gfp_t gfp_mask) in clone_rq() argument 1852 clone = alloc_clone_request(md, gfp_mask); in clone_rq() 1859 if (setup_clone(clone, rq, tio, gfp_mask)) { in clone_rq() 1885 struct mapped_device *md, gfp_t gfp_mask) in prep_tio() argument [all …]
|
D | md.h | 672 extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, 674 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
|
D | dm-crypt.c | 992 gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; in crypt_alloc_buffer() local 998 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) in crypt_alloc_buffer() 1010 page = mempool_alloc(cc->page_pool, gfp_mask); in crypt_alloc_buffer() 1014 gfp_mask |= __GFP_DIRECT_RECLAIM; in crypt_alloc_buffer() 1031 if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM)) in crypt_alloc_buffer()
|
/linux-4.4.14/include/linux/sunrpc/ |
D | gss_api.h | 52 gfp_t gfp_mask); 110 gfp_t gfp_mask);
|
D | gss_krb5.h | 299 gfp_t gfp_mask);
|
/linux-4.4.14/security/integrity/ima/ |
D | ima_crypto.c | 129 gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY; in ima_alloc_pages() local 135 ptr = (void *)__get_free_pages(gfp_mask, order); in ima_alloc_pages() 144 gfp_mask = GFP_KERNEL; in ima_alloc_pages() 147 gfp_mask |= __GFP_NOWARN; in ima_alloc_pages() 149 ptr = (void *)__get_free_pages(gfp_mask, 0); in ima_alloc_pages()
|
/linux-4.4.14/Documentation/connector/ |
D | connector.txt | 27 void cn_netlink_send_multi(struct cn_msg *msg, u16 len, u32 portid, u32 __group, int gfp_mask); 28 void cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __group, int gfp_mask); 75 int cn_netlink_send_multi(struct cn_msg *msg, u16 len, u32 portid, u32 __groups, int gfp_mask); 76 int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 __groups, int gfp_mask); 95 int gfp_mask - GFP mask.
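Note that the prototypes quoted from this document use int gfp_mask, while the exported functions in include/linux/connector.h above take gfp_t. A minimal sender sketch matching the connector.h prototypes (the idx/val pair is hypothetical):

    #include <linux/connector.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static void notify_userspace(const void *payload, u16 len)
    {
            struct cn_msg *m = kzalloc(sizeof(*m) + len, GFP_ATOMIC);

            if (!m)
                    return;
            m->id.idx = CN_NETLINK_USERS + 3;       /* hypothetical index */
            m->id.val = 0x1;
            m->len = len;
            memcpy(m->data, payload, len);
            /* portid 0 plus a group: multicast to that group's listeners. */
            cn_netlink_send(m, 0, m->id.idx, GFP_ATOMIC);
            kfree(m);
    }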
|
/linux-4.4.14/fs/nfs/ |
D | nfs4session.c | 103 u32 slotid, u32 seq_init, gfp_t gfp_mask) in nfs4_new_slot() argument 107 slot = kzalloc(sizeof(*slot), gfp_mask); in nfs4_new_slot() 117 u32 slotid, u32 seq_init, gfp_t gfp_mask) in nfs4_find_or_create_slot() argument 125 seq_init, gfp_mask); in nfs4_find_or_create_slot()
|
D | pnfs_dev.c | 189 gfp_t gfp_mask) in nfs4_find_get_deviceid() argument 198 new = nfs4_get_device_info(server, id, cred, gfp_mask); in nfs4_find_get_deviceid()
|
D | nfs4_fs.h | 247 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait); 444 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
|
D | nfs4state.c | 744 fmode_t fmode, gfp_t gfp_mask, int wait) in __nfs4_close() argument 785 nfs4_do_close(state, gfp_mask, wait); in __nfs4_close() 1014 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) in nfs_alloc_seqid() argument 1018 new = kmalloc(sizeof(*new), gfp_mask); in nfs_alloc_seqid()
|
D | pnfs.h | 329 gfp_t gfp_mask);
|
D | nfs4proc.c | 1076 gfp_t gfp_mask) in nfs4_opendata_alloc() argument 1084 p = kzalloc(sizeof(*p), gfp_mask); in nfs4_opendata_alloc() 1088 p->f_label = nfs4_label_alloc(server, gfp_mask); in nfs4_opendata_alloc() 1092 p->a_label = nfs4_label_alloc(server, gfp_mask); in nfs4_opendata_alloc() 1097 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); in nfs4_opendata_alloc() 2925 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) in nfs4_do_close() argument 2948 calldata = kzalloc(sizeof(*calldata), gfp_mask); in nfs4_do_close() 2957 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); in nfs4_do_close() 5722 gfp_t gfp_mask) in nfs4_alloc_lockdata() argument 5729 p = kzalloc(sizeof(*p), gfp_mask); in nfs4_alloc_lockdata() [all …]
|
D | dir.c | 2144 gfp_t gfp_mask = sc->gfp_mask; in nfs_access_cache_scan() local 2146 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) in nfs_access_cache_scan()
|
/linux-4.4.14/drivers/net/wireless/ipw2x00/ |
D | libipw_tx.c | 192 int headroom, gfp_t gfp_mask) in libipw_alloc_txb() argument 197 gfp_mask); in libipw_alloc_txb() 207 gfp_mask); in libipw_alloc_txb()
|
/linux-4.4.14/drivers/staging/fwserial/ |
D | dma_fifo.h | 86 int tx_limit, int open_limit, gfp_t gfp_mask);
|
D | dma_fifo.c | 68 int tx_limit, int open_limit, gfp_t gfp_mask) in dma_fifo_alloc() argument 77 fifo->data = kmalloc(capacity, gfp_mask); in dma_fifo_alloc()
|
/linux-4.4.14/drivers/usb/wusbcore/ |
D | wa-hc.h | 270 static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask) in wa_nep_arm() argument 275 return usb_submit_urb(urb, gfp_mask); in wa_nep_arm()
|
/linux-4.4.14/fs/jbd2/ |
D | transaction.c | 272 gfp_t gfp_mask) in start_this_handle() argument 303 if ((gfp_mask & __GFP_FS) == 0) in start_this_handle() 304 gfp_mask |= __GFP_NOFAIL; in start_this_handle() 306 gfp_mask); in start_this_handle() 427 gfp_t gfp_mask, unsigned int type, in jbd2__journal_start() argument 458 err = start_this_handle(journal, handle, gfp_mask); in jbd2__journal_start() 634 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) in jbd2__journal_restart() argument 677 ret = start_this_handle(journal, handle, gfp_mask); in jbd2__journal_restart() 1978 struct page *page, gfp_t gfp_mask) in jbd2_journal_try_to_free_buffers() argument
|
D | revoke.c | 144 gfp_t gfp_mask = GFP_NOFS; in insert_revoke_hash() local 147 gfp_mask |= __GFP_NOFAIL; in insert_revoke_hash() 148 record = kmem_cache_alloc(jbd2_revoke_record_cache, gfp_mask); in insert_revoke_hash()
|
/linux-4.4.14/security/selinux/ss/ |
D | mls.h | 40 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
|
D | mls.c | 377 int mls_from_string(char *str, struct context *context, gfp_t gfp_mask) in mls_from_string() argument 387 tmpstr = freestr = kstrdup(str, gfp_mask); in mls_from_string()
|
/linux-4.4.14/drivers/tty/ |
D | sysrq.c | 356 const gfp_t gfp_mask = GFP_KERNEL; in moom_callback() local 358 .zonelist = node_zonelist(first_memory_node, gfp_mask), in moom_callback() 360 .gfp_mask = gfp_mask, in moom_callback()
|
/linux-4.4.14/fs/ |
D | mbcache.c | 191 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask) in __mb_cache_entry_forget() argument 253 gfp_t gfp_mask = sc->gfp_mask; in mb_cache_shrink_scan() local 284 __mb_cache_entry_forget(entry, gfp_mask); in mb_cache_shrink_scan()
|
D | buffer.c | 1000 gfp_t gfp_mask; in grow_dev_page() local 1002 gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp; in grow_dev_page() 1010 gfp_mask |= __GFP_NOFAIL; in grow_dev_page() 1012 page = find_or_create_page(inode->i_mapping, index, gfp_mask); in grow_dev_page()
|
D | super.c | 71 if (!(sc->gfp_mask & __GFP_FS)) in super_cache_scan()
|
/linux-4.4.14/include/linux/netfilter/ |
D | nfnetlink.h | 38 u32 dst_portid, gfp_t gfp_mask);
|
/linux-4.4.14/fs/nilfs2/ |
D | mdt.h | 89 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
|
D | mdt.c | 479 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) in nilfs_mdt_init() argument 491 mapping_set_gfp_mask(inode->i_mapping, gfp_mask); in nilfs_mdt_init()
|
/linux-4.4.14/arch/um/drivers/ |
D | net_kern.c | 391 struct transport *transport, gfp_t gfp_mask) in eth_configure() argument 400 device = kzalloc(sizeof(*device), gfp_mask); in eth_configure() 571 void **init_out, char **mac_out, gfp_t gfp_mask) in check_transport() argument 585 *init_out = kmalloc(transport->setup_size, gfp_mask); in check_transport()
|
/linux-4.4.14/arch/s390/pci/ |
D | pci_clp.c | 49 static void *clp_alloc_block(gfp_t gfp_mask) in clp_alloc_block() argument 51 return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE)); in clp_alloc_block()
|
/linux-4.4.14/fs/gfs2/ |
D | inode.h | 18 extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
|
D | aops.c | 1120 int gfs2_releasepage(struct page *page, gfp_t gfp_mask) in gfs2_releasepage() argument
|
/linux-4.4.14/drivers/net/wireless/ |
D | zd1201.c | 521 gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC; in zd1201_setconfig() local 528 request = kmalloc(16, gfp_mask); in zd1201_setconfig() 531 urb = usb_alloc_urb(0, gfp_mask); in zd1201_setconfig() 557 err = usb_submit_urb(urb, gfp_mask); in zd1201_setconfig() 562 request = kmalloc(16, gfp_mask); in zd1201_setconfig() 565 urb = usb_alloc_urb(0, gfp_mask); in zd1201_setconfig() 578 err = usb_submit_urb(urb, gfp_mask); in zd1201_setconfig()
|
/linux-4.4.14/fs/jfs/ |
D | jfs_metapage.c | 186 static inline struct metapage *alloc_metapage(gfp_t gfp_mask) in alloc_metapage() argument 188 struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask); in alloc_metapage() 539 static int metapage_releasepage(struct page *page, gfp_t gfp_mask) in metapage_releasepage() argument
|
/linux-4.4.14/net/netfilter/ |
D | nfnetlink.c | 126 u32 dst_portid, gfp_t gfp_mask) in nfnetlink_alloc_skb() argument 128 return netlink_alloc_skb(net->nfnl, size, dst_portid, gfp_mask); in nfnetlink_alloc_skb()
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/pcie/ |
D | rx.c | 290 gfp_t gfp_mask = priority; in iwl_pcie_rx_alloc_page() local 293 gfp_mask |= __GFP_NOWARN; in iwl_pcie_rx_alloc_page() 296 gfp_mask |= __GFP_COMP; in iwl_pcie_rx_alloc_page() 299 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); in iwl_pcie_rx_alloc_page()
|
/linux-4.4.14/kernel/locking/ |
D | lockdep.c | 2733 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags) in __lockdep_trace_alloc() argument 2741 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) in __lockdep_trace_alloc() 2745 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) in __lockdep_trace_alloc() 2749 if (!(gfp_mask & __GFP_FS)) in __lockdep_trace_alloc() 2763 void lockdep_trace_alloc(gfp_t gfp_mask) in lockdep_trace_alloc() argument 2773 __lockdep_trace_alloc(gfp_mask, flags); in lockdep_trace_alloc() 2889 void lockdep_trace_alloc(gfp_t gfp_mask) in lockdep_trace_alloc() argument 3665 void lockdep_set_current_reclaim_state(gfp_t gfp_mask) in lockdep_set_current_reclaim_state() argument 3667 current->lockdep_reclaim_gfp = gfp_mask; in lockdep_set_current_reclaim_state()
|
/linux-4.4.14/drivers/uwb/ |
D | uwb-internal.h | 234 extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask);
|
/linux-4.4.14/drivers/staging/rtl8192u/ieee80211/ |
D | ieee80211_tx.c | 241 gfp_t gfp_mask) in ieee80211_alloc_txb() argument 247 gfp_mask); in ieee80211_alloc_txb()
|
/linux-4.4.14/drivers/base/ |
D | devres.c | 947 gfp_t gfp_mask, unsigned int order) in devm_get_free_pages() argument 952 addr = __get_free_pages(gfp_mask, order); in devm_get_free_pages()
|
/linux-4.4.14/drivers/scsi/aic94xx/ |
D | aic94xx_hwi.h | 382 gfp_t gfp_mask);
|
/linux-4.4.14/fs/xfs/ |
D | xfs_iops.c | 1222 gfp_t gfp_mask; in xfs_setup_inode() local 1291 gfp_mask = mapping_gfp_mask(inode->i_mapping); in xfs_setup_inode() 1292 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
|
D | xfs_buf.c | 292 gfp_t gfp_mask = xb_to_gfp(flags); in xfs_buf_allocate_memory() local 341 page = alloc_page(gfp_mask); in xfs_buf_allocate_memory() 359 __func__, gfp_mask); in xfs_buf_allocate_memory()
|
/linux-4.4.14/drivers/staging/rtl8192e/ |
D | rtllib_tx.c | 219 gfp_t gfp_mask) in rtllib_alloc_txb() argument 225 gfp_mask); in rtllib_alloc_txb()
|
/linux-4.4.14/drivers/isdn/mISDN/ |
D | socket.c | 44 _l2_alloc_skb(unsigned int len, gfp_t gfp_mask) in _l2_alloc_skb() argument 48 skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask); in _l2_alloc_skb()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 117 static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask) in ll_releasepage() argument
|
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_cmn.c | 547 u16 index, gfp_t gfp_mask) in bnx2x_alloc_rx_sge() argument 562 pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT); in bnx2x_alloc_rx_sge() 690 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask) in bnx2x_frag_alloc() argument 694 if (unlikely(gfpflags_allow_blocking(gfp_mask))) in bnx2x_frag_alloc() 695 return (void *)__get_free_page(gfp_mask); in bnx2x_frag_alloc() 700 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); in bnx2x_frag_alloc() 833 u16 index, gfp_t gfp_mask) in bnx2x_alloc_rx_data() argument 840 data = bnx2x_frag_alloc(fp, gfp_mask); in bnx2x_alloc_rx_data()
|
/linux-4.4.14/net/netlink/ |
D | af_netlink.c | 129 gfp_t gfp_mask) in netlink_to_full_skb() argument 134 new = alloc_skb(len, gfp_mask); in netlink_to_full_skb() 1881 gfp_t gfp_mask) in __netlink_alloc_skb() argument 1906 skb = alloc_skb_head(gfp_mask); in __netlink_alloc_skb() 1948 return alloc_skb(size, gfp_mask); in __netlink_alloc_skb()
|
/linux-4.4.14/Documentation/scsi/ |
D | libsas.txt | 306 @gfp_mask is the gfp_mask defining the context of the caller.
|
/linux-4.4.14/drivers/firewire/ |
D | core-cdev.c | 487 struct client_resource *resource, gfp_t gfp_mask) in add_client_resource() argument 489 bool preload = gfpflags_allow_blocking(gfp_mask); in add_client_resource() 494 idr_preload(gfp_mask); in add_client_resource()
|
/linux-4.4.14/drivers/net/wireless/realtek/rtlwifi/ |
D | usb.c | 426 struct urb *urb, gfp_t gfp_mask) in _rtl_prep_rx_urb() argument 431 buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask, in _rtl_prep_rx_urb()
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/ |
D | lustre_dlm.h | 212 gfp_t gfp_mask);
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | mcg.c | 815 gfp_t gfp_mask) in acquire_group() argument 831 group = kzalloc(sizeof *group, gfp_mask); in acquire_group()
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdecho/ |
D | echo_client.c | 1560 gfp_t gfp_mask; in echo_client_kbrw() local 1567 gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_KERNEL : GFP_HIGHUSER; in echo_client_kbrw() 1600 pgp->pg = alloc_page(gfp_mask); in echo_client_kbrw()
|
/linux-4.4.14/fs/ecryptfs/ |
D | crypto.c | 1160 static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask, in ecryptfs_get_zeroed_pages() argument 1165 page = alloc_pages(gfp_mask | __GFP_ZERO, order); in ecryptfs_get_zeroed_pages()
|
/linux-4.4.14/drivers/net/ethernet/sgi/ |
D | ioc3-eth.c | 125 unsigned int gfp_mask) in ioc3_alloc_skb() argument 129 skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask); in ioc3_alloc_skb()
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/ |
D | lu_object.c | 1753 if (!(sc->gfp_mask & __GFP_FS)) in lu_cache_shrink_count() 1777 if (!(sc->gfp_mask & __GFP_FS)) in lu_cache_shrink_scan()
|
/linux-4.4.14/drivers/net/wireless/iwlegacy/ |
D | 3945-mac.c | 1011 gfp_t gfp_mask = priority; in il3945_rx_allocate() local 1022 gfp_mask |= __GFP_NOWARN; in il3945_rx_allocate() 1025 gfp_mask |= __GFP_COMP; in il3945_rx_allocate() 1028 page = alloc_pages(gfp_mask, il->hw_params.rx_page_order); in il3945_rx_allocate()
|
/linux-4.4.14/net/sunrpc/ |
D | auth.c | 509 if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL) in rpcauth_cache_shrink_scan()
|
/linux-4.4.14/drivers/md/bcache/ |
D | sysfs.c | 622 sc.gfp_mask = GFP_KERNEL; in STORE()
|
/linux-4.4.14/drivers/net/ |
D | virtio_net.c | 203 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument 212 p = alloc_page(gfp_mask); in get_a_page()
|
/linux-4.4.14/net/ipv4/ |
D | tcp_output.c | 904 gfp_t gfp_mask) in tcp_transmit_skb() argument 922 skb = pskb_copy(skb, gfp_mask); in tcp_transmit_skb() 924 skb = skb_clone(skb, gfp_mask); in tcp_transmit_skb()
|
/linux-4.4.14/Documentation/block/ |
D | data-integrity.txt | 216 struct bip * bio_integrity_alloc(bio, gfp_mask, nr_pages);
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_driver.c | 1069 gfp_t gfp_mask) in ipath_alloc_skb() argument 1096 skb = __dev_alloc_skb(len, gfp_mask); in ipath_alloc_skb()
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 210 gfp_t gfp_mask, in srp_alloc_iu() argument 215 iu = kmalloc(sizeof *iu, gfp_mask); in srp_alloc_iu() 219 iu->buf = kzalloc(size, gfp_mask); in srp_alloc_iu()
|
/linux-4.4.14/drivers/block/drbd/ |
D | drbd_main.c | 149 struct bio *bio_alloc_drbd(gfp_t gfp_mask) in bio_alloc_drbd() argument 154 return bio_alloc(gfp_mask, 1); in bio_alloc_drbd() 156 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); in bio_alloc_drbd()
|
D | drbd_receiver.c | 342 unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local) in drbd_alloc_peer_req() argument 352 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); in drbd_alloc_peer_req() 354 if (!(gfp_mask & __GFP_NOWARN)) in drbd_alloc_peer_req() 361 gfpflags_allow_blocking(gfp_mask)); in drbd_alloc_peer_req()
|