
Searched refs:__GFP_WAIT (Results 1 – 83 of 83) sorted by relevance

/linux-4.1.27/include/linux/
gfp.h:71 #define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ macro
111 #define GFP_NOIO (__GFP_WAIT)
112 #define GFP_NOFS (__GFP_WAIT | __GFP_IO)
113 #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
114 #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
116 #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
128 #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
133 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
skbuff.h:1101 might_sleep_if(pri & __GFP_WAIT); in skb_unclone()
1185 might_sleep_if(pri & __GFP_WAIT); in skb_share_check()
1221 might_sleep_if(pri & __GFP_WAIT); in skb_unshare()
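
The gfp.h and skbuff.h hits above illustrate the pattern this search tracks: __GFP_WAIT marks an allocation context that is allowed to sleep, the higher-level GFP_* masks (GFP_NOIO, GFP_NOFS, GFP_KERNEL, ...) are built by OR-ing it with __GFP_IO and __GFP_FS, and sleeping paths are guarded with might_sleep_if(flags & __GFP_WAIT). The sketch below is an illustrative, self-contained userspace program, not kernel code; the bit values merely mirror the ___GFP_* constants in linux-4.1 for demonstration.

    /* Illustrative sketch only -- not part of the kernel tree. */
    #include <stdio.h>

    typedef unsigned int gfp_t;

    #define __GFP_WAIT  ((gfp_t)0x10u)   /* allocation may wait and reschedule */
    #define __GFP_HIGH  ((gfp_t)0x20u)   /* high priority, may dip into reserves */
    #define __GFP_IO    ((gfp_t)0x40u)   /* may start physical I/O */
    #define __GFP_FS    ((gfp_t)0x80u)   /* may call into the filesystem layer */

    #define GFP_NOIO    (__GFP_WAIT)
    #define GFP_NOFS    (__GFP_WAIT | __GFP_IO)
    #define GFP_KERNEL  (__GFP_WAIT | __GFP_IO | __GFP_FS)
    #define GFP_ATOMIC  (__GFP_HIGH)

    /* Mirrors the might_sleep_if(pri & __GFP_WAIT) checks seen in skbuff.h. */
    static int may_sleep(gfp_t mask)
    {
        return (mask & __GFP_WAIT) != 0;
    }

    int main(void)
    {
        printf("GFP_KERNEL may sleep: %d\n", may_sleep(GFP_KERNEL)); /* prints 1 */
        printf("GFP_ATOMIC may sleep: %d\n", may_sleep(GFP_ATOMIC)); /* prints 0 */
        return 0;
    }
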
/linux-4.1.27/include/trace/events/
gfpflags.h:23 {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
/linux-4.1.27/mm/
failslab.c:19 if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) in should_failslab()
mempool.c:320 might_sleep_if(gfp_mask & __GFP_WAIT); in mempool_alloc()
326 gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO); in mempool_alloc()
359 if (!(gfp_mask & __GFP_WAIT)) { in mempool_alloc()
slab.c:1038 return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT; in gfp_exact_node()
2634 if (local_flags & __GFP_WAIT) in cache_grow()
2664 if (local_flags & __GFP_WAIT) in cache_grow()
2678 if (local_flags & __GFP_WAIT) in cache_grow()
2870 might_sleep_if(flags & __GFP_WAIT); in cache_alloc_debugcheck_before()
3058 if (local_flags & __GFP_WAIT) in fallback_alloc()
3062 if (local_flags & __GFP_WAIT) in fallback_alloc()
dmapool.c:326 might_sleep_if(mem_flags & __GFP_WAIT); in dma_pool_alloc()
migrate.c:1566 ~__GFP_WAIT, 0); in alloc_misplaced_dst_page()
1740 (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT, in migrate_misplaced_transhuge_page()
page_alloc.c:1825 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) in should_fail_alloc_page()
2328 if (in_interrupt() || !(gfp_mask & __GFP_WAIT)) in warn_alloc_failed()
2623 const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD)); in gfp_to_alloc_flags()
2677 const gfp_t wait = gfp_mask & __GFP_WAIT; in __alloc_pages_slowpath()
2887 might_sleep_if(gfp_mask & __GFP_WAIT); in __alloc_pages_nodemask()
memcontrol.c:2267 if (!(gfp_mask & __GFP_WAIT)) in try_charge()
2326 if (!(gfp_mask & __GFP_WAIT)) in try_charge()
4593 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count); in mem_cgroup_do_precharge()
slub.c:1266 might_sleep_if(flags & __GFP_WAIT); in slab_pre_alloc_hook()
1342 if (flags & __GFP_WAIT) in allocate_slab()
1383 if (flags & __GFP_WAIT) in allocate_slab()
vmalloc.c:1620 if (gfp_mask & __GFP_WAIT) in __vmalloc_area_node()
vmscan.c:3729 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) in zone_reclaim()
huge_memory.c:770 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp; in alloc_hugepage_gfpmask()
/linux-4.1.27/drivers/ide/
ide-pm.c:21 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_suspend()
61 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_resume()
ide-ioctls.c:128 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_cmd_ioctl()
224 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_drive_reset()
ide-devsets.c:168 rq = blk_get_request(q, READ, __GFP_WAIT); in ide_devset_execute()
ide-park.c:34 rq = blk_get_request(q, READ, __GFP_WAIT); in issue_park_cmd()
ide-taskfile.c:433 rq = blk_get_request(drive->queue, rw, __GFP_WAIT); in ide_raw_taskfile()
444 nsect * SECTOR_SIZE, __GFP_WAIT); in ide_raw_taskfile()
ide-cd_ioctl.c:306 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_cdrom_reset()
ide-disk.c:480 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in set_multcount()
ide-atapi.c:95 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_queue_pc_tail()
ide-tape.c:855 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in idetape_queue_rw_tail()
863 __GFP_WAIT); in idetape_queue_rw_tail()
ide-cd.c:444 rq = blk_get_request(drive->queue, write, __GFP_WAIT); in ide_cd_queue_pc()
/linux-4.1.27/fs/nilfs2/
mdt.h:75 #define NILFS_MDT_GFP (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
/linux-4.1.27/kernel/power/
block_io.c:34 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); in submit()
swap.c:286 src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN | in write_page()
294 src = (void *)__get_free_page(__GFP_WAIT | in write_page()
599 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); in save_image_lzo()
903 __get_free_page(__GFP_WAIT | __GFP_HIGH); in get_swap_reader()
1167 __GFP_WAIT | __GFP_HIGH : in load_image_lzo()
1168 __GFP_WAIT | __GFP_NOWARN | in load_image_lzo()
/linux-4.1.27/fs/ext4/
crypto.c:424 GFP_NOFS | __GFP_WAIT); in ext4_encrypt()
504 GFP_NOFS | __GFP_WAIT); in ext4_encrypted_zeroout()
super.c:1048 wait & ~__GFP_WAIT); in bdev_try_to_free_page()
/linux-4.1.27/block/
scsi_ioctl.c:447 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); in sg_scsi_ioctl()
498 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { in sg_scsi_ioctl()
539 rq = blk_get_request(q, WRITE, __GFP_WAIT); in __blk_send_generic()
blk-ioc.c:292 might_sleep_if(gfp_flags & __GFP_WAIT); in get_task_io_context()
bio.c:214 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); in bvec_alloc()
228 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) { in bvec_alloc()
455 gfp_mask &= ~__GFP_WAIT; in bio_alloc_bioset()
blk-mq.c:88 if (!(gfp & __GFP_WAIT)) in blk_mq_queue_enter()
268 blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT, in blk_mq_alloc_request()
272 if (!rq && (gfp & __GFP_WAIT)) { in blk_mq_alloc_request()
1228 __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx); in blk_mq_map_request()
blk-mq-tag.c:267 if (!(data->gfp & __GFP_WAIT)) in bt_get()
cfq-iosched.c:3609 } else if (gfp_mask & __GFP_WAIT) { in cfq_find_alloc_queue()
4224 might_sleep_if(gfp_mask & __GFP_WAIT); in cfq_set_request()
blk-core.c:1165 if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { in get_request()
/linux-4.1.27/arch/arm64/mm/
dma-mapping.c:103 if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) { in __dma_alloc_coherent()
150 if (!coherent && !(flags & __GFP_WAIT)) { in __dma_alloc()
/linux-4.1.27/arch/x86/kernel/
pci-dma.c:104 if (flag & __GFP_WAIT) { in dma_generic_alloc_coherent()
/linux-4.1.27/Documentation/vm/
balance:3 Memory balancing is needed for non __GFP_WAIT as well as for non
6 There are two reasons to be requesting non __GFP_WAIT allocations:
/linux-4.1.27/drivers/connector/
connector.c:127 return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); in cn_netlink_send_mult()
/linux-4.1.27/lib/
radix-tree.c:190 if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) { in radix_tree_node_alloc()
289 WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT)); in radix_tree_preload()
301 if (gfp_mask & __GFP_WAIT) in radix_tree_maybe_preload()
idr.c:402 might_sleep_if(gfp_mask & __GFP_WAIT); in idr_preload()
456 might_sleep_if(gfp_mask & __GFP_WAIT); in idr_alloc()
/linux-4.1.27/fs/cachefiles/
internal.h:33 #define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/
libcfs_private.h:120 ((mask) & __GFP_WAIT) == 0)); \
/linux-4.1.27/drivers/staging/android/ion/
ion_system_heap.c:30 __GFP_NORETRY) & ~__GFP_WAIT;
/linux-4.1.27/kernel/
smp.c:672 might_sleep_if(gfp_flags & __GFP_WAIT); in on_each_cpu_cond()
audit.c:1360 if (gfp_mask & __GFP_WAIT) { in audit_log_start()
1362 gfp_mask &= ~__GFP_WAIT; in audit_log_start()
1369 if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) { in audit_log_start()
/linux-4.1.27/fs/fscache/
page.c:125 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) { in __fscache_maybe_release_page()
135 gfp &= ~__GFP_WAIT; in __fscache_maybe_release_page()
cookie.c:114 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT); in __fscache_acquire_cookie()
/linux-4.1.27/security/integrity/ima/
ima_crypto.c:129 gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY; in ima_alloc_pages()
/linux-4.1.27/drivers/block/
osdblk.c:274 gfpmask &= ~__GFP_WAIT; in bio_chain_clone()
pktcdvd.c:706 WRITE : READ, __GFP_WAIT); in pkt_generic_packet()
713 __GFP_WAIT); in pkt_generic_packet()
nvme-core.c:1092 req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT), in nvme_submit_io_cmd()
1883 (GFP_KERNEL|__GFP_WAIT), false); in nvme_user_cmd()
/linux-4.1.27/drivers/md/
dm-crypt.c:984 if (unlikely(gfp_mask & __GFP_WAIT)) in crypt_alloc_buffer()
1000 gfp_mask |= __GFP_WAIT; in crypt_alloc_buffer()
1017 if (unlikely(gfp_mask & __GFP_WAIT)) in crypt_alloc_buffer()
/linux-4.1.27/fs/btrfs/
extent_io.c:597 if (!prealloc && (mask & __GFP_WAIT)) { in clear_extent_bit()
721 if (mask & __GFP_WAIT) in clear_extent_bit()
853 if (!prealloc && (mask & __GFP_WAIT)) { in __set_extent_bit()
1031 if (mask & __GFP_WAIT) in __set_extent_bit()
1079 if (!prealloc && (mask & __GFP_WAIT)) { in convert_extent_bit()
1256 if (mask & __GFP_WAIT) in convert_extent_bit()
4317 if ((mask & __GFP_WAIT) && in try_release_extent_mapping()
volumes.c:156 INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT); in __alloc_device()
157 INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT); in __alloc_device()
disk-io.c:2546 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); in open_ctree()
/linux-4.1.27/drivers/block/paride/
pd.c:724 rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT); in pd_special_command()
/linux-4.1.27/fs/nfs/
file.c:494 if ((gfp & __GFP_WAIT) && in nfs_release_page()
/linux-4.1.27/fs/xfs/
xfs_qm.c:528 if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) in xfs_qm_shrink_scan()
/linux-4.1.27/arch/arm/mm/
dma-mapping.c:650 else if (!(gfp & __GFP_WAIT)) in __dma_alloc()
1354 if (!(gfp & __GFP_WAIT)) in arm_iommu_alloc_attrs()
/linux-4.1.27/net/rxrpc/
ar-connection.c:503 if (!(gfp & __GFP_WAIT)) { in rxrpc_connect_call()
/linux-4.1.27/drivers/scsi/
scsi_lib.c:224 req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); in scsi_execute()
230 buffer, bufflen, __GFP_WAIT)) in scsi_execute()
/linux-4.1.27/drivers/mtd/
mtdcore.c:1183 gfp_t flags = __GFP_NOWARN | __GFP_WAIT | in mtd_kmalloc_up_to()
/linux-4.1.27/net/core/
skbuff.c:491 if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) { in __alloc_rx_skb()
4391 if (gfp_head & __GFP_WAIT) in alloc_skb_with_frags()
4406 page = alloc_pages((gfp_mask & ~__GFP_WAIT) | in alloc_skb_with_frags()
sock.c:1882 pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP | in skb_page_frag_refill()
/linux-4.1.27/drivers/infiniband/core/
sa_query.c:621 bool preload = !!(gfp_mask & __GFP_WAIT); in send_mad()
/linux-4.1.27/net/sctp/
associola.c:1591 bool preload = !!(gfp & __GFP_WAIT); in sctp_assoc_set_id()
/linux-4.1.27/drivers/block/drbd/
drbd_bitmap.c:1019 page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT); in bm_page_io_async()
drbd_receiver.c:360 page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT)); in drbd_alloc_peer_req()
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_init.c:1683 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; in qib_setup_eagerbufs()
/linux-4.1.27/include/net/
sock.h:2049 if (sk->sk_allocation & __GFP_WAIT) in sk_page_frag()
/linux-4.1.27/drivers/firewire/
core-cdev.c:489 bool preload = !!(gfp_mask & __GFP_WAIT); in add_client_resource()
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_file_ops.c:908 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; in ipath_create_user_egr()
/linux-4.1.27/fs/ext3/
super.c:753 wait & ~__GFP_WAIT); in bdev_try_to_free_page()
/linux-4.1.27/kernel/locking/
lockdep.c:2741 if (!(gfp_mask & __GFP_WAIT)) in __lockdep_trace_alloc()
/linux-4.1.27/net/netlink/
af_netlink.c:2075 if (info.congested && (allocation & __GFP_WAIT)) in netlink_broadcast_filtered()
/linux-4.1.27/drivers/iommu/
amd_iommu.c:2943 if (!(flag & __GFP_WAIT)) in alloc_coherent()
intel-iommu.c:3270 if (flags & __GFP_WAIT) { in intel_alloc_coherent()
/linux-4.1.27/drivers/usb/host/
u132-hcd.c:2250 if (__GFP_WAIT & mem_flags) { in u132_urb_enqueue()
/linux-4.1.27/drivers/block/mtip32xx/
mtip32xx.c:182 rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true); in mtip_get_int_command()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.c:674 if (unlikely(gfp_mask & __GFP_WAIT)) in bnx2x_frag_alloc()
/linux-4.1.27/drivers/gpu/drm/i915/
i915_gem.c:2072 gfp &= ~(__GFP_IO | __GFP_WAIT); in i915_gem_object_get_pages_gtt()