/linux-4.1.27/include/linux/ |
D | gfp.h |
      71   #define __GFP_WAIT ((__force gfp_t)___GFP_WAIT)  /* Can wait and reschedule? */   (macro)
      111  #define GFP_NOIO (__GFP_WAIT)
      112  #define GFP_NOFS (__GFP_WAIT | __GFP_IO)
      113  #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
      114  #define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
      116  #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
      128  #define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
      133  #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
|
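Note: the defines above show how the common GFP levels nest around this bit: GFP_NOIO is just __GFP_WAIT, GFP_NOFS adds __GFP_IO, and GFP_KERNEL adds __GFP_FS on top. A minimal sketch of how a caller-supplied mask is usually tested (hypothetical helper, not kernel code):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* Hypothetical helper: block for memory only when the caller's mask
     * allows it.  Because the GFP levels nest, testing __GFP_WAIT covers
     * GFP_NOIO, GFP_NOFS and GFP_KERNEL alike. */
    static void *scratch_alloc(size_t len, gfp_t gfp_mask)
    {
            if (gfp_mask & __GFP_WAIT)              /* may sleep and reclaim */
                    return kmalloc(len, gfp_mask);
            /* atomic caller: fail fast and quietly instead of sleeping */
            return kmalloc(len, gfp_mask | __GFP_NOWARN);
    }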
D | skbuff.h |
      1101  might_sleep_if(pri & __GFP_WAIT);   in skb_unclone()
      1185  might_sleep_if(pri & __GFP_WAIT);   in skb_share_check()
      1221  might_sleep_if(pri & __GFP_WAIT);   in skb_unshare()
|
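All three hits are the same guard: might_sleep_if(pri & __GFP_WAIT) turns a sleepable mask passed from atomic context into a debug warning before any copy is attempted. A sketch of the pattern in a hypothetical wrapper:

    #include <linux/skbuff.h>

    /* Hypothetical wrapper mirroring the skb_unclone() guard. */
    static int my_unclone(struct sk_buff *skb, gfp_t pri)
    {
            might_sleep_if(pri & __GFP_WAIT);       /* catches GFP_KERNEL under a lock */
            if (skb_cloned(skb))
                    return pskb_expand_head(skb, 0, 0, pri);
            return 0;
    }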
/linux-4.1.27/include/trace/events/ |
D | gfpflags.h | 23 {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
|
/linux-4.1.27/mm/ |
D | failslab.c | 19 if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) in should_failslab()
|
D | mempool.c |
      320  might_sleep_if(gfp_mask & __GFP_WAIT);   in mempool_alloc()
      326  gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);   in mempool_alloc()
      359  if (!(gfp_mask & __GFP_WAIT)) {   in mempool_alloc()
|
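mempool_alloc() strips __GFP_WAIT and __GFP_IO for its first pass, so the normal allocator is probed without sleeping before the pool reserve or a blocking wait is used. A condensed, simplified sketch of that flow (not the exact mempool code):

    #include <linux/mempool.h>

    /* Simplified view of the mempool_alloc() first pass. */
    static void *pool_alloc_sketch(mempool_t *pool, gfp_t gfp_mask)
    {
            gfp_t gfp_temp;
            void *element;

            might_sleep_if(gfp_mask & __GFP_WAIT);

            /* first attempt must neither sleep nor start I/O */
            gfp_temp = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
            element = pool->alloc(gfp_temp, pool->pool_data);
            if (element)
                    return element;

            if (!(gfp_mask & __GFP_WAIT))           /* caller cannot wait */
                    return NULL;

            /* caller may wait: take the real, possibly blocking path */
            return mempool_alloc(pool, gfp_mask);
    }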
D | slab.c |
      1038  return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;   in gfp_exact_node()
      2634  if (local_flags & __GFP_WAIT)   in cache_grow()
      2664  if (local_flags & __GFP_WAIT)   in cache_grow()
      2678  if (local_flags & __GFP_WAIT)   in cache_grow()
      2870  might_sleep_if(flags & __GFP_WAIT);   in cache_alloc_debugcheck_before()
      3058  if (local_flags & __GFP_WAIT)   in fallback_alloc()
      3062  if (local_flags & __GFP_WAIT)   in fallback_alloc()
|
D | dmapool.c | 326 might_sleep_if(mem_flags & __GFP_WAIT); in dma_pool_alloc()
|
D | migrate.c |
      1566  ~__GFP_WAIT, 0);   in alloc_misplaced_dst_page()
      1740  (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,   in migrate_misplaced_transhuge_page()
|
D | page_alloc.c |
      1825  if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))   in should_fail_alloc_page()
      2328  if (in_interrupt() || !(gfp_mask & __GFP_WAIT))   in warn_alloc_failed()
      2623  const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));   in gfp_to_alloc_flags()
      2677  const gfp_t wait = gfp_mask & __GFP_WAIT;   in __alloc_pages_slowpath()
      2887  might_sleep_if(gfp_mask & __GFP_WAIT);   in __alloc_pages_nodemask()
|
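In the page allocator itself the bit classifies the whole allocation: __alloc_pages_nodemask() asserts the calling context with might_sleep_if(), and the slowpath treats a mask without __GFP_WAIT as atomic, so it may never enter direct reclaim. A reduced sketch of that test:

    #include <linux/gfp.h>
    #include <linux/hardirq.h>

    /* Reduced sketch of the slowpath classification: only a sleepable,
     * non-interrupt context is allowed to run direct reclaim. */
    static bool can_direct_reclaim(gfp_t gfp_mask)
    {
            if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
                    return false;
            return true;
    }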
D | memcontrol.c |
      2267  if (!(gfp_mask & __GFP_WAIT))   in try_charge()
      2326  if (!(gfp_mask & __GFP_WAIT))   in try_charge()
      4593  ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);   in mem_cgroup_do_precharge()
|
D | slub.c |
      1266  might_sleep_if(flags & __GFP_WAIT);   in slab_pre_alloc_hook()
      1342  if (flags & __GFP_WAIT)   in allocate_slab()
      1383  if (flags & __GFP_WAIT)   in allocate_slab()
|
D | vmalloc.c | 1620 if (gfp_mask & __GFP_WAIT) in __vmalloc_area_node()
|
D | vmscan.c | 3729 if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) in zone_reclaim()
|
D | huge_memory.c | 770 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp; in alloc_hugepage_gfpmask()
|
/linux-4.1.27/drivers/ide/ |
D | ide-pm.c |
      21  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);   in generic_ide_suspend()
      61  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);   in generic_ide_resume()
|
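The same shape repeats through the ide-* files below: passing __GFP_WAIT makes blk_get_request() sleep until a request is free instead of failing. A representative sketch, assuming a live request_queue (drive-specific setup elided):

    #include <linux/blkdev.h>
    #include <linux/err.h>

    /* Representative sketch of the IDE call sites. */
    static int issue_simple_cmd(struct request_queue *q)
    {
            struct request *rq;

            /* sleeps until a request is available; only errors out
             * if the queue is dying */
            rq = blk_get_request(q, READ, __GFP_WAIT);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);
            /* ... fill in the command and blk_execute_rq(), then: */
            blk_put_request(rq);
            return 0;
    }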
D | ide-ioctls.c |
      128  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);   in ide_cmd_ioctl()
      224  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);   in generic_drive_reset()
|
D | ide-devsets.c | 168 rq = blk_get_request(q, READ, __GFP_WAIT); in ide_devset_execute()
|
D | ide-park.c | 34 rq = blk_get_request(q, READ, __GFP_WAIT); in issue_park_cmd()
|
D | ide-taskfile.c |
      433  rq = blk_get_request(drive->queue, rw, __GFP_WAIT);   in ide_raw_taskfile()
      444  nsect * SECTOR_SIZE, __GFP_WAIT);   in ide_raw_taskfile()
|
D | ide-cd_ioctl.c | 306 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_cdrom_reset()
|
D | ide-disk.c | 480 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in set_multcount()
|
D | ide-atapi.c | 95 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_queue_pc_tail()
|
D | ide-tape.c |
      855  rq = blk_get_request(drive->queue, READ, __GFP_WAIT);   in idetape_queue_rw_tail()
      863  __GFP_WAIT);   in idetape_queue_rw_tail()
|
D | ide-cd.c | 444 rq = blk_get_request(drive->queue, write, __GFP_WAIT); in ide_cd_queue_pc()
|
/linux-4.1.27/fs/nilfs2/ |
D | mdt.h | 75 #define NILFS_MDT_GFP (__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
|
/linux-4.1.27/kernel/power/ |
D | block_io.c | 34 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); in submit()
|
D | swap.c |
      286   src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |   in write_page()
      294   src = (void *)__get_free_page(__GFP_WAIT |   in write_page()
      599   page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);   in save_image_lzo()
      903   __get_free_page(__GFP_WAIT | __GFP_HIGH);   in get_swap_reader()
      1167  __GFP_WAIT | __GFP_HIGH :   in load_image_lzo()
      1168  __GFP_WAIT | __GFP_NOWARN |   in load_image_lzo()
|
/linux-4.1.27/fs/ext4/ |
D | crypto.c |
      424  GFP_NOFS | __GFP_WAIT);   in ext4_encrypt()
      504  GFP_NOFS | __GFP_WAIT);   in ext4_encrypted_zeroout()
|
D | super.c | 1048 wait & ~__GFP_WAIT); in bdev_try_to_free_page()
|
/linux-4.1.27/block/ |
D | scsi_ioctl.c |
      447  rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);   in sg_scsi_ioctl()
      498  if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {   in sg_scsi_ioctl()
      539  rq = blk_get_request(q, WRITE, __GFP_WAIT);   in __blk_send_generic()
|
D | blk-ioc.c | 292 might_sleep_if(gfp_flags & __GFP_WAIT); in get_task_io_context()
|
D | bio.c |
      214  gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);   in bvec_alloc()
      228  if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {   in bvec_alloc()
      455  gfp_mask &= ~__GFP_WAIT;   in bio_alloc_bioset()
|
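bvec_alloc() is the classic two-step fallback: probe the slab with __GFP_WAIT and __GFP_IO cleared, and only when that fails and the caller may wait, fall back to the mempool's guaranteed reserve. A sketch of that shape, assuming a cache and pool set up elsewhere:

    #include <linux/slab.h>
    #include <linux/mempool.h>

    /* Sketch of the bvec_alloc() try-then-wait fallback. */
    static void *alloc_fast_then_wait(struct kmem_cache *cache,
                                      mempool_t *pool, gfp_t gfp_mask)
    {
            void *p;

            /* opportunistic attempt: no sleeping, no I/O */
            p = kmem_cache_alloc(cache, gfp_mask & ~(__GFP_WAIT | __GFP_IO));
            if (p || !(gfp_mask & __GFP_WAIT))
                    return p;

            /* caller may wait: dip into the reserved pool */
            return mempool_alloc(pool, gfp_mask);
    }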
D | blk-mq.c |
      88    if (!(gfp & __GFP_WAIT))   in blk_mq_queue_enter()
      268   blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT,   in blk_mq_alloc_request()
      272   if (!rq && (gfp & __GFP_WAIT)) {   in blk_mq_alloc_request()
      1228  __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx);   in blk_mq_map_request()
|
D | blk-mq-tag.c | 267 if (!(data->gfp & __GFP_WAIT)) in bt_get()
|
D | cfq-iosched.c |
      3609  } else if (gfp_mask & __GFP_WAIT) {   in cfq_find_alloc_queue()
      4224  might_sleep_if(gfp_mask & __GFP_WAIT);   in cfq_set_request()
|
D | blk-core.c | 1165 if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) { in get_request()
|
/linux-4.1.27/arch/arm64/mm/ |
D | dma-mapping.c |
      103  if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {   in __dma_alloc_coherent()
      150  if (!coherent && !(flags & __GFP_WAIT)) {   in __dma_alloc()
|
/linux-4.1.27/arch/x86/kernel/ |
D | pci-dma.c | 104 if (flag & __GFP_WAIT) { in dma_generic_alloc_coherent()
|
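dma_generic_alloc_coherent() only retries (and lets reclaim run) when the caller passed __GFP_WAIT; atomic callers get a single attempt. A hypothetical sketch of gating a retry on the bit:

    #include <linux/gfp.h>

    /* Hypothetical: retry a coherent allocation only if we may sleep. */
    static struct page *dma_pages_retry(gfp_t flag, unsigned int order)
    {
            struct page *page;

            page = alloc_pages(flag | __GFP_NOWARN, order);
            if (page || !(flag & __GFP_WAIT))
                    return page;            /* atomic callers get one shot */

            /* sleepable: reclaim may make progress, try again loudly */
            return alloc_pages(flag, order);
    }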
/linux-4.1.27/Documentation/vm/ |
D | balance |
      3  Memory balancing is needed for non __GFP_WAIT as well as for non
      6  There are two reasons to be requesting non __GFP_WAIT allocations:
|
/linux-4.1.27/drivers/connector/ |
D | connector.c | 127 return netlink_unicast(dev->nls, skb, portid, !(gfp_mask&__GFP_WAIT)); in cn_netlink_send_mult()
|
/linux-4.1.27/lib/ |
D | radix-tree.c |
      190  if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) {   in radix_tree_node_alloc()
      289  WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT));   in radix_tree_preload()
      301  if (gfp_mask & __GFP_WAIT)   in radix_tree_maybe_preload()
|
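radix_tree_preload() insists on a sleepable mask (the WARN_ON_ONCE above) because it fills a per-CPU node reserve up front; the insertion itself can then run under a spinlock with an atomic tree mask such as GFP_NOFS & ~__GFP_WAIT, as the btrfs and fscache hits further down use. The canonical usage pattern:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    static RADIX_TREE(my_tree, GFP_NOFS & ~__GFP_WAIT); /* inserts run under a lock */
    static DEFINE_SPINLOCK(my_lock);

    static int insert_item(unsigned long index, void *item)
    {
            int err;

            err = radix_tree_preload(GFP_KERNEL);   /* may sleep here ... */
            if (err)
                    return err;

            spin_lock(&my_lock);
            err = radix_tree_insert(&my_tree, index, item); /* ... never here */
            spin_unlock(&my_lock);

            radix_tree_preload_end();
            return err;
    }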
D | idr.c |
      402  might_sleep_if(gfp_mask & __GFP_WAIT);   in idr_preload()
      456  might_sleep_if(gfp_mask & __GFP_WAIT);   in idr_alloc()
|
/linux-4.1.27/fs/cachefiles/ |
D | internal.h | 33 #define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
|
/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/ |
D | libcfs_private.h | 120 ((mask) & __GFP_WAIT) == 0)); \
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_system_heap.c | 30 __GFP_NORETRY) & ~__GFP_WAIT;
|
/linux-4.1.27/kernel/ |
D | smp.c | 672 might_sleep_if(gfp_flags & __GFP_WAIT); in on_each_cpu_cond()
|
D | audit.c |
      1360  if (gfp_mask & __GFP_WAIT) {   in audit_log_start()
      1362  gfp_mask &= ~__GFP_WAIT;   in audit_log_start()
      1369  if (gfp_mask & __GFP_WAIT && audit_backlog_wait_time) {   in audit_log_start()
|
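audit_log_start() clears __GFP_WAIT itself when the caller must not block (hit 1362) and only sleeps out the backlog limit when the bit survives (hit 1369). A hypothetical call site choosing the mask accordingly:

    #include <linux/audit.h>
    #include <linux/sched.h>

    /* Hypothetical call site: log an event, sleeping only when allowed. */
    static void log_event(bool can_sleep)
    {
            gfp_t gfp = can_sleep ? GFP_KERNEL : (GFP_KERNEL & ~__GFP_WAIT);
            struct audit_buffer *ab;

            ab = audit_log_start(current->audit_context, gfp, AUDIT_KERNEL);
            if (!ab)
                    return;         /* backlog full and we could not wait */
            audit_log_format(ab, "example event");
            audit_log_end(ab);
    }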
/linux-4.1.27/fs/fscache/ |
D | page.c |
      125  if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {   in __fscache_maybe_release_page()
      135  gfp &= ~__GFP_WAIT;   in __fscache_maybe_release_page()
|
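A releasepage path receives the reclaimer's mask and may only wait on a pending store when both __GFP_WAIT and __GFP_FS are present; otherwise it must refuse the release rather than block reclaim or re-enter the filesystem. Sketch of the gate:

    #include <linux/gfp.h>

    /* Sketch of the __fscache_maybe_release_page() gate. */
    static bool may_wait_for_store(gfp_t gfp)
    {
            /* reclaim that cannot sleep, or cannot re-enter the FS,
             * must not wait for the store to finish */
            return (gfp & __GFP_WAIT) && (gfp & __GFP_FS);
    }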
D | cookie.c | 114 INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT); in __fscache_acquire_cookie()
|
/linux-4.1.27/security/integrity/ima/ |
D | ima_crypto.c | 129 gfp_t gfp_mask = __GFP_WAIT | __GFP_NOWARN | __GFP_NORETRY; in ima_alloc_pages()
|
/linux-4.1.27/drivers/block/ |
D | osdblk.c | 274 gfpmask &= ~__GFP_WAIT; in bio_chain_clone()
|
D | pktcdvd.c |
      706  WRITE : READ, __GFP_WAIT);   in pkt_generic_packet()
      713  __GFP_WAIT);   in pkt_generic_packet()
|
D | nvme-core.c |
      1092  req = blk_mq_alloc_request(ns->queue, WRITE, (GFP_KERNEL|__GFP_WAIT),   in nvme_submit_io_cmd()
      1883  (GFP_KERNEL|__GFP_WAIT), false);   in nvme_user_cmd()
|
/linux-4.1.27/drivers/md/ |
D | dm-crypt.c |
      984   if (unlikely(gfp_mask & __GFP_WAIT))   in crypt_alloc_buffer()
      1000  gfp_mask |= __GFP_WAIT;   in crypt_alloc_buffer()
      1017  if (unlikely(gfp_mask & __GFP_WAIT))   in crypt_alloc_buffer()
|
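crypt_alloc_buffer() runs the escalation in the opposite direction: it starts without the bit and, when the non-blocking attempt fails, sets __GFP_WAIT for the retry, serializing the blocking attempts behind a mutex. Condensed sketch (simplified from the real function):

    #include <linux/mempool.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(alloc_lock);

    /* Condensed sketch of the crypt_alloc_buffer() escalation. */
    static struct page *alloc_page_escalating(mempool_t *page_pool)
    {
            gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
            struct page *page;

    retry:
            if (unlikely(gfp_mask & __GFP_WAIT))
                    mutex_lock(&alloc_lock);        /* one blocking waiter at a time */

            page = mempool_alloc(page_pool, gfp_mask);
            if (!page) {
                    /* first, non-blocking pass failed: escalate and retry;
                     * with __GFP_WAIT set, mempool_alloc() will not fail */
                    gfp_mask |= __GFP_WAIT;
                    goto retry;
            }

            if (unlikely(gfp_mask & __GFP_WAIT))
                    mutex_unlock(&alloc_lock);
            return page;
    }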
/linux-4.1.27/fs/btrfs/ |
D | extent_io.c |
      597   if (!prealloc && (mask & __GFP_WAIT)) {   in clear_extent_bit()
      721   if (mask & __GFP_WAIT)   in clear_extent_bit()
      853   if (!prealloc && (mask & __GFP_WAIT)) {   in __set_extent_bit()
      1031  if (mask & __GFP_WAIT)   in __set_extent_bit()
      1079  if (!prealloc && (mask & __GFP_WAIT)) {   in convert_extent_bit()
      1256  if (mask & __GFP_WAIT)   in convert_extent_bit()
      4317  if ((mask & __GFP_WAIT) &&   in try_release_extent_mapping()
|
D | volumes.c |
      156  INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);   in __alloc_device()
      157  INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);   in __alloc_device()
|
D | disk-io.c | 2546 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); in open_ctree()
|
/linux-4.1.27/drivers/block/paride/ |
D | pd.c | 724 rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT); in pd_special_command()
|
/linux-4.1.27/fs/nfs/ |
D | file.c | 494 if ((gfp & __GFP_WAIT) && in nfs_release_page()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_qm.c | 528 if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) in xfs_qm_shrink_scan()
|
/linux-4.1.27/arch/arm/mm/ |
D | dma-mapping.c |
      650   else if (!(gfp & __GFP_WAIT))   in __dma_alloc()
      1354  if (!(gfp & __GFP_WAIT))   in arm_iommu_alloc_attrs()
|
/linux-4.1.27/net/rxrpc/ |
D | ar-connection.c | 503 if (!(gfp & __GFP_WAIT)) { in rxrpc_connect_call()
|
/linux-4.1.27/drivers/scsi/ |
D | scsi_lib.c |
      224  req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);   in scsi_execute()
      230  buffer, bufflen, __GFP_WAIT))   in scsi_execute()
|
/linux-4.1.27/drivers/mtd/ |
D | mtdcore.c | 1183 gfp_t flags = __GFP_NOWARN | __GFP_WAIT | in mtd_kmalloc_up_to()
|
/linux-4.1.27/net/core/ |
D | skbuff.c |
      491   if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {   in __alloc_rx_skb()
      4391  if (gfp_head & __GFP_WAIT)   in alloc_skb_with_frags()
      4406  page = alloc_pages((gfp_mask & ~__GFP_WAIT) |   in alloc_skb_with_frags()
|
D | sock.c | 1882 pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP | in skb_page_frag_refill()
|
/linux-4.1.27/drivers/infiniband/core/ |
D | sa_query.c | 621 bool preload = !!(gfp_mask & __GFP_WAIT); in send_mad()
|
/linux-4.1.27/net/sctp/ |
D | associola.c | 1591 bool preload = !!(gfp & __GFP_WAIT); in sctp_assoc_set_id()
|
/linux-4.1.27/drivers/block/drbd/ |
D | drbd_bitmap.c | 1019 page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT); in bm_page_io_async()
|
D | drbd_receiver.c | 360 page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT)); in drbd_alloc_peer_req()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_init.c | 1683 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; in qib_setup_eagerbufs()
|
/linux-4.1.27/include/net/ |
D | sock.h | 2049 if (sk->sk_allocation & __GFP_WAIT) in sk_page_frag()
|
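The hit above is the whole decision in sk_page_frag(): a socket whose allocation mask can wait is guaranteed process context and may share the task's page fragment; an atomic socket needs its own. Reconstructed from the surrounding helper (matching the 4.1 source as far as the hit shows):

    #include <net/sock.h>
    #include <linux/sched.h>

    /* Reconstruction of the helper around the hit above. */
    static inline struct page_frag *sk_page_frag(struct sock *sk)
    {
            if (sk->sk_allocation & __GFP_WAIT)
                    return &current->task_frag;     /* process context guaranteed */
            return &sk->sk_frag;                    /* atomic: per-socket frag */
    }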
/linux-4.1.27/drivers/firewire/ |
D | core-cdev.c | 489 bool preload = !!(gfp_mask & __GFP_WAIT); in add_client_resource()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_file_ops.c | 908 gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP; in ipath_create_user_egr()
|
/linux-4.1.27/fs/ext3/ |
D | super.c | 753 wait & ~__GFP_WAIT); in bdev_try_to_free_page()
|
/linux-4.1.27/kernel/locking/ |
D | lockdep.c | 2741 if (!(gfp_mask & __GFP_WAIT)) in __lockdep_trace_alloc()
|
/linux-4.1.27/net/netlink/ |
D | af_netlink.c | 2075 if (info.congested && (allocation & __GFP_WAIT)) in netlink_broadcast_filtered()
|
/linux-4.1.27/drivers/iommu/ |
D | amd_iommu.c | 2943 if (!(flag & __GFP_WAIT)) in alloc_coherent()
|
D | intel-iommu.c | 3270 if (flags & __GFP_WAIT) { in intel_alloc_coherent()
|
/linux-4.1.27/drivers/usb/host/ |
D | u132-hcd.c | 2250 if (__GFP_WAIT & mem_flags) { in u132_urb_enqueue()
|
/linux-4.1.27/drivers/block/mtip32xx/ |
D | mtip32xx.c | 182 rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true); in mtip_get_int_command()
|
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_cmn.c | 674 if (unlikely(gfp_mask & __GFP_WAIT)) in bnx2x_frag_alloc()
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_gem.c | 2072 gfp &= ~(__GFP_IO | __GFP_WAIT); in i915_gem_object_get_pages_gtt()
|