/linux-4.1.27/drivers/staging/lustre/include/linux/libcfs/linux/
  linux-mem.h
    63: #define memory_pressure_get() (current->flags & PF_MEMALLOC)
    64: #define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
    65: #define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)

/linux-4.1.27/kernel/
  softirq.c
    241: * Mask out PF_MEMALLOC s current task context is borrowed for the    [__do_softirq()]
    242: * softirq. A softirq handled such as network RX might set PF_MEMALLOC    [__do_softirq()]
    245: current->flags &= ~PF_MEMALLOC;    [__do_softirq()]
    301: tsk_restore_flags(current, old_flags, PF_MEMALLOC);    [__do_softirq()]

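The softirq.c hits show the save/clear/restore shape used when a task context is borrowed for softirq work: PF_MEMALLOC is masked out before handlers run and the original bit is put back afterwards with tsk_restore_flags(). A minimal sketch of that pattern, based only on the lines quoted above (the function below is illustrative, not the verbatim __do_softirq() body):

    #include <linux/sched.h>    /* current, PF_MEMALLOC, tsk_restore_flags() */

    static void run_on_borrowed_context(void (*handle_pending)(void))
    {
            unsigned long old_flags = current->flags;

            /* Drop PF_MEMALLOC while running on the borrowed task context;
             * a handler (e.g. network RX for a swap socket) may set it again. */
            current->flags &= ~PF_MEMALLOC;

            handle_pending();

            /* Restore PF_MEMALLOC exactly as the interrupted task had it. */
            tsk_restore_flags(current, old_flags, PF_MEMALLOC);
    }
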
/linux-4.1.27/drivers/block/
  nbd.c
    163: current->flags |= PF_MEMALLOC;    [sock_xmit()]
    211: tsk_restore_flags(current, pflags, PF_MEMALLOC);    [sock_xmit()]

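nbd.c, iscsi_tcp.c and cifs/connect.c (all below) bracket their socket I/O the same way: set PF_MEMALLOC so allocations made while pushing out writeback data may dip into memory reserves, then restore only that bit; net/core/sock.c and dev.c (below) use the same bracket on the receive side. A sketch of the bracket with a hypothetical helper name (the real drivers inline it in sock_xmit(), iscsi_sw_tcp_pdu_xmit() and so on):

    #include <linux/sched.h>

    /* Hypothetical wrapper showing the transmit-side PF_MEMALLOC bracket. */
    static int xmit_with_memalloc(int (*do_xmit)(void *arg), void *arg)
    {
            unsigned long pflags = current->flags;
            int ret;

            current->flags |= PF_MEMALLOC;          /* may use memory reserves */
            ret = do_xmit(arg);
            tsk_restore_flags(current, pflags, PF_MEMALLOC);

            return ret;
    }
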
/linux-4.1.27/drivers/mmc/card/
  queue.c
    54: current->flags |= PF_MEMALLOC;    [mmc_queue_thread()]

/linux-4.1.27/mm/
  swap_state.c
    180: * Radix-tree node allocations from PF_MEMALLOC contexts could    [add_to_swap()]

  page_alloc.c
    1985: * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
    2326: (current->flags & (PF_MEMALLOC | PF_EXITING)))    [warn_alloc_failed()]
    2470: current->flags |= PF_MEMALLOC;    [__alloc_pages_direct_compact()]
    2473: current->flags &= ~PF_MEMALLOC;    [__alloc_pages_direct_compact()]
    2536: current->flags |= PF_MEMALLOC;    [__perform_reclaim()]
    2546: current->flags &= ~PF_MEMALLOC;    [__perform_reclaim()]
    2654: else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))    [gfp_to_alloc_flags()]
    2657: ((current->flags & PF_MEMALLOC) ||    [gfp_to_alloc_flags()]
    2761: if (current->flags & PF_MEMALLOC)    [__alloc_pages_slowpath()]
    5788: * __GFP_HIGH and PF_MEMALLOC allocations usually don't    [for_each_zone()]

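In page_alloc.c the flag is used in both directions: __alloc_pages_direct_compact() and __perform_reclaim() set it around direct compaction/reclaim, while gfp_to_alloc_flags() and __alloc_pages_slowpath() test it so a PF_MEMALLOC caller is granted access to reserves and is never sent back into reclaim recursively. A simplified sketch combining the bracket and the recursion check, with the actual reclaim call abstracted behind a callback (hypothetical helper, not the kernel's own code):

    #include <linux/sched.h>

    /* Run one round of reclaim with PF_MEMALLOC set; refuse to recurse
     * if the flag is already set (i.e. we are already inside reclaim). */
    static unsigned long reclaim_bracket(unsigned long (*reclaim_fn)(void))
    {
            unsigned long progress;

            if (current->flags & PF_MEMALLOC)
                    return 0;                       /* no reclaim recursion */

            current->flags |= PF_MEMALLOC;
            progress = reclaim_fn();                /* try_to_free_pages() etc. */
            current->flags &= ~PF_MEMALLOC;

            return progress;
    }
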
  vmscan.c
    3379: tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;    [kswapd()]
    3436: tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);    [kswapd()]
    3494: p->flags |= PF_MEMALLOC;    [shrink_all_memory()]
    3503: p->flags &= ~PF_MEMALLOC;    [shrink_all_memory()]
    3683: p->flags |= PF_MEMALLOC | PF_SWAPWRITE;    [__zone_reclaim()]
    3699: current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);    [__zone_reclaim()]
    3729: if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))    [zone_reclaim()]

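vmscan.c shows the long-lived variant of the same idea: kswapd() tags itself PF_MEMALLOC (plus PF_SWAPWRITE and PF_KSWAPD) for its whole lifetime rather than per call, and clears the bits before exiting. A sketch of that thread-setup shape, assuming a generic kthread body (illustrative only):

    #include <linux/sched.h>

    /* Reclaim threads mark themselves up front and clear the bits on exit. */
    static int reclaim_thread_fn(void *data)
    {
            struct task_struct *tsk = current;

            tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

            /* ... main reclaim loop: sleep until woken, then balance zones ... */

            tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
            return 0;
    }
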
  migrate.c
    790: if (current->flags & PF_MEMALLOC)    [__unmap_and_move()]

/linux-4.1.27/drivers/scsi/
  iscsi_tcp.c
    377: current->flags |= PF_MEMALLOC;    [iscsi_sw_tcp_pdu_xmit()]
    390: tsk_restore_flags(current, pflags, PF_MEMALLOC);    [iscsi_sw_tcp_pdu_xmit()]

/linux-4.1.27/fs/
  mpage.c
    75: if (bio == NULL && (current->flags & PF_MEMALLOC)) {    [mpage_alloc()]

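mpage.c, nfs/blocklayout and btrfs extent_io.c (both below) share a fallback on the allocation-failure path: if the caller is the reclaim path itself (PF_MEMALLOC), a failed bio allocation is retried with progressively smaller vector counts rather than giving up. A sketch of that retry loop (hypothetical helper name; the real call sites inline it):

    #include <linux/bio.h>
    #include <linux/sched.h>

    /* Retry a failed bio allocation with fewer vectors when running
     * as part of memory reclaim, where failure would stall writeback. */
    static struct bio *alloc_bio_shrinking(gfp_t gfp_mask, int nr_vecs)
    {
            struct bio *bio = bio_alloc(gfp_mask, nr_vecs);

            if (!bio && (current->flags & PF_MEMALLOC)) {
                    while (!bio && (nr_vecs /= 2))
                            bio = bio_alloc(gfp_mask, nr_vecs);
            }
            return bio;
    }
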
/linux-4.1.27/fs/nfs/blocklayout/
  blocklayout.c
    125: if (!bio && (current->flags & PF_MEMALLOC)) {    [bl_alloc_init_bio()]

/linux-4.1.27/drivers/mtd/nand/
  nandsim.c
    1379: if (current->flags & PF_MEMALLOC)    [set_memalloc()]
    1381: current->flags |= PF_MEMALLOC;    [set_memalloc()]
    1388: current->flags &= ~PF_MEMALLOC;    [clear_memalloc()]

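nandsim.c wraps the flag in a set/clear pair that only clears PF_MEMALLOC if it was the one to set it, so an outer PF_MEMALLOC context is preserved. A reconstruction consistent with the three lines quoted above (the exact bodies in the original file may differ slightly):

    #include <linux/sched.h>

    /* Returns 1 if we set PF_MEMALLOC here, 0 if it was already set. */
    static int set_memalloc(void)
    {
            if (current->flags & PF_MEMALLOC)
                    return 0;
            current->flags |= PF_MEMALLOC;
            return 1;
    }

    /* Only clear the flag if set_memalloc() above actually set it. */
    static void clear_memalloc(int memalloc)
    {
            if (memalloc)
                    current->flags &= ~PF_MEMALLOC;
    }
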
/linux-4.1.27/fs/ext4/
  inode.c
    870: * because the caller may be PF_MEMALLOC.
    1861: if (current->flags & PF_MEMALLOC) {    [ext4_writepage()]
    1867: WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))    [ext4_writepage()]
    1868: == PF_MEMALLOC);    [ext4_writepage()]
    4617: if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))    [ext4_write_inode()]
    4622: jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");    [ext4_write_inode()]

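The ext4 hits (and the xfs_aops.c and btrfs hits below) are the consumer side in the filesystems: ->writepage() treats a PF_MEMALLOC caller specially, redirtying pages that would need journal or allocation work instead of writing them, and warning if the caller turns out to be direct reclaim (PF_MEMALLOC set without PF_KSWAPD). A sketch of that guard under those assumptions (hypothetical helper; the real checks are inlined in ext4_writepage(), xfs_vm_writepage() and btrfs_writepage()):

    #include <linux/sched.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    /* Returns 0 after redirtying the page if writeback was entered from
     * memory reclaim; returns 1 if the caller may write the page normally. */
    static int writepage_reclaim_guard(struct page *page,
                                       struct writeback_control *wbc)
    {
            if (current->flags & PF_MEMALLOC) {
                    /* Direct reclaim (not kswapd) should not reach ->writepage(). */
                    WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD))
                                    == PF_MEMALLOC);
                    redirty_page_for_writepage(wbc, page);
                    unlock_page(page);
                    return 0;
            }
            return 1;
    }
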
/linux-4.1.27/fs/xfs/
  xfs_aops.c
    969: if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==    [xfs_vm_writepage()]
    970: PF_MEMALLOC))    [xfs_vm_writepage()]

  xfs_trans_ail.c
    499: current->flags |= PF_MEMALLOC;    [xfsaild()]

/linux-4.1.27/drivers/staging/lustre/lustre/lov/
  lov_io.c
    588: !(current->flags & PF_MEMALLOC);    [lov_io_submit()]

/linux-4.1.27/net/core/
  sock.c
    374: current->flags |= PF_MEMALLOC;    [__sk_backlog_rcv()]
    376: tsk_restore_flags(current, pflags, PF_MEMALLOC);    [__sk_backlog_rcv()]

  dev.c
    3813: * Use PF_MEMALLOC as this saves us from propagating the allocation    [__netif_receive_skb()]
    3816: current->flags |= PF_MEMALLOC;    [__netif_receive_skb()]
    3818: tsk_restore_flags(current, pflags, PF_MEMALLOC);    [__netif_receive_skb()]

/linux-4.1.27/fs/ceph/
  addr.c
    506: WARN_ON((current->flags & PF_MEMALLOC) == 0);    [writepage_nounlock()]

/linux-4.1.27/fs/reiserfs/
  inode.c
    1789: if (wbc->sync_mode == WB_SYNC_ALL && !(current->flags & PF_MEMALLOC)) {    [reiserfs_write_inode()]
    2540: /* no logging allowed when nonblocking or from PF_MEMALLOC */    [reiserfs_write_full_page()]
    2541: if (checked && (current->flags & PF_MEMALLOC)) {    [reiserfs_write_full_page()]

/linux-4.1.27/fs/ext3/
  inode.c
    1192: * be PF_MEMALLOC.
    3204: if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))    [ext3_write_inode()]
    3208: jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");    [ext3_write_inode()]

/linux-4.1.27/fs/jbd/
  transaction.c
    1472: if (handle->h_sync && !(current->flags & PF_MEMALLOC))    [journal_stop()]

/linux-4.1.27/fs/jbd2/
  transaction.c
    1652: if (handle->h_sync && !(current->flags & PF_MEMALLOC))    [jbd2_journal_stop()]

/linux-4.1.27/include/linux/
  sched.h
    1975: #define PF_MEMALLOC 0x00000800 /* Allocating memory */    [macro]

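PF_MEMALLOC is just a bit in task_struct::flags, which is why every call site above manipulates it with plain bitwise operations on "current". The tsk_restore_flags() calls seen throughout the listing restore only the requested bit from a saved copy; a paraphrase of that behaviour under a different name (the real helper also lives in sched.h in this tree, so the body below is illustrative, not necessarily verbatim):

    #include <linux/sched.h>

    /* Restore only the bits named in @flags from @orig_flags, leaving all
     * other task flags untouched, e.g.
     *     unsigned long pflags = current->flags;
     *     current->flags |= PF_MEMALLOC;
     *     ...
     *     restore_selected_flags(current, pflags, PF_MEMALLOC);
     */
    static inline void restore_selected_flags(struct task_struct *task,
                                              unsigned long orig_flags,
                                              unsigned long flags)
    {
            task->flags &= ~flags;                  /* clear the selected bits    */
            task->flags |= orig_flags & flags;      /* copy back their old values */
    }
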
/linux-4.1.27/fs/xfs/libxfs/
  xfs_btree.c
    2537: new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;    [xfs_btree_split_worker()]

/linux-4.1.27/fs/btrfs/
  disk-io.c
    3881: if (current->flags & PF_MEMALLOC)    [__btrfs_btree_balance_dirty()]

  extent_io.c
    2699: if (bio == NULL && (current->flags & PF_MEMALLOC)) {    [btrfs_bio_alloc()]

  inode.c
    8424: if (current->flags & PF_MEMALLOC) {    [btrfs_writepage()]

/linux-4.1.27/fs/cifs/
  connect.c
    846: current->flags |= PF_MEMALLOC;    [cifs_demultiplex_thread()]

/linux-4.1.27/kernel/locking/
  lockdep.c
    2745: if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))    [__lockdep_trace_alloc()]