alloc_pages() references in linux-4.1.27, grouped by directory (source line: snippet  [enclosing context]):

/linux-4.1.27/mm/

percpu-km.c
    26: * kernel memory block using alloc_pages(), memory will be wasted if
    59: pages = alloc_pages(GFP_KERNEL, order_base_2(nr_pages));  [pcpu_create_chunk()]
    94: size_t nr_pages, alloc_pages;  [pcpu_verify_alloc_info(), local]
    103: alloc_pages = roundup_pow_of_two(nr_pages);  [pcpu_verify_alloc_info()]
    105: if (alloc_pages > nr_pages)  [pcpu_verify_alloc_info()]
    107: alloc_pages - nr_pages);  [pcpu_verify_alloc_info()]

slob.c
    15: * The slob heap is a set of linked list of pages from alloc_pages(),
    30: * alloc_pages() directly, allocating compound pages so the page order
    40: * calling alloc_pages(). As SLAB objects know their size, no separate
    199: page = alloc_pages(gfp, order);  [slob_new_pages()]

sparse-vmemmap.c
    61: page = alloc_pages(  [vmemmap_alloc_block()]

internal.h
    114: * between functions involved in allocations, including the alloc_pages*

mempool.c
    485: return alloc_pages(gfp_mask, order);  [mempool_alloc_pages()]

sparse.c
    625: page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));  [__kmalloc_section_memmap()]

huge_memory.c
    186: zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,  [get_huge_zero_page()]
    2366: return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),  [alloc_hugepage()]

page_alloc.c
    2962: page = alloc_pages(gfp_mask, order);  [__get_free_pages()]
    3011: page = alloc_pages(gfp_mask, order);  [alloc_kmem_pages()]
    3067: * This function is similar to alloc_pages(), except that it allocates the
    3068: * minimum number of pages to satisfy the request. alloc_pages() can only

hugetlb.c
    1220: * tries to grow the static hugepage pool while alloc_pages() is  [alloc_buddy_huge_page()]
    1248: page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|  [alloc_buddy_huge_page()]

filemap.c
    656: return alloc_pages(gfp, 0);  [__page_cache_alloc()]

slub.c
    1324: page = alloc_pages(flags, order);  [alloc_slab_page()]

vmscan.c
    2855: * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't  [try_to_free_mem_cgroup_pages()]
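
Several hits in this block revolve around the fact that alloc_pages() only hands out power-of-two blocks: percpu-km.c rounds its chunk size up with order_base_2()/roundup_pow_of_two() and warns about the wasted tail, and the page_alloc.c comment at 3067-3068 is the kerneldoc of alloc_pages_exact(), which trims that tail. A minimal sketch of the difference (illustrative module-style code, not from the tree; the 10-page request size is arbitrary):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: obtain roughly 10 pages two different ways. */
    static void compare_allocators(void)
    {
        size_t size = 10 * PAGE_SIZE;
        struct page *pages;
        void *buf;

        /*
         * alloc_pages() works in power-of-two blocks: get_order() for
         * 10 pages is 4, so 16 pages come back and 6 of them are wasted.
         */
        pages = alloc_pages(GFP_KERNEL, get_order(size));
        if (pages)
            __free_pages(pages, get_order(size));

        /*
         * alloc_pages_exact() also allocates a power-of-two block
         * internally, but splits it and returns the unused tail to the
         * buddy allocator, so only the 10 requested pages stay allocated.
         */
        buf = alloc_pages_exact(size, GFP_KERNEL);
        if (buf)
            free_pages_exact(buf, size);
    }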

/linux-4.1.27/arch/cris/include/asm/

pgalloc.h
    34: pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/arm/include/asm/

pgalloc.h
    101: pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);  [pte_alloc_one()]
    103: pte = alloc_pages(PGALLOC_GFP, 0);  [pte_alloc_one()]

/linux-4.1.27/include/linux/

page_ext.h
    22: * the flag before alloc_pages().

gfp.h
    325: alloc_pages(gfp_t gfp_mask, unsigned int order)  [alloc_pages() function]
    335: #define alloc_pages(gfp_mask, order) \  [macro]
    338: alloc_pages(gfp_mask, order)
    340: alloc_pages(gfp_mask, order)
    342: #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

pagemap.h
    219: return alloc_pages(gfp, 0);  [__page_cache_alloc()]
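
The include/linux/gfp.h hits are the definitions themselves: alloc_pages(gfp_mask, order) returns a struct page * covering 2^order contiguous pages (listed here both as a function and as a macro, matching the NUMA and UMA builds), and alloc_page(gfp_mask) is simply the order-0 case. A minimal usage sketch of that API (hypothetical demo function; assumes a GFP_KERNEL allocation, i.e. lowmem, so page_address() is valid):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/errno.h>

    /* Illustrative only: allocate a 4-page (order-2) block and free it again. */
    static int demo_alloc(void)
    {
        struct page *page;
        void *addr;

        page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);  /* 2^2 = 4 pages */
        if (!page)
            return -ENOMEM;

        addr = page_address(page);          /* kernel virtual address (lowmem) */
        memset(addr, 0xaa, 4 * PAGE_SIZE);  /* use the buffer */

        __free_pages(page, 2);              /* order must match the allocation */
        return 0;
    }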

/linux-4.1.27/arch/score/include/asm/

pgalloc.h
    56: pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);  [pte_alloc_one()]

/linux-4.1.27/arch/nios2/include/asm/

pgalloc.h
    56: pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);  [pte_alloc_one()]

/linux-4.1.27/arch/m68k/include/asm/

motorola_pgalloc.h
    35: page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);  [pte_alloc_one()]

sun3_pgalloc.h
    52: struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);  [pte_alloc_one()]

mcf_pgalloc.h
    54: struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/metag/include/asm/

pgalloc.h
    54: pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/mn10300/mm/

pgtable.c
    77: pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);  [pte_alloc_one()]
    79: pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/frv/mm/

pgalloc.c
    36: page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);  [pte_alloc_one()]
    38: page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/unicore32/include/asm/

pgalloc.h
    53: pte = alloc_pages(PGALLOC_GFP, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/openrisc/include/asm/

pgalloc.h
    80: pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);  [pte_alloc_one()]
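
Nearly every hit from arch/cris/ down to arch/openrisc/ is the same idiom: pte_alloc_one() grabs one zeroed page (order 0, or PTE_ORDER) for a page table, with per-architecture extras such as __GFP_HIGHMEM, __GFP_REPEAT or GFP_DMA. A condensed, architecture-neutral sketch of that shape (not any one port's code; pgtable_page_ctor() is the generic mm helper most of these implementations call after the allocation):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /*
     * Shape shared by the pte_alloc_one() implementations listed above:
     * a single zeroed page that is then set up as a page-table page.
     * (Sketch only; real versions add arch-specific flags and may use a
     * non-zero PTE_ORDER.)
     */
    static struct page *pte_alloc_one_sketch(void)
    {
        struct page *pte;

        pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!pte)
            return NULL;
        if (!pgtable_page_ctor(pte)) {      /* init ptl lock / accounting */
            __free_pages(pte, 0);
            return NULL;
        }
        return pte;
    }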

/linux-4.1.27/drivers/staging/android/ion/

ion_page_pool.c
    29: struct page *page = alloc_pages(pool->gfp_mask, pool->order);  [ion_page_pool_alloc_pages()]

ion_system_heap.c
    70: page = alloc_pages(gfp_flags | __GFP_COMP, order);  [alloc_buffer_page()]
    324: page = alloc_pages(low_order_gfp_flags, order);  [ion_system_contig_heap_allocate()]

/linux-4.1.27/arch/mips/include/asm/

pgalloc.h
    82: pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);  [pte_alloc_one()]

/linux-4.1.27/arch/arm64/include/asm/

pgalloc.h
    85: pte = alloc_pages(PGALLOC_GFP, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/avr32/mm/

dma-coherent.c
    56: page = alloc_pages(gfp, order);  [__dma_alloc()]

/linux-4.1.27/arch/m68k/kernel/

dma.c
    34: page = alloc_pages(flag, order);  [dma_alloc_coherent()]

/linux-4.1.27/drivers/infiniband/hw/ipath/

ipath_dma.c
    153: p = alloc_pages(flag, get_order(size));  [ipath_dma_alloc_coherent()]

ipath_kernel.h
    126: /* how many alloc_pages() chunks in port_rcvegrbuf_pages */

/linux-4.1.27/drivers/infiniband/hw/qib/

qib_dma.c
    143: p = alloc_pages(flag, get_order(size));  [qib_dma_alloc_coherent()]

qib.h
    178: /* how many alloc_pages() chunks in rcvegrbuf_pages */
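
The ipath_dma.c and qib_dma.c hits (like the arch dma.c/dma-coherent.c files in this listing) show the usual dma_alloc_coherent() backend shape: get_order() turns a byte count into a page order, alloc_pages() supplies the block, and the caller gets back the kernel virtual address. A rough sketch with hypothetical names and no dma_addr_t handling:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /*
     * Rough shape of the *_dma_alloc_coherent() hits above: round the byte
     * count up to a power-of-two page order, allocate, and return the
     * kernel virtual address. (Sketch only; real implementations also fill
     * in the DMA handle and often add __GFP_ZERO or GFP_DMA.)
     */
    static void *coherent_alloc_sketch(size_t size, gfp_t flag)
    {
        int order = get_order(size);            /* e.g. 3 pages -> order 2 */
        struct page *p = alloc_pages(flag, order);

        return p ? page_address(p) : NULL;
    }

    static void coherent_free_sketch(void *cpu_addr, size_t size)
    {
        free_pages((unsigned long)cpu_addr, get_order(size));
    }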

/linux-4.1.27/arch/x86/include/asm/

pgalloc.h
    84: page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);  [pmd_alloc_one()]

/linux-4.1.27/arch/tile/include/asm/

homecache.h
    88: * alloc_pages() / alloc_pages_node() functions, which set and clear

/linux-4.1.27/arch/microblaze/include/asm/

pgalloc.h
    124: ptepage = alloc_pages(flags, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/powerpc/platforms/cell/spufs/

lscsa_alloc.c
    95: csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,  [spu_alloc_lscsa()]

/linux-4.1.27/fs/logfs/

super.c
    323: super->s_erase_page = alloc_pages(GFP_KERNEL, 0);  [logfs_get_sb_final()]
    617: emergency_page = alloc_pages(GFP_KERNEL, 0);  [logfs_init()]

/linux-4.1.27/fs/ramfs/

file-nommu.c
    87: pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);  [ramfs_nommu_expand_for_mapping()]

/linux-4.1.27/fs/exofs/

inode.c
    56: unsigned alloc_pages;  [member in struct page_collect]
    77: pcol->alloc_pages = 0;  [_pcol_init()]
    90: pcol->alloc_pages = 0;  [_pcol_reset()]
    116: pcol->alloc_pages = pages;  [pcol_try_alloc()]
    140: if (unlikely(pcol->nr_pages >= pcol->alloc_pages))  [pcol_add_page()]

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/

icm.c
    104: page = alloc_pages(gfp_mask, order);  [mlx4_alloc_icm_pages()]

en_rx.c
    65: page = alloc_pages(gfp, order);  [mlx4_alloc_pages()]

/linux-4.1.27/arch/tile/mm/

homecache.c
    389: page = alloc_pages(gfp_mask, order);  [homecache_alloc_pages()]

pgtable.c
    238: p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);  [pgtable_alloc_one()]

/linux-4.1.27/arch/metag/kernel/

dma.c
    205: page = alloc_pages(gfp, order);  [dma_alloc_coherent()]

/linux-4.1.27/arch/s390/pci/

pci_dma.c
    346: page = alloc_pages(flag, get_order(size));  [s390_dma_alloc()]

/linux-4.1.27/arch/mips/mm/

dma-default.c
    149: page = alloc_pages(gfp, get_order(size));  [mips_dma_alloc_coherent()]

/linux-4.1.27/arch/powerpc/mm/

dma-noncoherent.c
    198: page = alloc_pages(gfp, order);  [__dma_alloc_coherent()]

pgtable_32.c
    127: ptepage = alloc_pages(flags, 0);  [pte_alloc_one()]

/linux-4.1.27/arch/arm64/mm/

dma-mapping.c
    368: page = alloc_pages(GFP_DMA, pool_size_order);  [atomic_pool_init()]

/linux-4.1.27/arch/s390/mm/

pgtable.c
    36: struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);  [crst_table_alloc()]
    179: page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);  [gmap_alloc()]
    290: page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);  [gmap_alloc_table()]

/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/

rx.c
    289: page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);  [iwl_pcie_rxq_alloc_rbs()]
    292: IWL_DEBUG_INFO(trans, "alloc_pages failed, "  [iwl_pcie_rxq_alloc_rbs()]
    298: IWL_CRIT(trans, "Failed to alloc_pages with %s."  [iwl_pcie_rxq_alloc_rbs()]

trans.c
    127: page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,  [iwl_pcie_alloc_fw_monitor()]

/linux-4.1.27/drivers/net/ethernet/tile/

tilegx.c
    1009: /* Round up to 64KB and then use alloc_pages() so we get the  [create_buffer_stack()]
    1383: headers_page = alloc_pages(GFP_KERNEL, headers_order);  [tile_net_init_egress()]
    1395: edescs_page = alloc_pages(GFP_KERNEL, edescs_order);  [tile_net_init_egress()]
    1406: equeue_page = alloc_pages(GFP_KERNEL, equeue_order);  [tile_net_init_egress()]

tilepro.c
    2316: priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);  [tile_net_dev_init()]

/linux-4.1.27/drivers/target/

target_core_rd.c
    183: pg = alloc_pages(GFP_KERNEL, 0);  [rd_allocate_sgl_table()]

/linux-4.1.27/drivers/gpu/drm/armada/

armada_gem.c
    101: struct page *p = alloc_pages(GFP_KERNEL, order);  [armada_gem_linear_back()]

/linux-4.1.27/arch/x86/mm/

pgtable.c
    28: pte = alloc_pages(__userpte_alloc_gfp, 0);  [pte_alloc_one()]

pageattr.c
    687: base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);  [split_large_page()]

/linux-4.1.27/arch/powerpc/sysdev/

dart_iommu.c
    444: p = alloc_pages(GFP_KERNEL, 21 - PAGE_SHIFT);  [iommu_init_late_dart()]

/linux-4.1.27/drivers/irqchip/

irq-gic-v3-its.c
    791: gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,  [its_alloc_lpi_tables()]
    994: pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,  [its_cpu_init_lpis()]

/linux-4.1.27/drivers/staging/lustre/lustre/include/

obd_support.h
    826: CERROR("alloc_pages of '" #ptr "' %d page(s) / %llu bytes "\
    838: CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \

/linux-4.1.27/drivers/mmc/card/

mmc_test.c
    44: * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
    359: page = alloc_pages(flags, order);  [mmc_test_alloc_mem()]
    2871: test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);

/linux-4.1.27/arch/arm/mm/

dma-mapping.c
    259: page = alloc_pages(gfp, order);  [__dma_alloc_buffer()]
    1161: pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);  [__iommu_alloc_buffer()]
    1171: pages[i] = alloc_pages(gfp, 0);  [__iommu_alloc_buffer()]
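
The arm/mm/dma-mapping.c __iommu_alloc_buffer() hits (1161 and 1171), like the iwlwifi RX allocator earlier in the listing, first try a larger contiguous order with flags that suppress retries and warnings, then drop back to order-0 pages when that fails. A condensed sketch of that opportunistic pattern (the order value and function name are invented for illustration):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /*
     * Opportunistic high-order allocation with an order-0 fallback, in the
     * spirit of the __iommu_alloc_buffer() and iwlwifi RX hits above.
     */
    static struct page *alloc_big_or_small(unsigned int *order_out)
    {
        unsigned int order = 4;         /* try 16 contiguous pages first */
        struct page *page;

        /* Don't retry hard and don't warn: failure here is expected. */
        page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, order);
        if (!page) {
            order = 0;                  /* fall back to single pages */
            page = alloc_pages(GFP_KERNEL, order);
        }
        if (page)
            *order_out = order;
        return page;
    }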

/linux-4.1.27/drivers/media/v4l2-core/

videobuf2-dma-sg.c
    79: pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |  [vb2_dma_sg_alloc_compacted()]

/linux-4.1.27/drivers/net/ethernet/amd/xgbe/

xgbe-desc.c
    268: pages = alloc_pages(gfp, order);  [xgbe_alloc_pages()]

/linux-4.1.27/drivers/infiniband/hw/mthca/

mthca_memfree.c
    115: page = alloc_pages(gfp_mask | __GFP_ZERO, order);  [mthca_alloc_icm_pages()]

/linux-4.1.27/arch/x86/kernel/

amd_gart_64.c
    488: page = alloc_pages(flag | __GFP_ZERO, get_order(size));  [gart_alloc_coherent()]

/linux-4.1.27/drivers/char/agp/

i460-agp.c
    371: lp->page = alloc_pages(GFP_KERNEL, order);  [i460_alloc_large_page()]

intel-gtt.c
    147: page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);  [i8xx_alloc_pages()]

/linux-4.1.27/arch/arm/kernel/

signal.c
    608: page = alloc_pages(GFP_KERNEL, 0);  [get_signal_page()]

/linux-4.1.27/drivers/iommu/

fsl_pamu.c
    1093: p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);  [fsl_pamu_probe()]

amd_iommu.c
    2930: page = alloc_pages(flag, get_order(size));  [alloc_coherent()]
    2941: page = alloc_pages(flag | __GFP_NOWARN, get_order(size));  [alloc_coherent()]

intel-iommu.c
    3282: page = alloc_pages(flags, order);  [intel_alloc_coherent()]

/linux-4.1.27/drivers/net/ethernet/sfc/

rx.c
    166: page = alloc_pages(__GFP_COLD | __GFP_COMP |  [efx_init_rx_buffers()]

/linux-4.1.27/drivers/vfio/

vfio_iommu_type1.c
    706: pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);  [vfio_test_domain_fgsp()]

/linux-4.1.27/drivers/hv/

hv_balloon.c
    1104: pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |  [alloc_balloon_pages()]

/linux-4.1.27/arch/x86/kvm/

svm.c
    874: iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);  [svm_hardware_setup()]
    1240: msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);  [svm_create_vcpu()]
    1244: nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);  [svm_create_vcpu()]

/linux-4.1.27/drivers/net/wireless/iwlegacy/

4965-mac.c
    340: page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);  [il4965_rx_allocate()]
    343: D_INFO("alloc_pages failed, " "order: %d\n",  [il4965_rx_allocate()]
    348: IL_ERR("Failed to alloc_pages with %s. "  [il4965_rx_allocate()]

3945-mac.c
    1028: page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);  [il3945_rx_allocate()]

/linux-4.1.27/drivers/infiniband/hw/ehca/

ehca_mrmw.c
    2558: p = alloc_pages(flag, get_order(size));  [ehca_dma_alloc_coherent()]

/linux-4.1.27/arch/sparc/mm/

init_64.c
    2265: mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);  [mem_init()]

/linux-4.1.27/fs/ubifs/

file.c
    43: * in the "sys_write -> alloc_pages -> direct reclaim path". So, in

/linux-4.1.27/fs/ecryptfs/

crypto.c
    1166: page = alloc_pages(gfp_mask | __GFP_ZERO, order);  [ecryptfs_get_zeroed_pages()]

/linux-4.1.27/drivers/net/ethernet/sun/

cassini.c
    498: page->buffer = alloc_pages(flags, cp->page_order);  [cas_page_alloc()]
    3415: struct page *page = alloc_pages(GFP_ATOMIC,  [cas_check_invariants()]

/linux-4.1.27/net/core/

skbuff.c
    547: * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
    4406: page = alloc_pages((gfp_mask & ~__GFP_WAIT) |  [alloc_skb_with_frags()]

sock.c
    1882: pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |  [skb_page_frag_refill()]
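
The net/core hits mask __GFP_WAIT out of the caller's gfp mask before attempting a high-order compound page, so the attempt cannot sleep or enter direct reclaim, and the callers typically fall back to an order-0 page with the original mask if it fails. A sketch of that shape (simplified and with an invented name; the real refill helpers also keep the page across calls and track an offset into it):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /*
     * Sketch of the net/core refill pattern above: the high-order attempt
     * must not sleep (mask out __GFP_WAIT) and must fail quietly; the
     * order-0 fallback uses the caller's original mask. The order value
     * is illustrative.
     */
    static struct page *frag_page_refill_sketch(gfp_t gfp)
    {
        struct page *page;

        page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
                           __GFP_NOWARN | __GFP_NORETRY, 3);
        if (page)
            return page;                /* got an 8-page compound block */

        return alloc_page(gfp);         /* fall back to a single page */
    }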

/linux-4.1.27/drivers/scsi/

sg.c
    1866: schp->pages[k] = alloc_pages(gfp_mask, order);  [sg_build_indirect()]

osst.c
    5360: struct page *page = alloc_pages(priority, order);  [enlarge_buffer()]
    5376: struct page *page = alloc_pages(priority, (OS_FRAME_SIZE - got <= PAGE_SIZE) ? 0 : order);  [enlarge_buffer()]

st.c
    3805: page = alloc_pages(priority, order);

pmcraid.c
    3302: page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);  [pmcraid_alloc_sglist()]

ipr.c
    3777: page = alloc_pages(GFP_KERNEL, order);  [ipr_alloc_ucode_buffer()]

/linux-4.1.27/drivers/net/ethernet/brocade/bna/

bnad.c
    384: page = alloc_pages(GFP_ATOMIC | __GFP_COMP,  [bnad_rxq_refill_page()]

/linux-4.1.27/kernel/

kexec.c
    644: pages = alloc_pages(gfp_mask, order);  [kimage_alloc_pages()]

/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/

sge.c
    449: q->pg_chunk.page = alloc_pages(gfp, order);  [alloc_pg_chunk()]

/linux-4.1.27/drivers/net/ethernet/emulex/benet/

be_main.c
    2049: return alloc_pages(gfp, order);  [be_alloc_pages()]

/linux-4.1.27/drivers/net/ethernet/myricom/myri10ge/

myri10ge.c
    1314: alloc_pages(GFP_ATOMIC | __GFP_COMP,  [myri10ge_alloc_rx_pages()]

/linux-4.1.27/drivers/net/ethernet/qlogic/qlge/

qlge_main.c
    1095: rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |  [ql_get_next_chunk()]

/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_cmn.c
    547: struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);  [bnx2x_alloc_rx_sge()]
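
The ethernet-driver hits in this last group (bnad, cxgb3, myri10ge, qlge, sfc, cassini) share one pattern: a multi-page compound allocation made in atomic context that is then carved into individual receive buffers. A sketch of that shape (the order, struct and function names are illustrative, not taken from any of the drivers listed):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    /* Hypothetical per-ring chunk: one compound page carved into buffers. */
    struct rx_chunk {
        struct page *page;
        unsigned int offset;    /* next free byte within the chunk */
    };

    static int refill_rx_chunk(struct rx_chunk *chunk, unsigned int order)
    {
        /*
         * __GFP_COMP keeps the block as one compound page, so that
         * get_page()/put_page() on any part pin the whole buffer;
         * __GFP_COLD asks for a cache-cold page, which suits DMA receive.
         */
        chunk->page = alloc_pages(GFP_ATOMIC | __GFP_COMP | __GFP_COLD, order);
        if (!chunk->page)
            return -ENOMEM;
        chunk->offset = 0;
        return 0;
    }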