Searched refs: npages (Results 1 – 186 of 186) sorted by relevance

/linux-4.1.27/drivers/gpu/drm/ttm/
ttm_page_alloc.c
77 unsigned npages; member
276 static void ttm_pages_put(struct page *pages[], unsigned npages) in ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) in ttm_pages_put()
280 pr_err("Failed to set %d pages to wb!\n", npages); in ttm_pages_put()
281 for (i = 0; i < npages; ++i) in ttm_pages_put()
288 pool->npages -= freed_pages; in ttm_pool_update_free_locked()
430 count += _manager->pools[i].npages; in ttm_pool_shrink_count()
593 && count > pool->npages) { in ttm_page_pool_fill_locked()
611 pool->npages += alloc_size; in ttm_page_pool_fill_locked()
619 pool->npages += cpages; in ttm_page_pool_fill_locked()
[all …]
ttm_page_alloc_dma.c
379 struct page *pages[], unsigned npages) in ttm_dma_pages_put() argument
384 if (npages && !(pool->type & IS_CACHED) && in ttm_dma_pages_put()
385 set_pages_array_wb(pages, npages)) in ttm_dma_pages_put()
387 pool->dev_name, npages); in ttm_dma_pages_put()
939 unsigned count = 0, i, npages = 0; in ttm_dma_unpopulate() local
963 npages = count; in ttm_dma_unpopulate()
965 npages = pool->npages_free - _manager->options.max_size; in ttm_dma_unpopulate()
968 if (npages < NUM_PAGES_TO_ALLOC) in ttm_dma_unpopulate()
969 npages = NUM_PAGES_TO_ALLOC; in ttm_dma_unpopulate()
995 if (npages) in ttm_dma_unpopulate()
[all …]
ttm_bo.c
1183 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; in ttm_bo_acc_size() local
1187 size += PAGE_ALIGN(npages * sizeof(void *)); in ttm_bo_acc_size()
1197 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; in ttm_bo_dma_acc_size() local
1201 size += PAGE_ALIGN(npages * sizeof(void *)); in ttm_bo_dma_acc_size()
1202 size += PAGE_ALIGN(npages * sizeof(dma_addr_t)); in ttm_bo_dma_acc_size()
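Note: the ttm_bo.c hits above show the byte-size-to-page-count conversion that recurs throughout these results. A minimal standalone sketch of the arithmetic (the PAGE_SHIFT value of 12 is an assumption for a 4 KiB-page build; size_to_npages is an illustrative name, not a kernel symbol):

    #include <stdio.h>

    #define PAGE_SHIFT 12                 /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Round a byte size up to whole pages, as in ttm_bo_acc_size(). */
    static unsigned long size_to_npages(unsigned long bo_size)
    {
        return PAGE_ALIGN(bo_size) >> PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%lu\n", size_to_npages(4096)); /* 1 */
        printf("%lu\n", size_to_npages(4097)); /* 2 */
        return 0;
    }
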
/linux-4.1.27/arch/sparc/kernel/
iommu.c
158 unsigned long npages) in alloc_npages() argument
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
204 int npages, nid; in dma_4u_alloc_coherent() local
233 npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
235 while (npages--) { in dma_4u_alloc_coherent()
251 unsigned long order, npages; in dma_4u_free_coherent() local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); in dma_4u_free_coherent()
271 unsigned long flags, npages, oaddr; in dma_4u_map_page() local
283 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
[all …]
pci_sun4v.c
43 unsigned long npages; /* Number of pages in list. */ member
57 p->npages = 0; in iommu_batch_start()
68 unsigned long npages = p->npages; in iommu_batch_flush() local
70 while (npages != 0) { in iommu_batch_flush()
74 npages, prot, __pa(pglist)); in iommu_batch_flush()
81 npages, prot, __pa(pglist), num); in iommu_batch_flush()
86 npages -= num; in iommu_batch_flush()
91 p->npages = 0; in iommu_batch_flush()
100 if (p->entry + p->npages == entry) in iommu_batch_new_entry()
112 BUG_ON(p->npages >= PGLIST_NENTS); in iommu_batch_add()
[all …]
ldc.c
1017 unsigned long entry, unsigned long npages) in ldc_demap() argument
1024 for (i = 0; i < npages; i++) { in ldc_demap()
1950 unsigned long npages) in alloc_npages() argument
1955 npages, NULL, (unsigned long)-1, 0); in alloc_npages()
2084 unsigned long i, npages; in ldc_map_sg() local
2097 npages = err; in ldc_map_sg()
2103 base = alloc_npages(iommu, npages); in ldc_map_sg()
2128 unsigned long npages, pa; in ldc_map_single() local
2140 npages = pages_in_region(pa, len); in ldc_map_single()
2144 base = alloc_npages(iommu, npages); in ldc_map_single()
[all …]
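Note: in the sparc hits above, npages measures the span of a buffer in IOMMU pages, so a buffer that starts mid-page costs an extra entry. A hedged sketch of the span computation visible in the dma_4u_map_page() lines (an IO_PAGE_SHIFT of 13 is an assumption; map_npages is an illustrative name):

    #define IO_PAGE_SHIFT 13                     /* assumed 8 KiB IOMMU page */
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    /* Pages spanned by [oaddr, oaddr + sz): align the end up, the start
     * down, then convert the byte distance to a page count. */
    static unsigned long map_npages(unsigned long oaddr, unsigned long sz)
    {
        return (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
                >> IO_PAGE_SHIFT;
    }
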
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
55 s32 npages; member
166 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
184 *npages = be32_to_cpu(out.num_pages); in mlx5_cmd_query_pages()
278 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
289 inlen = sizeof(*in) + npages * sizeof(in->pas[0]); in give_pages()
297 for (i = 0; i < npages; i++) { in give_pages()
314 in->num_entries = cpu_to_be32(npages); in give_pages()
318 func_id, npages, err); in give_pages()
321 dev->priv.fw_pages += npages; in give_pages()
327 func_id, npages, out.hdr.status); in give_pages()
[all …]
alloc.c
58 buf->npages = 1; in mlx5_buf_alloc()
69 buf->npages *= 2; in mlx5_buf_alloc()
76 buf->npages = buf->nbufs; in mlx5_buf_alloc()
232 for (i = 0; i < buf->npages; i++) { in mlx5_fill_page_array()
eq.c
276 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); in mlx5_eq_int() local
279 func_id, npages); in mlx5_eq_int()
280 mlx5_core_req_pages_handler(dev, func_id, npages); in mlx5_eq_int()
356 inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; in mlx5_create_map_eq()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_mem.c
81 int npages) in iwch_reregister_mem() argument
87 if (npages > mhp->attr.pbl_size) in iwch_reregister_mem()
109 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) in iwch_alloc_pbl() argument
112 npages << 3); in iwch_alloc_pbl()
117 mhp->attr.pbl_size = npages; in iwch_alloc_pbl()
128 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) in iwch_write_pbl() argument
131 mhp->attr.pbl_addr + (offset << 3), npages); in iwch_write_pbl()
138 int *npages, in build_phys_page_list() argument
176 *npages = 0; in build_phys_page_list()
178 *npages += (buffer_list[i].size + in build_phys_page_list()
[all …]
cxio_dbg.c
78 int size, npages; in cxio_dump_pbl() local
81 npages = (len + (1ULL << shift) - 1) >> shift; in cxio_dump_pbl()
82 size = npages * sizeof(u64); in cxio_dump_pbl()
93 __func__, m->addr, m->len, npages); in cxio_dump_pbl()
iwch_provider.h
345 int npages);
346 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
348 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
353 int *npages,
iwch_provider.c
481 int npages; in iwch_register_phys_mem() local
510 &total_size, &npages, &shift, &page_list); in iwch_register_phys_mem()
514 ret = iwch_alloc_pbl(mhp, npages); in iwch_register_phys_mem()
520 ret = iwch_write_pbl(mhp, page_list, npages, 0); in iwch_register_phys_mem()
533 mhp->attr.pbl_size = npages; in iwch_register_phys_mem()
563 int npages = 0; in iwch_reregister_phys_mem() local
589 &total_size, &npages, in iwch_reregister_phys_mem()
595 ret = iwch_reregister_mem(rhp, php, &mh, shift, npages); in iwch_reregister_phys_mem()
609 mhp->attr.pbl_size = npages; in iwch_reregister_phys_mem()
/linux-4.1.27/arch/powerpc/kernel/
iommu.c
178 unsigned long npages, in iommu_range_alloc() argument
185 int largealloc = npages > 15; in iommu_range_alloc()
198 if (unlikely(npages == 0)) { in iommu_range_alloc()
259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
285 end = n + npages; in iommu_range_alloc()
307 void *page, unsigned int npages, in iommu_alloc() argument
316 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
325 build_fail = ppc_md.tce_build(tbl, entry, npages, in iommu_alloc()
335 __iommu_free(tbl, ret, npages); in iommu_alloc()
350 unsigned int npages) in iommu_free_check() argument
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c
174 static void enc_pools_release_free_pages(long npages) in enc_pools_release_free_pages() argument
179 LASSERT(npages > 0); in enc_pools_release_free_pages()
180 LASSERT(npages <= page_pools.epp_free_pages); in enc_pools_release_free_pages()
186 page_pools.epp_free_pages -= npages; in enc_pools_release_free_pages()
187 page_pools.epp_total_pages -= npages; in enc_pools_release_free_pages()
197 while (npages--) { in enc_pools_release_free_pages()
276 int npages_to_npools(unsigned long npages) in npages_to_npools() argument
278 return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL); in npages_to_npools()
312 static void enc_pools_insert(struct page ***pools, int npools, int npages) in enc_pools_insert() argument
318 LASSERT(npages > 0); in enc_pools_insert()
[all …]
ptlrpc_internal.h
56 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
client.c
100 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, in ptlrpc_new_bulk() argument
106 OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages])); in ptlrpc_new_bulk()
112 desc->bd_max_iov = npages; in ptlrpc_new_bulk()
135 unsigned npages, unsigned max_brw, in ptlrpc_prep_bulk_imp() argument
142 desc = ptlrpc_new_bulk(npages, max_brw, type, portal); in ptlrpc_prep_bulk_imp()
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_memfree.c
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c
199 int npages, shift; in mthca_buf_alloc() local
206 npages = 1; in mthca_buf_alloc()
220 npages *= 2; in mthca_buf_alloc()
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
246 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
mthca_memfree.h
53 int npages; member
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; member
mthca_eq.c
470 int npages; in mthca_create_eq() local
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; in mthca_create_eq()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mthca_create_eq()
487 for (i = 0; i < npages; ++i) in mthca_create_eq()
490 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_create_eq()
499 for (i = 0; i < npages; ++i) { in mthca_create_eq()
519 dma_list, PAGE_SHIFT, npages, in mthca_create_eq()
520 0, npages * PAGE_SIZE, in mthca_create_eq()
571 for (i = 0; i < npages; ++i) in mthca_create_eq()
593 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / in mthca_free_eq() local
[all …]
mthca_provider.c
904 int npages; in mthca_reg_phys_mr() local
931 npages = 0; in mthca_reg_phys_mr()
933 npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift; in mthca_reg_phys_mr()
935 if (!npages) in mthca_reg_phys_mr()
938 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); in mthca_reg_phys_mr()
956 shift, npages); in mthca_reg_phys_mr()
960 page_list, shift, npages, in mthca_reg_phys_mr()
/linux-4.1.27/drivers/infiniband/core/
umem.c
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
92 unsigned long npages; in ib_umem_get() local
164 npages = ib_umem_num_pages(umem); in ib_umem_get()
168 locked = npages + current->mm->pinned_vm; in ib_umem_get()
178 if (npages == 0) { in ib_umem_get()
183 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); in ib_umem_get()
190 while (npages) { in ib_umem_get()
192 min_t(unsigned long, npages, in ib_umem_get()
199 umem->npages += ret; in ib_umem_get()
201 npages -= ret; in ib_umem_get()
[all …]
umem_odp.c
528 int j, k, ret = 0, start_idx, npages = 0; in ib_umem_odp_map_dma_pages() local
575 npages = get_user_pages(owning_process, owning_mm, user_virt, in ib_umem_odp_map_dma_pages()
581 if (npages < 0) in ib_umem_odp_map_dma_pages()
584 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); in ib_umem_odp_map_dma_pages()
585 user_virt += npages << PAGE_SHIFT; in ib_umem_odp_map_dma_pages()
587 for (j = 0; j < npages; ++j) { in ib_umem_odp_map_dma_pages()
599 for (++j; j < npages; ++j) in ib_umem_odp_map_dma_pages()
606 if (npages < 0 && k == start_idx) in ib_umem_odp_map_dma_pages()
607 ret = npages; in ib_umem_odp_map_dma_pages()
/linux-4.1.27/arch/x86/kernel/
pci-calgary_64.c
204 unsigned long start_addr, unsigned int npages) in iommu_range_reserve() argument
216 end = index + npages; in iommu_range_reserve()
222 bitmap_set(tbl->it_map, index, npages); in iommu_range_reserve()
229 unsigned int npages) in iommu_range_alloc() argument
238 BUG_ON(npages == 0); in iommu_range_alloc()
243 npages, 0, boundary_size, 0); in iommu_range_alloc()
248 npages, 0, boundary_size, 0); in iommu_range_alloc()
259 tbl->it_hint = offset + npages; in iommu_range_alloc()
268 void *vaddr, unsigned int npages, int direction) in iommu_alloc() argument
273 entry = iommu_range_alloc(dev, tbl, npages); in iommu_alloc()
[all …]
tce_64.c
50 unsigned int npages, unsigned long uaddr, int direction) in tce_build() argument
62 while (npages--) { in tce_build()
75 void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument
81 while (npages--) { in tce_free()
amd_gart_64.c
217 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); in dma_map_area() local
224 iommu_page = alloc_iommu(dev, npages, align_mask); in dma_map_area()
234 for (i = 0; i < npages; i++) { in dma_map_area()
270 int npages; in gart_unmap_page() local
278 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in gart_unmap_page()
279 for (i = 0; i < npages; i++) { in gart_unmap_page()
282 free_iommu(iommu_page, npages); in gart_unmap_page()
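Note: both the map and unmap paths above size the range with iommu_num_pages(addr, size, PAGE_SIZE), so allocation and free always agree on the count. A sketch of the equivalent arithmetic (the real helper lives in lib/iommu-helper.c; this reimplementation and its name are illustrative):

    /* Count the I/O pages touched by [addr, addr + len), where addr may
     * start mid-page: fold the intra-page offset into the length, then
     * divide rounding up. */
    static unsigned long sketch_iommu_num_pages(unsigned long addr,
                                                unsigned long len,
                                                unsigned long io_page_size)
    {
        unsigned long size = (addr & (io_page_size - 1)) + len;

        return (size + io_page_size - 1) / io_page_size;
    }
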
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
mr.c
197 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
202 if (!npages) { in mlx4_mtt_init()
209 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
420 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
430 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
530 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
540 access, npages, page_shift, mr); in mlx4_mr_alloc()
592 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
597 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_rereg_mem_write()
695 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
[all …]
icm.c
59 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, in mlx4_free_icm_pages()
62 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
127 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
153 while (npages > 0) { in mlx4_alloc_icm()
168 chunk->npages = 0; in mlx4_alloc_icm()
173 while (1 << cur_order > npages) in mlx4_alloc_icm()
178 &chunk->mem[chunk->npages], in mlx4_alloc_icm()
181 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], in mlx4_alloc_icm()
192 ++chunk->npages; in mlx4_alloc_icm()
[all …]
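Note: mthca_alloc_icm() and mlx4_alloc_icm() share the loop shape visible above: request the largest power-of-two chunk that does not exceed the remaining npages, drop to smaller orders when allocation fails, and give up only when order 0 fails too. A condensed control-flow sketch (the starting order, the stub allocator, and the failure fallback are assumptions filled in around the lines shown):

    #include <stdbool.h>

    #define MAX_ICM_ORDER 8  /* assumed starting chunk order: 256 pages */

    /* Stub: returns true if a 2^order-page chunk could be allocated. */
    static bool alloc_chunk(int order) { (void)order; return true; }

    static int alloc_icm_sketch(int npages)
    {
        int cur_order = MAX_ICM_ORDER;

        while (npages > 0) {
            /* Never ask for more pages than remain. */
            while (1 << cur_order > npages)
                --cur_order;

            if (alloc_chunk(cur_order))
                npages -= 1 << cur_order;
            else if (--cur_order < 0)
                return -1;   /* even a single page failed */
        }
        return 0;
    }
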
icm.h
52 int npages; member
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
eq.c
926 int npages; in mlx4_create_eq() local
938 npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; in mlx4_create_eq()
940 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mlx4_create_eq()
945 for (i = 0; i < npages; ++i) in mlx4_create_eq()
948 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mlx4_create_eq()
957 for (i = 0; i < npages; ++i) { in mlx4_create_eq()
981 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); in mlx4_create_eq()
985 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); in mlx4_create_eq()
1025 for (i = 0; i < npages; ++i) in mlx4_create_eq()
1050 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; in mlx4_free_eq() local
[all …]
alloc.c
593 buf->npages = 1; in mlx4_buf_alloc()
604 buf->npages *= 2; in mlx4_buf_alloc()
613 buf->npages = buf->nbufs; in mlx4_buf_alloc()
806 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, in mlx4_alloc_hwq_res()
resource_tracker.c
3021 int npages = vhcr->in_modifier; in mlx4_WRITE_MTT_wrapper() local
3024 err = get_containing_mtt(dev, slave, start, npages, &rmtt); in mlx4_WRITE_MTT_wrapper()
3035 for (i = 0; i < npages; ++i) in mlx4_WRITE_MTT_wrapper()
3038 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, in mlx4_WRITE_MTT_wrapper()
mlx4.h
997 int start_index, int npages, u64 *page_list);
/linux-4.1.27/arch/sparc/mm/
iommu.c
177 static u32 iommu_get_one(struct device *dev, struct page *page, int npages) in iommu_get_one() argument
186 ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); in iommu_get_one()
194 for (i = 0; i < npages; i++) { in iommu_get_one()
202 iommu_flush_iotlb(iopte0, npages); in iommu_get_one()
210 int npages; in iommu_get_scsi_one() local
215 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_one()
217 busa = iommu_get_one(dev, page, npages); in iommu_get_scsi_one()
283 static void iommu_release_one(struct device *dev, u32 busa, int npages) in iommu_release_one() argument
291 for (i = 0; i < npages; i++) { in iommu_release_one()
296 bit_map_clear(&iommu->usemap, ioptex, npages); in iommu_release_one()
[all …]
io-unit.c
96 int i, j, k, npages; in iounit_get_area() local
100 npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_get_area()
103 switch (npages) { in iounit_get_area()
109 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); in iounit_get_area()
116 if (scan + npages > limit) { in iounit_get_area()
127 for (k = 1, scan++; k < npages; k++) in iounit_get_area()
131 scan -= npages; in iounit_get_area()
134 for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { in iounit_get_area()
srmmu.c
951 unsigned long npages; in srmmu_paging_init() local
957 npages = max_low_pfn - pfn_base; in srmmu_paging_init()
959 zones_size[ZONE_DMA] = npages; in srmmu_paging_init()
960 zholes_size[ZONE_DMA] = npages - pages_avail; in srmmu_paging_init()
962 npages = highend_pfn - max_low_pfn; in srmmu_paging_init()
963 zones_size[ZONE_HIGHMEM] = npages; in srmmu_paging_init()
964 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); in srmmu_paging_init()
/linux-4.1.27/arch/x86/kvm/
iommu.c
44 gfn_t base_gfn, unsigned long npages);
47 unsigned long npages) in kvm_pin_pages() argument
53 end_gfn = gfn + npages; in kvm_pin_pages()
65 static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) in kvm_unpin_pages() argument
69 for (i = 0; i < npages; ++i) in kvm_unpin_pages()
86 end_gfn = gfn + slot->npages; in kvm_iommu_map_pages()
272 gfn_t base_gfn, unsigned long npages) in kvm_iommu_put_pages() argument
280 end_gfn = base_gfn + npages; in kvm_iommu_put_pages()
316 kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); in kvm_iommu_unmap_pages()
paging_tmpl.h
152 int npages; in FNAME() local
157 npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page); in FNAME()
159 if (unlikely(npages != 1)) in FNAME()
mmu.c
1428 (memslot->npages << PAGE_SHIFT)); in kvm_handle_hva_range()
4385 last_gfn = memslot->base_gfn + memslot->npages - 1; in kvm_mmu_slot_remove_write_access()
4476 last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1, in kvm_mmu_zap_collapsible_sptes()
4506 last_gfn = memslot->base_gfn + memslot->npages - 1; in kvm_mmu_slot_leaf_clear_dirty()
4544 last_gfn = memslot->base_gfn + memslot->npages - 1; in kvm_mmu_slot_largepage_remove_write_access()
4582 last_gfn = memslot->base_gfn + memslot->npages - 1; in kvm_mmu_slot_set_dirty()
4831 nr_pages += memslot->npages; in kvm_mmu_calculate_mmu_pages()
x86.c
7534 unsigned long npages) in kvm_arch_create_memslot() argument
7543 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_arch_create_memslot()
7560 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_arch_create_memslot()
7617 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, in kvm_arch_prepare_memory_region()
7692 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
/linux-4.1.27/arch/arm64/kernel/
efi.c
171 u64 paddr, npages, size; in reserve_regions() local
178 npages = md->num_pages; in reserve_regions()
184 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, in reserve_regions()
188 memrange_efi_to_native(&paddr, &npages); in reserve_regions()
189 size = npages << PAGE_SHIFT; in reserve_regions()
237 u64 paddr, npages, size; in efi_virtmap_init() local
246 npages = md->num_pages; in efi_virtmap_init()
247 memrange_efi_to_native(&paddr, &npages); in efi_virtmap_init()
248 size = npages << PAGE_SHIFT; in efi_virtmap_init()
/linux-4.1.27/fs/nfs/
nfs3acl.c
157 unsigned int npages = 1 + ((args.len - 1) >> PAGE_SHIFT); in __nfs3_proc_setacls() local
161 args.pages[args.npages] = alloc_page(GFP_KERNEL); in __nfs3_proc_setacls()
162 if (args.pages[args.npages] == NULL) in __nfs3_proc_setacls()
164 args.npages++; in __nfs3_proc_setacls()
165 } while (args.npages < npages); in __nfs3_proc_setacls()
197 while (args.npages != 0) { in __nfs3_proc_setacls()
198 args.npages--; in __nfs3_proc_setacls()
199 __free_page(args.pages[args.npages]); in __nfs3_proc_setacls()
direct.c
271 static void nfs_direct_release_pages(struct page **pages, unsigned int npages) in nfs_direct_release_pages() argument
274 for (i = 0; i < npages; i++) in nfs_direct_release_pages()
491 unsigned npages, i; in nfs_direct_read_schedule_iovec() local
500 npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; in nfs_direct_read_schedule_iovec()
501 for (i = 0; i < npages; i++) { in nfs_direct_read_schedule_iovec()
524 nfs_direct_release_pages(pagevec, npages); in nfs_direct_read_schedule_iovec()
878 unsigned npages, i; in nfs_direct_write_schedule_iovec() local
887 npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; in nfs_direct_write_schedule_iovec()
888 for (i = 0; i < npages; i++) { in nfs_direct_write_schedule_iovec()
915 nfs_direct_release_pages(pagevec, npages); in nfs_direct_write_schedule_iovec()
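Note: both schedulers above convert the byte count returned by the iov_iter page-pinning call back into a page count, folding in pgbase, the offset of the first byte within its page. A sketch of just that step (a PAGE_SIZE of 4 KiB and the function name are assumptions):

    #define PAGE_SIZE 4096UL   /* assumed */

    /* result: bytes pinned; pgbase: offset of the first byte in its page. */
    static unsigned long bytes_to_npages(unsigned long result,
                                         unsigned long pgbase)
    {
        return (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
    }
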
fscache.c
372 unsigned npages = *nr_pages; in __nfs_readpages_from_fscache() local
376 nfs_i_fscache(inode), npages, inode); in __nfs_readpages_from_fscache()
383 if (*nr_pages < npages) in __nfs_readpages_from_fscache()
385 npages); in __nfs_readpages_from_fscache()
read.c
377 unsigned long npages; in nfs_readpages() local
415 npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> in nfs_readpages()
417 nfs_add_stats(inode, NFSIOS_READPAGES, npages); in nfs_readpages()
dir.c
586 void nfs_readdir_free_pagearray(struct page **pages, unsigned int npages) in nfs_readdir_free_pagearray() argument
589 for (i = 0; i < npages; i++) in nfs_readdir_free_pagearray()
595 unsigned int npages) in nfs_readdir_free_large_page() argument
597 nfs_readdir_free_pagearray(pages, npages); in nfs_readdir_free_large_page()
605 int nfs_readdir_large_page(struct page **pages, unsigned int npages) in nfs_readdir_large_page() argument
609 for (i = 0; i < npages; i++) { in nfs_readdir_large_page()
pagelist.c
34 p->npages = pagecount; in nfs_pgarray_set()
40 p->npages = 0; in nfs_pgarray_set()
nfs4proc.c
4574 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); in __nfs4_get_acl_uncached() local
4579 if (npages == 0) in __nfs4_get_acl_uncached()
4580 npages = 1; in __nfs4_get_acl_uncached()
4581 if (npages > ARRAY_SIZE(pages)) in __nfs4_get_acl_uncached()
4584 for (i = 0; i < npages; i++) { in __nfs4_get_acl_uncached()
4595 args.acl_len = npages * PAGE_SIZE; in __nfs4_get_acl_uncached()
4599 __func__, buf, buflen, npages, args.acl_len); in __nfs4_get_acl_uncached()
4624 for (i = 0; i < npages; i++) in __nfs4_get_acl_uncached()
4681 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); in __nfs4_proc_set_acl() local
4686 if (npages > ARRAY_SIZE(pages)) in __nfs4_proc_set_acl()
nfs3xdr.c
1342 if (args->npages != 0) in nfs3_xdr_enc_setacl3args()
/linux-4.1.27/arch/powerpc/kvm/
book3s_64_vio.c
104 long npages; in kvm_vm_ioctl_create_spapr_tce() local
114 npages = kvmppc_stt_npages(args->window_size); in kvm_vm_ioctl_create_spapr_tce()
116 stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), in kvm_vm_ioctl_create_spapr_tce()
125 for (i = 0; i < npages; i++) { in kvm_vm_ioctl_create_spapr_tce()
143 for (i = 0; i < npages; i++) in kvm_vm_ioctl_create_spapr_tce()
book3s_64_mmu_hv.c
178 unsigned long npages; in kvmppc_map_vrma() local
188 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
191 if (npages > 1ul << (40 - porder)) in kvmppc_map_vrma()
192 npages = 1ul << (40 - porder); in kvmppc_map_vrma()
194 if (npages > kvm->arch.hpt_mask + 1) in kvmppc_map_vrma()
195 npages = kvm->arch.hpt_mask + 1; in kvmppc_map_vrma()
202 for (i = 0; i < npages; ++i) { in kvmppc_map_vrma()
448 long index, ret, npages; in kvmppc_book3s_hv_page_fault() local
513 npages = get_user_pages_fast(hva, 1, writing, pages); in kvmppc_book3s_hv_page_fault()
514 if (npages < 1) { in kvmppc_book3s_hv_page_fault()
[all …]
book3s_hv_rm_mmu.c
376 long npages, int global, bool need_sync) in do_tlbies() argument
385 for (i = 0; i < npages; ++i) in do_tlbies()
393 for (i = 0; i < npages; ++i) in do_tlbies()
book3s.c
748 unsigned long npages) in kvmppc_core_create_memslot() argument
750 return kvm->arch.kvm_ops->create_memslot(slot, npages); in kvmppc_core_create_memslot()
book3s_pr.c
266 (memslot->npages << PAGE_SHIFT)); in do_kvm_unmap_hva()
1551 ga_end = ga + (memslot->npages << PAGE_SHIFT); in kvm_vm_ioctl_get_dirty_log_pr()
1593 unsigned long npages) in kvmppc_core_create_memslot_pr() argument
book3s_hv.c
2372 unsigned long npages) in kvmppc_core_create_memslot_hv() argument
2374 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); in kvmppc_core_create_memslot_hv()
2392 unsigned long npages = mem->memory_size >> PAGE_SHIFT; in kvmppc_core_commit_memory_region_hv() local
2395 if (npages && old->npages) { in kvmppc_core_commit_memory_region_hv()
powerpc.c
591 unsigned long npages) in kvm_arch_create_memslot() argument
593 return kvmppc_core_create_memslot(kvm, slot, npages); in kvm_arch_create_memslot()
e500_mmu_host.c
384 slot_end = slot_start + slot->npages; in kvmppc_e500_shadow_map()
booke.c
1780 unsigned long npages) in kvmppc_core_create_memslot() argument
/linux-4.1.27/arch/powerpc/platforms/pasemi/
iommu.c
89 long npages, unsigned long uaddr, in iobmap_build() argument
97 pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr); in iobmap_build()
103 while (npages--) { in iobmap_build()
118 long npages) in iobmap_free() argument
123 pr_debug("iobmap: free at: %lx, %lx\n", index, npages); in iobmap_free()
129 while (npages--) { in iobmap_free()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
mr.c
140 int npages = 1 << ent->order; in add_keys() local
163 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); in add_keys()
675 int npages; in get_octo_len() local
678 npages = ALIGN(len + offset, page_size) >> ilog2(page_size); in get_octo_len()
679 return (npages + 1) / 2; in get_octo_len()
710 umrwr->npages = n; in prep_umr_reg_wqe()
752 u64 virt_addr, u64 len, int npages, in reg_umr() argument
787 size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); in reg_umr()
797 memset(pas + npages, 0, size - npages * sizeof(u64)); in reg_umr()
807 prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift, in reg_umr()
[all …]
odp.c
194 int npages = 0, ret = 0; in pagefault_single_data_segment() local
244 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, in pagefault_single_data_segment()
246 if (npages < 0) { in pagefault_single_data_segment()
247 ret = npages; in pagefault_single_data_segment()
251 if (npages > 0) { in pagefault_single_data_segment()
259 ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); in pagefault_single_data_segment()
271 u32 new_mappings = npages * PAGE_SIZE - in pagefault_single_data_segment()
296 return ret ? ret : npages; in pagefault_single_data_segment()
322 int ret = 0, npages = 0; in pagefault_data_segments() local
380 npages += ret; in pagefault_data_segments()
[all …]
srq.c
84 int npages; in create_srq_user() local
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, in create_srq_user()
160 int npages; in create_srq_kernel() local
185 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); in create_srq_kernel()
187 buf_size, page_shift, srq->buf.npages, npages); in create_srq_kernel()
188 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; in create_srq_kernel()
mlx5_ib.h
256 unsigned int npages; member
324 int npages; member
572 int npages, int zap);
585 int npages, u64 iova);
cq.c
612 int npages; in create_cq_user() local
646 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
649 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); in create_cq_user()
712 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; in create_cq_kernel()
952 int npages; in resize_user() local
969 mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, in resize_user()
1102 npas = cq->resize_buf->buf.npages; in mlx5_ib_resize_cq()
qp.c
610 int npages; in create_user_qp() local
666 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, in create_user_qp()
674 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); in create_user_qp()
778 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; in create_kernel_qp()
1865 static __be16 get_klm_octo(int npages) in get_klm_octo() argument
1867 return cpu_to_be16(ALIGN(npages, 8) / 2); in get_klm_octo()
1977 umr->klm_octowords = get_klm_octo(umrwr->npages); in set_reg_umr_segment()
/linux-4.1.27/fs/ramfs/
file-nommu.c
68 unsigned long npages, xpages, loop; in ramfs_nommu_expand_for_mapping() local
93 npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; in ramfs_nommu_expand_for_mapping()
98 for (loop = npages; loop < xpages; loop++) in ramfs_nommu_expand_for_mapping()
102 newsize = PAGE_SIZE * npages; in ramfs_nommu_expand_for_mapping()
107 for (loop = 0; loop < npages; loop++) { in ramfs_nommu_expand_for_mapping()
126 while (loop < npages) in ramfs_nommu_expand_for_mapping()
/linux-4.1.27/arch/alpha/kernel/
pci_iommu.c
257 long npages, dma_ofs, i; in pci_map_single_1() local
300 npages = iommu_num_pages(paddr, size, PAGE_SIZE); in pci_map_single_1()
305 dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
313 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) in pci_map_single_1()
320 cpu_addr, size, npages, ret, __builtin_return_address(0)); in pci_map_single_1()
378 long dma_ofs, npages; in alpha_pci_unmap_page() local
411 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in alpha_pci_unmap_page()
415 iommu_arena_free(arena, dma_ofs, npages); in alpha_pci_unmap_page()
426 dma_addr, size, npages, __builtin_return_address(0)); in alpha_pci_unmap_page()
562 long npages, dma_ofs, i; in sg_fill() local
[all …]
/linux-4.1.27/lib/
iommu-common.c
103 unsigned long npages, in iommu_tbl_range_alloc() argument
116 bool largealloc = (large_pool && npages > iommu_large_alloc); in iommu_tbl_range_alloc()
124 if (unlikely(npages == 0)) { in iommu_tbl_range_alloc()
189 n = iommu_area_alloc(iommu->map, limit, start, npages, shift, in iommu_tbl_range_alloc()
219 end = n + npages; in iommu_tbl_range_alloc()
256 unsigned long npages, unsigned long entry) in iommu_tbl_range_free() argument
267 bitmap_clear(iommu->map, entry, npages); in iommu_tbl_range_free()
iov_iter.c
754 int npages = 0; in iov_iter_npages() local
761 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) in iov_iter_npages()
763 if (npages >= maxpages) in iov_iter_npages()
766 npages++; in iov_iter_npages()
767 if (npages >= maxpages) in iov_iter_npages()
771 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) in iov_iter_npages()
773 if (npages >= maxpages) in iov_iter_npages()
777 return npages; in iov_iter_npages()
/linux-4.1.27/crypto/
af_alg.c
395 int npages, i; in af_alg_make_sg() local
401 npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; in af_alg_make_sg()
402 if (WARN_ON(npages == 0)) in af_alg_make_sg()
405 sg_init_table(sgl->sg, npages + 1); in af_alg_make_sg()
407 for (i = 0, len = n; i < npages; i++) { in af_alg_make_sg()
415 sg_mark_end(sgl->sg + npages - 1); in af_alg_make_sg()
416 sgl->npages = npages; in af_alg_make_sg()
424 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); in af_alg_link_sg()
425 sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); in af_alg_link_sg()
433 for (i = 0; i < sgl->npages; i++) in af_alg_free_sg()
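Note: af_alg_make_sg() sizes its table as npages + 1: npages entries cover the pinned user pages, and the spare slot is what af_alg_link_sg() later converts into a chain link to the next list. A sketch of the sizing arithmetic (PAGE_SHIFT assumed; the helper name is illustrative):

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Scatterlist slots for n payload bytes starting at offset off within
     * a page, plus one slot reserved for chaining. */
    static unsigned long sg_entries_needed(unsigned long off, unsigned long n)
    {
        unsigned long npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;

        return npages + 1;
    }
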
/linux-4.1.27/arch/powerpc/sysdev/
dart_iommu.c
164 long npages, unsigned long uaddr, in dart_build() argument
172 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); in dart_build()
179 l = npages; in dart_build()
195 while (npages--) in dart_build()
204 static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
213 DBG("dart: free at: %lx, %lx\n", index, npages); in dart_free()
217 while (npages--) in dart_free()
/linux-4.1.27/arch/tile/kernel/
module.c
43 int npages; in module_alloc() local
45 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in module_alloc()
46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); in module_alloc()
49 for (; i < npages; ++i) { in module_alloc()
58 area->nr_pages = npages; in module_alloc()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
gk20a.c
204 struct nvkm_oclass *oclass, u32 npages, u32 align, in gk20a_instobj_ctor_dma() argument
218 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, in gk20a_instobj_ctor_dma()
234 node->r.length = (npages << PAGE_SHIFT) >> 12; in gk20a_instobj_ctor_dma()
246 struct nvkm_oclass *oclass, u32 npages, u32 align, in gk20a_instobj_ctor_iommu() argument
256 sizeof(*node) + sizeof(node->pages[0]) * npages, in gk20a_instobj_ctor_iommu()
263 for (i = 0; i < npages; i++) { in gk20a_instobj_ctor_iommu()
275 ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages, in gk20a_instobj_ctor_iommu()
284 for (i = 0; i < npages; i++) { in gk20a_instobj_ctor_iommu()
317 for (i = 0; i < npages && node->pages[i] != NULL; i++) in gk20a_instobj_ctor_iommu()
/linux-4.1.27/arch/powerpc/platforms/pseries/
iommu.c
84 long npages, unsigned long uaddr, in tce_build_pSeries() argument
99 while (npages--) { in tce_build_pSeries()
114 static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) in tce_free_pSeries() argument
120 while (npages--) in tce_free_pSeries()
140 long npages, unsigned long uaddr, in tce_build_pSeriesLP() argument
148 long tcenum_start = tcenum, npages_start = npages; in tce_build_pSeriesLP()
155 while (npages--) { in tce_build_pSeriesLP()
162 (npages_start - (npages + 1))); in tce_build_pSeriesLP()
183 long npages, unsigned long uaddr, in tce_buildmulti_pSeriesLP() argument
192 long tcenum_start = tcenum, npages_start = npages; in tce_buildmulti_pSeriesLP()
[all …]
/linux-4.1.27/drivers/infiniband/hw/usnic/
usnic_uiom.c
109 unsigned long npages; in usnic_uiom_get_pages() local
129 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; in usnic_uiom_get_pages()
133 locked = npages + current->mm->locked_vm; in usnic_uiom_get_pages()
146 while (npages) { in usnic_uiom_get_pages()
148 min_t(unsigned long, npages, in usnic_uiom_get_pages()
155 npages -= ret; in usnic_uiom_get_pages()
220 int npages; in __usnic_uiom_reg_release() local
226 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; in __usnic_uiom_reg_release()
228 vpn_last = vpn_start + npages - 1; in __usnic_uiom_reg_release()
342 unsigned long npages; in usnic_uiom_reg_get() local
[all …]
/linux-4.1.27/fs/freevxfs/
vxfs_lookup.c
116 u_long npages, page, nblocks, pblocks, block; in vxfs_find_entry() local
121 npages = dir_pages(ip); in vxfs_find_entry()
125 for (page = 0; page < npages; page++) { in vxfs_find_entry()
243 u_long page, npages, block, pblocks, nblocks, offset; in vxfs_readdir() local
261 npages = dir_pages(ip); in vxfs_readdir()
269 for (; page < npages; page++, block = 0) { in vxfs_readdir()
/linux-4.1.27/arch/arm/kernel/
process.c
340 unsigned int npages) in sigpage_addr() argument
350 last = TASK_SIZE - (npages << PAGE_SHIFT); in sigpage_addr()
381 unsigned long npages; in arch_setup_additional_pages() local
391 npages = 1; /* for sigpage */ in arch_setup_additional_pages()
392 npages += vdso_total_pages; in arch_setup_additional_pages()
395 hint = sigpage_addr(mm, npages); in arch_setup_additional_pages()
396 addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); in arch_setup_additional_pages()
/linux-4.1.27/fs/sysv/
dir.c
73 unsigned long npages = dir_pages(inode); in sysv_readdir() local
84 for ( ; n < npages; n++, offset = 0) { in sysv_readdir()
137 unsigned long npages = dir_pages(dir); in sysv_find_entry() local
144 if (start >= npages) in sysv_find_entry()
165 if (++n >= npages) in sysv_find_entry()
184 unsigned long npages = dir_pages(dir); in sysv_add_link() local
191 for (n = 0; n <= npages; n++) { in sysv_add_link()
292 unsigned long i, npages = dir_pages(inode); in sysv_empty_dir() local
294 for (i = 0; i < npages; i++) { in sysv_empty_dir()
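Note: this block and the minix, ext2, nilfs2, ufs, exofs, and qnx6 hits elsewhere in these results all derive npages from the same per-filesystem dir_pages() idiom: the directory's byte size rounded up to whole pagecache pages. A sketch of that helper (the real versions read i_size from the inode; a PAGE_CACHE_SHIFT of 12 is an assumption):

    #define PAGE_CACHE_SHIFT 12   /* assumed; tracks PAGE_SHIFT in v4.1 */
    #define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

    /* Pagecache pages backing a directory of i_size bytes. */
    static unsigned long dir_pages_sketch(unsigned long i_size)
    {
        return (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    }
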
/linux-4.1.27/drivers/gpu/drm/udl/
udl_dmabuf.c
221 int npages; in udl_prime_create() local
223 npages = size / PAGE_SIZE; in udl_prime_create()
226 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); in udl_prime_create()
231 obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in udl_prime_create()
233 DRM_ERROR("obj pages is NULL %d\n", npages); in udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); in udl_prime_create()
/linux-4.1.27/fs/qnx6/
dir.c
122 unsigned long npages = dir_pages(inode); in qnx6_readdir() local
131 for ( ; !done && n < npages; n++, start = 0) { in qnx6_readdir()
224 unsigned long npages = dir_pages(dir); in qnx6_find_entry() local
231 if (npages == 0) in qnx6_find_entry()
234 if (start >= npages) in qnx6_find_entry()
266 if (++n >= npages) in qnx6_find_entry()
/linux-4.1.27/drivers/gpu/drm/msm/
msm_gem.c
43 int npages) in get_pages_vram() argument
51 p = drm_malloc_ab(npages, sizeof(struct page *)); in get_pages_vram()
56 npages, 0, DRM_MM_SEARCH_DEFAULT); in get_pages_vram()
63 for (i = 0; i < npages; i++) { in get_pages_vram()
79 int npages = obj->size >> PAGE_SHIFT; in get_pages() local
84 p = get_pages_vram(obj, npages); in get_pages()
92 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); in get_pages()
670 int ret, npages; in msm_gem_import() local
686 npages = size / PAGE_SIZE; in msm_gem_import()
690 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in msm_gem_import()
[all …]
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
mem.c
396 struct c4iw_mr *mhp, int shift, int npages) in reregister_mem() argument
401 if (npages > mhp->attr.pbl_size) in reregister_mem()
421 static int alloc_pbl(struct c4iw_mr *mhp, int npages) in alloc_pbl() argument
424 npages << 3); in alloc_pbl()
429 mhp->attr.pbl_size = npages; in alloc_pbl()
436 u64 *total_size, int *npages, in build_phys_page_list() argument
473 *npages = 0; in build_phys_page_list()
475 *npages += (buffer_list[i].size + in build_phys_page_list()
478 if (!*npages) in build_phys_page_list()
481 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); in build_phys_page_list()
[all …]
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_user_sdma.c
277 unsigned long addr, int tlen, int npages) in ipath_user_sdma_pin_pages() argument
283 ret = get_user_pages_fast(addr, npages, 0, pages); in ipath_user_sdma_pin_pages()
284 if (ret != npages) { in ipath_user_sdma_pin_pages()
294 for (j = 0; j < npages; j++) { in ipath_user_sdma_pin_pages()
331 const int npages = ipath_user_sdma_num_pages(iov + idx); in ipath_user_sdma_pin_pkt() local
336 npages); in ipath_user_sdma_pin_pkt()
355 unsigned long niov, int npages) in ipath_user_sdma_init_payload() argument
359 if (npages >= ARRAY_SIZE(pkt->addr)) in ipath_user_sdma_init_payload()
416 int npages = 0; in ipath_user_sdma_queue_pkts() local
492 npages++; in ipath_user_sdma_queue_pkts()
[all …]
/linux-4.1.27/arch/x86/include/asm/
tce.h
42 unsigned int npages, unsigned long uaddr, int direction);
43 extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
/linux-4.1.27/fs/nilfs2/
dir.c
266 unsigned long npages = dir_pages(inode); in nilfs_readdir() local
272 for ( ; n < npages; n++, offset = 0) { in nilfs_readdir()
331 unsigned long npages = dir_pages(dir); in nilfs_find_entry() local
336 if (npages == 0) in nilfs_find_entry()
343 if (start >= npages) in nilfs_find_entry()
366 if (++n >= npages) in nilfs_find_entry()
446 unsigned long npages = dir_pages(dir); in nilfs_add_link() local
457 for (n = 0; n <= npages; n++) { in nilfs_add_link()
621 unsigned long i, npages = dir_pages(inode); in nilfs_empty_dir() local
623 for (i = 0; i < npages; i++) { in nilfs_empty_dir()
/linux-4.1.27/arch/powerpc/platforms/powernv/
pci.c
576 static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, in pnv_tce_build() argument
592 while (npages--) in pnv_tce_build()
606 static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages, in pnv_tce_build_vm() argument
611 return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, in pnv_tce_build_vm()
615 static void pnv_tce_free(struct iommu_table *tbl, long index, long npages, in pnv_tce_free() argument
622 while (npages--) in pnv_tce_free()
629 static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages) in pnv_tce_free_vm() argument
631 pnv_tce_free(tbl, index, npages, false); in pnv_tce_free_vm()
639 static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages, in pnv_tce_build_rm() argument
644 return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true); in pnv_tce_build_rm()
[all …]
/linux-4.1.27/include/linux/
iommu-common.h
42 unsigned long npages,
48 u64 dma_addr, unsigned long npages,
kvm_host.h
286 unsigned long npages; member
296 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; in kvm_dirty_bitmap_bytes()
439 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
520 unsigned long npages);
820 gfn < memslots[slot].base_gfn + memslots[slot].npages) in search_memslots()
833 gfn < memslots[start].base_gfn + memslots[start].npages) { in search_memslots()
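Note: the kvm_host.h hits treat a memslot as the guest-frame interval [base_gfn, base_gfn + npages); both comparisons above in search_memslots() reduce to this containment test. A compact sketch (the struct and names here are illustrative, not the kernel's definitions):

    #include <stdbool.h>

    typedef unsigned long long gfn_t;

    struct slot_sketch {
        gfn_t base_gfn;
        unsigned long npages;
    };

    /* True if gfn lies inside the slot's [base_gfn, base_gfn + npages). */
    static bool gfn_in_slot(const struct slot_sketch *s, gfn_t gfn)
    {
        return gfn >= s->base_gfn && gfn < s->base_gfn + s->npages;
    }
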
nvme.h
135 int npages; /* In the PRP list. 0 means small pool in use */ member
efi.h
1040 static inline void memrange_efi_to_native(u64 *addr, u64 *npages) in memrange_efi_to_native() argument
1042 *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); in memrange_efi_to_native()
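Note: EFI counts pages in fixed 4 KiB units, while the native page size may be larger, so memrange_efi_to_native() re-measures the byte extent of the range in native pages, rounding the end up and the start down. A standalone sketch with the PFN helpers spelled out (a native PAGE_SHIFT of 12 is an assumption, on arm64 it can be 14 or 16; the base-alignment line follows the kernel helper's behavior):

    typedef unsigned long long u64;

    #define EFI_PAGE_SHIFT 12             /* EFI pages are always 4 KiB */
    #define PAGE_SHIFT     12             /* assumed native page size */
    #define PAGE_SIZE      (1ULL << PAGE_SHIFT)

    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    static void memrange_to_native_sketch(u64 *addr, u64 *npages)
    {
        /* Native pages between the rounded-down start and the
         * rounded-up end of the EFI range. */
        *npages = PFN_UP(*addr + (*npages << EFI_PAGE_SHIFT))
                - PFN_DOWN(*addr);
        *addr &= ~(PAGE_SIZE - 1);        /* align the base down too */
    }
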
nfs_xdr.h
713 unsigned int npages; member
1311 unsigned int npages; /* Max length of pagevec */ member
mm.h
1862 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
/linux-4.1.27/virt/kvm/
kvm_main.c
564 free->npages = 0; in kvm_free_physmem_slot()
672 if (!new->npages) { in update_memslots()
673 WARN_ON(!mslots[i].npages); in update_memslots()
676 if (mslots[i].npages) in update_memslots()
679 if (!mslots[i].npages) in update_memslots()
685 if (!mslots[i + 1].npages) in update_memslots()
701 if (new->npages) { in update_memslots()
769 unsigned long npages; in __kvm_set_memory_region() local
799 npages = mem->memory_size >> PAGE_SHIFT; in __kvm_set_memory_region()
801 if (npages > KVM_MEM_MAX_NR_PAGES) in __kvm_set_memory_region()
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/
echo_client.c
170 struct page **pages, int npages, int async);
1172 struct page **pages, int npages, int async) in cl_echo_object_brw() argument
1207 offset + npages * PAGE_CACHE_SIZE - 1, in cl_echo_object_brw()
1213 for (i = 0; i < npages; i++) { in cl_echo_object_brw()
1551 u32 npages; in echo_client_kbrw() local
1577 npages = count >> PAGE_CACHE_SHIFT; in echo_client_kbrw()
1582 OBD_ALLOC(pga, npages * sizeof(*pga)); in echo_client_kbrw()
1586 OBD_ALLOC(pages, npages * sizeof(*pages)); in echo_client_kbrw()
1588 OBD_FREE(pga, npages * sizeof(*pga)); in echo_client_kbrw()
1593 i < npages; in echo_client_kbrw()
[all …]
/linux-4.1.27/fs/exofs/
dir.c
248 unsigned long npages = dir_pages(inode); in exofs_readdir() local
255 for ( ; n < npages; n++, offset = 0) { in exofs_readdir()
316 unsigned long npages = dir_pages(dir); in exofs_find_entry() local
321 if (npages == 0) in exofs_find_entry()
327 if (start >= npages) in exofs_find_entry()
351 if (++n >= npages) in exofs_find_entry()
441 unsigned long npages = dir_pages(dir); in exofs_add_link() local
447 for (n = 0; n <= npages; n++) { in exofs_add_link()
618 unsigned long i, npages = dir_pages(inode); in exofs_empty_dir() local
620 for (i = 0; i < npages; i++) { in exofs_empty_dir()
/linux-4.1.27/fs/ufs/
dir.c
259 unsigned long npages = ufs_dir_pages(dir); in ufs_find_entry() local
266 if (npages == 0 || namelen > UFS_MAXNAMLEN) in ufs_find_entry()
274 if (start >= npages) in ufs_find_entry()
297 if (++n >= npages) in ufs_find_entry()
323 unsigned long npages = ufs_dir_pages(dir); in ufs_add_link() local
336 for (n = 0; n <= npages; n++) { in ufs_add_link()
440 unsigned long npages = ufs_dir_pages(inode); in ufs_readdir() local
450 for ( ; n < npages; n++, offset = 0) { in ufs_readdir()
611 unsigned long i, npages = ufs_dir_pages(inode); in ufs_empty_dir() local
613 for (i = 0; i < npages; i++) { in ufs_empty_dir()
/linux-4.1.27/fs/ext2/
dir.c
297 unsigned long npages = dir_pages(inode); in ext2_readdir() local
308 for ( ; n < npages; n++, offset = 0) { in ext2_readdir()
373 unsigned long npages = dir_pages(dir); in ext2_find_entry() local
379 if (npages == 0) in ext2_find_entry()
386 if (start >= npages) in ext2_find_entry()
411 if (++n >= npages) in ext2_find_entry()
497 unsigned long npages = dir_pages(dir); in ext2_add_link() local
508 for (n = 0; n <= npages; n++) { in ext2_add_link()
673 unsigned long i, npages = dir_pages(inode); in ext2_empty_dir() local
676 for (i = 0; i < npages; i++) { in ext2_empty_dir()
/linux-4.1.27/fs/minix/
dir.c
91 unsigned long npages = dir_pages(inode); in minix_readdir() local
103 for ( ; n < npages; n++, offset = 0) { in minix_readdir()
163 unsigned long npages = dir_pages(dir); in minix_find_entry() local
171 for (n = 0; n < npages; n++) { in minix_find_entry()
212 unsigned long npages = dir_pages(dir); in minix_add_link() local
227 for (n = 0; n <= npages; n++) { in minix_add_link()
368 unsigned long i, npages = dir_pages(inode); in minix_empty_dir() local
373 for (i = 0; i < npages; i++) { in minix_empty_dir()
/linux-4.1.27/drivers/gpu/drm/
drm_gem.c
466 int i, npages; in drm_gem_get_pages() local
477 npages = obj->size >> PAGE_SHIFT; in drm_gem_get_pages()
479 pages = drm_malloc_ab(npages, sizeof(struct page *)); in drm_gem_get_pages()
483 for (i = 0; i < npages; i++) { in drm_gem_get_pages()
519 int i, npages; in drm_gem_put_pages() local
527 npages = obj->size >> PAGE_SHIFT; in drm_gem_put_pages()
529 for (i = 0; i < npages; i++) { in drm_gem_put_pages()
/linux-4.1.27/drivers/gpu/drm/exynos/
exynos_drm_gem.c
419 unsigned int npages, in exynos_gem_get_pages_from_userptr() argument
429 for (i = 0; i < npages; ++i, start += PAGE_SIZE) { in exynos_gem_get_pages_from_userptr()
438 if (i != npages) { in exynos_gem_get_pages_from_userptr()
447 npages, 1, 1, pages, NULL); in exynos_gem_get_pages_from_userptr()
449 if (get_npages != npages) { in exynos_gem_get_pages_from_userptr()
460 unsigned int npages, in exynos_gem_put_pages_to_userptr() argument
466 for (i = 0; i < npages; i++) { in exynos_gem_put_pages_to_userptr()
exynos_drm_gem.h
163 unsigned int npages,
169 unsigned int npages,
exynos_drm_g2d.c
194 unsigned int npages; member
386 g2d_userptr->npages, in g2d_userptr_put_dma_addr()
415 unsigned int npages, offset; in g2d_userptr_get_dma_addr() local
463 npages = (end - start) >> PAGE_SHIFT; in g2d_userptr_get_dma_addr()
464 g2d_userptr->npages = npages; in g2d_userptr_get_dma_addr()
466 pages = drm_calloc_large(npages, sizeof(struct page *)); in g2d_userptr_get_dma_addr()
500 npages, pages, vma); in g2d_userptr_get_dma_addr()
516 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, in g2d_userptr_get_dma_addr()
537 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { in g2d_userptr_get_dma_addr()
538 g2d->current_pool += npages << PAGE_SHIFT; in g2d_userptr_get_dma_addr()
[all …]
/linux-4.1.27/drivers/gpu/drm/omapdrm/
omap_gem.c
230 int npages = obj->size >> PAGE_SHIFT; in omap_gem_attach_pages() local
246 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
252 for (i = 0; i < npages; i++) { in omap_gem_attach_pages()
257 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
284 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_detach_pages() local
285 for (i = 0; i < npages; i++) { in omap_gem_detach_pages()
664 uint32_t npages = obj->size >> PAGE_SHIFT; in omap_gem_roll() local
667 if (roll > npages) { in omap_gem_roll()
682 ret = tiler_pin(omap_obj->block, pages, npages, roll, true); in omap_gem_roll()
716 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_dma_sync() local
[all …]
omap_fbdev.c
51 int npages; in pan_worker() local
54 npages = fbi->fix.line_length >> PAGE_SHIFT; in pan_worker()
55 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages); in pan_worker()
omap_dmm_tiler.c
205 struct page **pages, uint32_t npages, uint32_t roll) in dmm_txn_append() argument
237 if (n >= npages) in dmm_txn_append()
238 n -= npages; in dmm_txn_append()
305 uint32_t npages, uint32_t roll, bool wait) in fill() argument
321 dmm_txn_append(txn, &p_area, pages, npages, roll); in fill()
338 uint32_t npages, uint32_t roll, bool wait) in tiler_pin() argument
342 ret = fill(&block->area, pages, npages, roll, wait); in tiler_pin()
omap_dmm_tiler.h
92 uint32_t npages, uint32_t roll, bool wait);
/linux-4.1.27/arch/x86/platform/efi/
efi.c
513 u64 addr, npages; in efi_set_executable() local
516 npages = md->num_pages; in efi_set_executable()
518 memrange_efi_to_native(&addr, &npages); in efi_set_executable()
521 set_memory_x(addr, npages); in efi_set_executable()
523 set_memory_nx(addr, npages); in efi_set_executable()
545 u64 npages; in efi_memory_uc() local
547 npages = round_up(size, page_shift) / page_shift; in efi_memory_uc()
548 memrange_efi_to_native(&addr, &npages); in efi_memory_uc()
549 set_memory_uc(addr, npages); in efi_memory_uc()
efi_64.c
148 unsigned npages; in efi_setup_page_tables() local
186 npages = (_end - _text) >> PAGE_SHIFT; in efi_setup_page_tables()
189 if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) { in efi_setup_page_tables()
/linux-4.1.27/arch/powerpc/include/asm/
machdep.h
70 long npages,
76 long npages);
84 long npages,
90 long npages);
iommu.h
197 unsigned long npages);
kvm_book3s_64.h
388 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
kvm_ppc.h
182 unsigned long npages);
260 unsigned long npages);
/linux-4.1.27/drivers/gpu/drm/nouveau/
nouveau_prime.c
34 int npages = nvbo->bo.num_pages; in nouveau_gem_prime_get_sg_table() local
36 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); in nouveau_gem_prime_get_sg_table()
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_user_sdma.c
665 unsigned long addr, int tlen, int npages) in qib_user_sdma_pin_pages() argument
671 while (npages) { in qib_user_sdma_pin_pages()
672 if (npages > 8) in qib_user_sdma_pin_pages()
675 j = npages; in qib_user_sdma_pin_pages()
705 npages -= j; in qib_user_sdma_pin_pages()
729 const int npages = qib_user_sdma_num_pages(iov + idx); in qib_user_sdma_pin_pkt() local
733 iov[idx].iov_len, npages); in qib_user_sdma_pin_pkt()
765 unsigned long niov, int npages) in qib_user_sdma_init_payload() argument
770 npages >= ARRAY_SIZE(pkt->addr)) in qib_user_sdma_init_payload()
831 int npages = 0; in qib_user_sdma_queue_pkts() local
[all …]
/linux-4.1.27/drivers/vfio/
vfio_iommu_spapr_tce.c
53 unsigned long locked, lock_limit, npages; in tce_iommu_enable() local
84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; in tce_iommu_enable()
85 locked = current->mm->locked_vm + npages; in tce_iommu_enable()
93 current->mm->locked_vm += npages; in tce_iommu_enable()
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
router.c
1235 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) in lnet_destroy_rtrbuf() argument
1237 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); in lnet_destroy_rtrbuf()
1239 while (--npages >= 0) in lnet_destroy_rtrbuf()
1240 __free_page(rb->rb_kiov[npages].kiov_page); in lnet_destroy_rtrbuf()
1248 int npages = rbp->rbp_npages; in lnet_new_rtrbuf() local
1249 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); in lnet_new_rtrbuf()
1260 for (i = 0; i < npages; i++) { in lnet_new_rtrbuf()
1283 int npages = rbp->rbp_npages; in lnet_rtrpool_free_bufs() local
1299 lnet_destroy_rtrbuf(rb, npages); in lnet_rtrpool_free_bufs()
1344 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages) in lnet_rtrpool_init() argument
[all …]
/linux-4.1.27/drivers/gpu/drm/radeon/
radeon_prime.c
35 int npages = bo->tbo.num_pages; in radeon_gem_prime_get_sg_table() local
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in radeon_gem_prime_get_sg_table()
/linux-4.1.27/arch/s390/pci/
pci_dma.c
321 int npages; in s390_dma_unmap_pages() local
323 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in s390_dma_unmap_pages()
325 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, in s390_dma_unmap_pages()
331 atomic64_add(npages, &zdev->unmapped_pages); in s390_dma_unmap_pages()
333 dma_free_iommu(zdev, iommu_page_index, npages); in s390_dma_unmap_pages()
/linux-4.1.27/drivers/staging/android/ion/
ion_heap.c
36 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; in ion_heap_map_kernel() local
37 struct page **pages = vmalloc(sizeof(struct page *) * npages); in ion_heap_map_kernel()
52 BUG_ON(i >= npages); in ion_heap_map_kernel()
56 vaddr = vmap(pages, npages, VM_MAP, pgprot); in ion_heap_map_kernel()
/linux-4.1.27/arch/powerpc/mm/
subpage-prot.c
61 int npages) in hpte_flush_range() argument
80 for (; npages > 0; --npages) { in hpte_flush_range()
hugetlbpage.c
342 unsigned long npages; in do_gpage_early_setup() local
356 if (sscanf(val, "%lu", &npages) <= 0) in do_gpage_early_setup()
357 npages = 0; in do_gpage_early_setup()
358 if (npages > MAX_NUMBER_GPAGES) { in do_gpage_early_setup()
362 npages, size / 1024); in do_gpage_early_setup()
363 npages = MAX_NUMBER_GPAGES; in do_gpage_early_setup()
365 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; in do_gpage_early_setup()
/linux-4.1.27/drivers/xen/
privcmd.c
210 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || in mmap_mfn_range()
211 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) in mmap_mfn_range()
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_mfn_range()
221 msg->mfn, msg->npages, in mmap_mfn_range()
227 st->va += msg->npages << PAGE_SHIFT; in mmap_mfn_range()
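Note: privcmd takes npages from an untrusted ioctl message, so it rejects any count whose shift to bytes would overflow before ever computing msg->npages << PAGE_SHIFT, which is exactly the guard on the first two lines above. A sketch of that pattern (the helper name and PAGE_SHIFT are assumptions):

    #include <limits.h>
    #include <stdbool.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    /* Convert a caller-supplied page count to bytes only if the shift
     * cannot wrap past LONG_MAX. */
    static bool npages_to_bytes(unsigned long npages, unsigned long *bytes)
    {
        if (npages > ((unsigned long)LONG_MAX >> PAGE_SHIFT))
            return false;   /* shift would overflow */

        *bytes = npages << PAGE_SHIFT;
        return true;
    }
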
/linux-4.1.27/fs/fuse/
dev.c
39 unsigned npages) in fuse_request_init() argument
42 memset(pages, 0, sizeof(*pages) * npages); in fuse_request_init()
43 memset(page_descs, 0, sizeof(*page_descs) * npages); in fuse_request_init()
50 req->max_pages = npages; in fuse_request_init()
53 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) in __fuse_request_alloc() argument
60 if (npages <= FUSE_REQ_INLINE_PAGES) { in __fuse_request_alloc()
64 pages = kmalloc(sizeof(struct page *) * npages, flags); in __fuse_request_alloc()
66 npages, flags); in __fuse_request_alloc()
76 fuse_request_init(req, pages, page_descs, npages); in __fuse_request_alloc()
81 struct fuse_req *fuse_request_alloc(unsigned npages) in fuse_request_alloc() argument
[all …]
fuse_i.h
759 struct fuse_req *fuse_request_alloc(unsigned npages);
761 struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
772 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
774 unsigned npages);
file.c
1267 unsigned npages; in fuse_get_user_pages() local
1281 npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE; in fuse_get_user_pages()
1284 fuse_page_descs_length_init(req, req->num_pages, npages); in fuse_get_user_pages()
1286 req->num_pages += npages; in fuse_get_user_pages()
/linux-4.1.27/fs/f2fs/
debug.c
129 unsigned npages; in update_mem_info() local
195 npages = NODE_MAPPING(sbi)->nrpages; in update_mem_info()
196 si->page_mem += npages << PAGE_CACHE_SHIFT; in update_mem_info()
197 npages = META_MAPPING(sbi)->nrpages; in update_mem_info()
198 si->page_mem += npages << PAGE_CACHE_SHIFT; in update_mem_info()
dir.c
212 unsigned long npages = dir_blocks(dir); in f2fs_find_entry() local
223 if (npages == 0) in f2fs_find_entry()
764 unsigned long npages = dir_blocks(inode); in f2fs_readdir() local
775 if (npages - n > 1 && !ra_has_index(ra, n)) in f2fs_readdir()
777 min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); in f2fs_readdir()
779 for (; n < npages; n++) { in f2fs_readdir()
Dsegment.c1473 int npages = npages_for_summary_flush(sbi, true); in restore_curseg_summaries() local
1475 if (npages >= 2) in restore_curseg_summaries()
1476 ra_meta_pages(sbi, start_sum_block(sbi), npages, in restore_curseg_summaries()
Ddata.c79 int npages, bool is_read) in __bio_alloc() argument
84 bio = bio_alloc(GFP_NOIO, npages); in __bio_alloc()
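f2fs sizes its bios by passing npages straight to bio_alloc(), which in 4.1 takes (gfp_mask, nr_iovecs) and reserves that many bio_vec slots. A hedged sketch of building a read bio over a page array, restricted to 4.1-era block-layer calls; submission and completion handling are left to the caller:

    #include <linux/bio.h>

    static struct bio *build_read_bio(struct block_device *bdev,
                                      sector_t sector,
                                      struct page **pages, int npages)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, npages);
            int i;

            if (!bio)
                    return NULL;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_sector = sector;
            /* One full-page segment per slot reserved above. */
            for (i = 0; i < npages; i++)
                    bio_add_page(bio, pages[i], PAGE_SIZE, 0);
            return bio;
    }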
/linux-4.1.27/arch/parisc/mm/
Dinit.c294 unsigned long npages; in setup_bootmem() local
297 npages = pmem_ranges[i].pages; in setup_bootmem()
302 (start_pfn + npages) ); in setup_bootmem()
305 (npages << PAGE_SHIFT) ); in setup_bootmem()
307 if ((start_pfn + npages) > max_pfn) in setup_bootmem()
308 max_pfn = start_pfn + npages; in setup_bootmem()
/linux-4.1.27/arch/powerpc/platforms/cell/
Diommu.c167 static int tce_build_cell(struct iommu_table *tbl, long index, long npages, in tce_build_cell() argument
201 for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) in tce_build_cell()
206 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_build_cell()
209 index, npages, direction, base_pte); in tce_build_cell()
213 static void tce_free_cell(struct iommu_table *tbl, long index, long npages) in tce_free_cell() argument
221 pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); in tce_free_cell()
235 for (i = 0; i < npages; i++) in tce_free_cell()
240 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_free_cell()
/linux-4.1.27/net/sunrpc/auth_gss/
Dgss_rpc_upcall.c218 for (i = 0; i < arg->npages && arg->pages[i]; i++) in gssp_free_receive_pages()
226 arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE); in gssp_alloc_receive_pages()
227 arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL); in gssp_alloc_receive_pages()
Dgss_rpc_xdr.h151 unsigned int npages; member
Dgss_rpc_xdr.c784 arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); in gssx_enc_accept_sec_context()
/linux-4.1.27/arch/parisc/kernel/
Dinventory.c135 unsigned long npages; in pagezero_memconfig() local
150 npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT); in pagezero_memconfig()
151 set_pmem_entry(pmem_ranges,0UL,npages); in pagezero_memconfig()
/linux-4.1.27/net/sunrpc/xprtrdma/
Drpc_rdma.c306 int i, npages, curlen; in rpcrdma_inline_pullup() local
345 npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT; in rpcrdma_inline_pullup()
346 for (i = 0; copy_len && i < npages; i++) { in rpcrdma_inline_pullup()
613 int i, npages, curlen, olen; in rpcrdma_inline_fixup() local
640 npages = PAGE_ALIGN(page_base + in rpcrdma_inline_fixup()
642 for (; i < npages; i++) { in rpcrdma_inline_fixup()
/linux-4.1.27/arch/mips/kvm/
Dmips.c194 unsigned long npages) in kvm_arch_create_memslot() argument
212 unsigned long npages = 0; in kvm_arch_commit_memory_region() local
222 npages = mem->memory_size >> PAGE_SHIFT; in kvm_arch_commit_memory_region()
224 if (npages) { in kvm_arch_commit_memory_region()
225 kvm->arch.guest_pmap_npages = npages; in kvm_arch_commit_memory_region()
227 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); in kvm_arch_commit_memory_region()
235 npages, kvm->arch.guest_pmap); in kvm_arch_commit_memory_region()
238 for (i = 0; i < npages; i++) in kvm_arch_commit_memory_region()
991 ga_end = ga + (memslot->npages << PAGE_SHIFT); in kvm_vm_ioctl_get_dirty_log()
/linux-4.1.27/include/uapi/xen/
Dprivcmd.h48 __u64 npages; member
/linux-4.1.27/arch/x86/vdso/
Dvma.c30 int npages = (image->size) / PAGE_SIZE; in init_vdso_image() local
33 for (i = 0; i < npages; i++) in init_vdso_image()
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/
Dmodule.c155 static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, in kportal_memhog_alloc() argument
167 if (npages < 0) in kportal_memhog_alloc()
170 if (npages == 0) in kportal_memhog_alloc()
183 while (ldu->ldu_memhog_pages < npages && in kportal_memhog_alloc()
198 while (ldu->ldu_memhog_pages < npages && in kportal_memhog_alloc()
/linux-4.1.27/include/rdma/
Dib_umem.h57 int npages; member
/linux-4.1.27/net/sunrpc/
Dsvcsock.c914 unsigned int i, len, npages; in svc_tcp_restore_pages() local
919 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in svc_tcp_restore_pages()
920 for (i = 0; i < npages; i++) { in svc_tcp_restore_pages()
933 unsigned int i, len, npages; in svc_tcp_save_pages() local
938 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in svc_tcp_save_pages()
939 for (i = 0; i < npages; i++) { in svc_tcp_save_pages()
947 unsigned int i, len, npages; in svc_tcp_clear_pages() local
952 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in svc_tcp_clear_pages()
953 for (i = 0; i < npages; i++) { in svc_tcp_clear_pages()
/linux-4.1.27/include/crypto/
Dif_alg.h68 unsigned int npages; member
/linux-4.1.27/fs/nfs/blocklayout/
Dblocklayout.c241 header->page_array.npages, f_offset, in bl_read_pagelist()
253 for (i = pg_index; i < header->page_array.npages; i++) { in bl_read_pagelist()
290 header->page_array.npages - i, in bl_read_pagelist()
403 for (i = pg_index; i < header->page_array.npages; i++) { in bl_write_pagelist()
417 bio = do_add_page_to_bio(bio, header->page_array.npages - i, in bl_write_pagelist()
/linux-4.1.27/arch/x86/kernel/cpu/
Dperf_event_intel_pt.c619 unsigned long idx, npages, wakeup; in pt_buffer_reset_markers() local
640 npages = handle->size >> PAGE_SHIFT; in pt_buffer_reset_markers()
644 npages++; in pt_buffer_reset_markers()
646 idx = (head >> PAGE_SHIFT) + npages; in pt_buffer_reset_markers()
653 idx = (head >> PAGE_SHIFT) + npages - 1; in pt_buffer_reset_markers()
/linux-4.1.27/fs/jfs/
Djfs_dmap.c4048 #define BMAPPGTOLEV(npages) \ argument
4049 (((npages) <= 3 + MAXL0PAGES) ? 0 : \
4050 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
4056 s64 npages, ndmaps; in dbMapFileSizeToMapSize() local
4061 npages = nblocks >> JFS_SBI(sb)->l2nbperpage; in dbMapFileSizeToMapSize()
4062 level = BMAPPGTOLEV(npages); in dbMapFileSizeToMapSize()
4069 npages--; /* skip the first global control page */ in dbMapFileSizeToMapSize()
4071 npages -= (2 - level); in dbMapFileSizeToMapSize()
4072 npages--; /* skip top level's control page */ in dbMapFileSizeToMapSize()
4076 complete = (u32) npages / factor; in dbMapFileSizeToMapSize()
[all …]
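BMAPPGTOLEV() above picks the dmap control level from a page count with a chained conditional. A runnable check of its boundaries, with small stand-in values since MAXL0PAGES/MAXL1PAGES depend on the filesystem geometry:

    #include <stdio.h>

    #define MAXL0PAGES 5            /* stand-in, not the real value */
    #define MAXL1PAGES 50           /* stand-in, not the real value */

    #define BMAPPGTOLEV(npages) \
            (((npages) <= 3 + MAXL0PAGES) ? 0 : \
             ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)

    int main(void)
    {
            printf("%d %d %d\n", BMAPPGTOLEV(8),    /* 0: last level-0 count */
                                 BMAPPGTOLEV(52),   /* 1: last level-1 count */
                                 BMAPPGTOLEV(53));  /* 2: everything above */
            return 0;
    }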
Djfs_logmgr.c2392 int npages = 0; in lmLogFormat() local
2403 npages = logSize >> sbi->l2nbperpage; in lmLogFormat()
2423 logsuper->size = cpu_to_le32(npages); in lmLogFormat()
2461 lp->h.page = lp->t.page = cpu_to_le32(npages - 3); in lmLogFormat()
2480 for (lspn = 0; lspn < npages - 3; lspn++) { in lmLogFormat()
Djfs_xtree.c2576 int nb, npages, nblks; in xtRelocate() local
2676 npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE; in xtRelocate()
2686 offset += nb, pno++, npages--) { in xtRelocate()
2691 if (rc = cmRead(ip, offset, npages, &cp)) in xtRelocate()
/linux-4.1.27/drivers/block/drbd/
Ddrbd_bitmap.c640 struct page **npages, **opages = NULL; in drbd_bm_resize() local
697 npages = b->bm_pages; in drbd_bm_resize()
700 npages = NULL; in drbd_bm_resize()
702 npages = bm_realloc_pages(b, want); in drbd_bm_resize()
705 if (!npages) { in drbd_bm_resize()
719 b->bm_pages = npages; in drbd_bm_resize()
742 if (opages != npages) in drbd_bm_resize()
/linux-4.1.27/include/linux/mlx4/
Ddevice.h600 int npages; member
1034 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
1040 int npages, int page_shift, struct mlx4_mr *mr);
1048 int start_index, int npages, u64 *page_list);
1326 int npages, u64 iova, u32 *lkey, u32 *rkey);
1439 u64 iova, u64 size, int npages,
/linux-4.1.27/drivers/usb/mon/
Dmon_bin.c220 static int mon_alloc_buff(struct mon_pgmap *map, int npages);
221 static void mon_free_buff(struct mon_pgmap *map, int npages);
1304 static int mon_alloc_buff(struct mon_pgmap *map, int npages) in mon_alloc_buff() argument
1309 for (n = 0; n < npages; n++) { in mon_alloc_buff()
1322 static void mon_free_buff(struct mon_pgmap *map, int npages) in mon_free_buff() argument
1326 for (n = 0; n < npages; n++) in mon_free_buff()
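mon_alloc_buff()/mon_free_buff() above are the symmetric pair behind usbmon's buffer: one get_zeroed_page() per slot on the way up, one free_page() per slot on the way down. A hedged kernel-style sketch of the pair, with rollback on mid-loop failure added for completeness:

    #include <linux/gfp.h>
    #include <linux/errno.h>

    /* Fill map[0..npages) with zeroed pages; on failure undo what was
     * done so the caller never sees a half-populated array. */
    static int alloc_buff(unsigned long *map, int npages)
    {
            int n;

            for (n = 0; n < npages; n++) {
                    map[n] = get_zeroed_page(GFP_KERNEL);
                    if (!map[n]) {
                            while (--n >= 0)
                                    free_page(map[n]);
                            return -ENOMEM;
                    }
            }
            return 0;
    }

    static void free_buff(unsigned long *map, int npages)
    {
            int n;

            for (n = 0; n < npages; n++)
                    free_page(map[n]);
    }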
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Ddir.c157 int npages; in ll_dir_filler() local
173 for (npages = 1; npages < max_pages; npages++) { in ll_dir_filler()
177 page_pool[npages] = page; in ll_dir_filler()
182 op_data->op_npages = npages; in ll_dir_filler()
204 CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages); in ll_dir_filler()
207 for (i = 1; i < npages; i++) { in ll_dir_filler()
Drw26.c216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) in ll_free_user_pages() argument
220 for (i = 0; i < npages; i++) { in ll_free_user_pages()
/linux-4.1.27/sound/pci/emu10k1/
Dmemory.c103 static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) in search_empty_map_area() argument
106 int max_size = npages; in search_empty_map_area()
116 if (size == npages) { in search_empty_map_area()
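search_empty_map_area() walks the emu10k1 mapping list for the first hole of at least npages. The same first-fit idea over a plain occupancy array, as a runnable userspace sketch:

    #include <stdio.h>

    /* First fit: index of the first run of npages free slots in
     * used[0..n), or -1 if no such hole exists. */
    static int first_fit(const char *used, int n, int npages)
    {
            int i, run = 0;

            for (i = 0; i < n; i++) {
                    run = used[i] ? 0 : run + 1;
                    if (run == npages)
                            return i - npages + 1;
            }
            return -1;
    }

    int main(void)
    {
            char used[] = { 1, 0, 0, 1, 0, 0, 0, 1 };

            printf("%d\n", first_fit(used, 8, 3));  /* 4 */
            return 0;
    }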
/linux-4.1.27/arch/arm/kvm/
Dmmu.c358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
749 phys_addr_t size = PAGE_SIZE * memslot->npages; in stage2_unmap_memslot()
1164 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
1485 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
1756 if (memslot->base_gfn + memslot->npages >= in kvm_arch_prepare_memory_region()
1833 unsigned long npages) in kvm_arch_create_memslot() argument
1859 phys_addr_t size = slot->npages << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c1089 int npages = p->ibp_npages; in kiblnd_free_pages() local
1092 for (i = 0; i < npages; i++) { in kiblnd_free_pages()
1097 LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); in kiblnd_free_pages()
1100 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) in kiblnd_alloc_pages() argument
1106 offsetof(kib_pages_t, ibp_pages[npages])); in kiblnd_alloc_pages()
1108 CERROR("Can't allocate descriptor for %d pages\n", npages); in kiblnd_alloc_pages()
1112 memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages])); in kiblnd_alloc_pages()
1113 p->ibp_npages = npages; in kiblnd_alloc_pages()
1115 for (i = 0; i < npages; i++) { in kiblnd_alloc_pages()
1120 CERROR("Can't allocate page %d of %d\n", i, npages); in kiblnd_alloc_pages()
[all …]
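kiblnd_alloc_pages() sizes its allocation with offsetof(kib_pages_t, ibp_pages[npages]): header plus exactly npages trailing slots, in one allocation. A runnable userspace demonstration of that sizing trick:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct pages {
            int npages;
            void *pages[];          /* flexible array member */
    };

    int main(void)
    {
            int npages = 4;
            /* Header plus exactly npages trailing slots, one calloc;
             * offsetof() on the flexible member gives the byte size. */
            struct pages *p = calloc(1, offsetof(struct pages, pages[npages]));

            if (!p)
                    return 1;
            p->npages = npages;
            printf("allocated %zu bytes\n",
                   offsetof(struct pages, pages[npages]));
            free(p);
            return 0;
    }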
Do2iblnd.h961 int npages, __u64 iov, kib_fmr_t *fmr);
981 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
Do2iblnd_cb.c556 int npages; in kiblnd_fmr_map_tx() local
567 for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { in kiblnd_fmr_map_tx()
570 pages[npages++] = (rd->rd_frags[i].rf_addr & in kiblnd_fmr_map_tx()
578 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); in kiblnd_fmr_map_tx()
580 CERROR("Can't map %d pages: %d\n", npages, rc); in kiblnd_fmr_map_tx()
/linux-4.1.27/drivers/infiniband/ulp/srp/
Dib_srp.h294 unsigned int npages; member
Dib_srp.c1280 state->npages, io_addr); in srp_map_finish_fmr()
1310 sizeof(state->pages[0]) * state->npages); in srp_map_finish_fr()
1317 wr.wr.fast_reg.page_list_len = state->npages; in srp_map_finish_fr()
1340 if (state->npages == 0) in srp_finish_mapping()
1343 if (state->npages == 1 && !register_always) in srp_finish_mapping()
1352 state->npages = 0; in srp_finish_mapping()
1422 if (state->npages == dev->max_pages_per_mr || offset != 0) { in srp_map_sg_entry()
1432 if (!state->npages) in srp_map_sg_entry()
1434 state->pages[state->npages++] = dma_addr & dev->mr_page_mask; in srp_map_sg_entry()
/linux-4.1.27/include/linux/mlx5/
Ddriver.h339 int npages; member
676 gfp_t flags, int npages);
706 s32 npages);
/linux-4.1.27/fs/
Dsplice.c1445 unsigned long off, npages; in get_iovec_page_array() local
1482 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; in get_iovec_page_array()
1483 if (npages > pipe_buffers - buffers) in get_iovec_page_array()
1484 npages = pipe_buffers - buffers; in get_iovec_page_array()
1486 error = get_user_pages_fast((unsigned long)base, npages, in get_iovec_page_array()
1519 if (error < npages || buffers == pipe_buffers) in get_iovec_page_array()
/linux-4.1.27/drivers/edac/
Di5100_edac.c856 const unsigned long npages = i5100_npages(mci, i); in i5100_init_csrows() local
860 if (!npages) in i5100_init_csrows()
866 dimm->nr_pages = npages; in i5100_init_csrows()
876 chan, rank, (long)PAGES_TO_MiB(npages)); in i5100_init_csrows()
Dsb_edac.c853 unsigned i, j, banks, ranks, rows, cols, npages; in get_dimm_config() local
929 npages = MiB_TO_PAGES(size); in get_dimm_config()
933 size, npages, in get_dimm_config()
936 dimm->nr_pages = npages; in get_dimm_config()
Di7core_edac.c595 u32 size, npages; in get_dimm_config() local
615 npages = MiB_TO_PAGES(size); in get_dimm_config()
617 dimm->nr_pages = npages; in get_dimm_config()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
Dmr.c469 int npages, u64 iova) in mlx4_ib_map_phys_fmr() argument
474 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
Dsrq.c164 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, in mlx4_ib_create_srq()
Dmlx4_ib.h719 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
Dcq.c111 err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift, in mlx4_ib_alloc_cq_buf()
Dqp.c780 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
/linux-4.1.27/drivers/gpu/drm/i915/
Di915_cmd_parser.c829 int npages = last_page - first_page; in vmap_batch() local
832 pages = drm_malloc_ab(npages, sizeof(*pages)); in vmap_batch()
841 if (i == npages) in vmap_batch()
/linux-4.1.27/fs/afs/
Ddir.c93 __be16 npages; member
143 if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) { in afs_dir_check_page()
146 ntohs(dbuf->blocks[0].pagehdr.npages)); in afs_dir_check_page()
/linux-4.1.27/mm/
Dnommu.c1564 unsigned long npages; in split_vma() local
1591 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1597 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1608 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
Dmmap.c2991 int may_expand_vm(struct mm_struct *mm, unsigned long npages) in may_expand_vm() argument
2998 if (cur + npages > lim) in may_expand_vm()
/linux-4.1.27/drivers/block/
Dnvme-core.c408 iod->npages = -1; in iod_init()
455 if (iod->npages == 0) in nvme_free_iod()
457 for (i = 0; i < iod->npages; i++) { in nvme_free_iod()
672 iod->npages = 0; in nvme_setup_prps()
675 iod->npages = 1; in nvme_setup_prps()
681 iod->npages = -1; in nvme_setup_prps()
693 list[iod->npages++] = prp_list; in nvme_setup_prps()
853 iod->npages = 0; in nvme_queue_rq()
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
Dframework.c1105 sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, in sfw_alloc_pages() argument
1109 LASSERT(npages > 0 && npages <= LNET_MAX_IOV); in sfw_alloc_pages()
1111 rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); in sfw_alloc_pages()
Dselftest.h424 int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
/linux-4.1.27/drivers/scsi/cxgbi/
Dlibcxgbi.c1385 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> in ddp_make_gl() local
1397 npages * (sizeof(dma_addr_t) + in ddp_make_gl()
1401 "xfer %u, %u pages, OOM.\n", xferlen, npages); in ddp_make_gl()
1406 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); in ddp_make_gl()
1408 gl->pages = (struct page **)&gl->phys_addr[npages]; in ddp_make_gl()
1409 gl->nelem = npages; in ddp_make_gl()
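ddp_make_gl() packs two variable-length arrays into one allocation: npages dma_addr_t slots followed by npages struct page pointers, with gl->pages aimed just past the first array. A runnable userspace sketch of that layout computation, with the DMA-specific types swapped for plain integers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct gl {
            unsigned int nelem;
            uint64_t **pages;       /* aliases the tail of the buffer */
            uint64_t phys_addr[];   /* npages addrs, then npages ptrs */
    };

    int main(void)
    {
            unsigned int npages = 3;
            /* One allocation sized for both arrays, as in cxgbi. */
            struct gl *gl = calloc(1, sizeof(*gl) +
                                      npages * (sizeof(uint64_t) +
                                                sizeof(uint64_t *)));

            if (!gl)
                    return 1;
            /* Second array starts right after the first. */
            gl->pages = (uint64_t **)&gl->phys_addr[npages];
            gl->nelem = npages;
            printf("one allocation, two arrays of %u\n", gl->nelem);
            free(gl);
            return 0;
    }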
/linux-4.1.27/net/core/
Dskbuff.c4376 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; in alloc_skb_with_frags() local
4387 if (npages > MAX_SKB_FRAGS) in alloc_skb_with_frags()
4399 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
4401 for (i = 0; npages > 0; i++) { in alloc_skb_with_frags()
4405 if (npages >= 1 << order) { in alloc_skb_with_frags()
4427 npages -= 1 << order; in alloc_skb_with_frags()
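alloc_skb_with_frags() fills npages using the largest page order it can, falling back to smaller orders under memory pressure; each success subtracts 1 << order pages. A runnable sketch of that descending-order loop, with the real allocator mocked out and MAX_ORDER an assumed demo cap:

    #include <stdio.h>

    #define MAX_ORDER 3     /* assumed cap for the demo (8 pages) */

    /* Pretend allocator: order-3 blocks are "unavailable", forcing
     * the fallback to smaller chunks, as skbuff's loop allows. */
    static int try_alloc(int order)
    {
            return order < 3;
    }

    int main(void)
    {
            int npages = 13, order, chunks = 0;

            while (npages > 0) {
                    for (order = MAX_ORDER; order > 0; order--)
                            if ((1 << order) <= npages && try_alloc(order))
                                    break;
                    /* order 0 is the always-successful last resort */
                    printf("chunk %d: order %d (%d pages)\n",
                           ++chunks, order, 1 << order);
                    npages -= 1 << order;
            }
            return 0;
    }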
/linux-4.1.27/drivers/vhost/
Dscsi.c800 unsigned int npages = 0, offset, nbytes; in vhost_scsi_map_to_sgl() local
828 sg_set_page(sg, pages[npages], nbytes, offset); in vhost_scsi_map_to_sgl()
832 npages++; in vhost_scsi_map_to_sgl()
/linux-4.1.27/fs/cifs/
Dfile.c2906 unsigned int npages, rsize, credits; in cifs_send_async_read() local
2926 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); in cifs_send_async_read()
2929 rdata = cifs_readdata_alloc(npages, in cifs_send_async_read()
2937 rc = cifs_read_allocate_pages(rdata, npages); in cifs_send_async_read()
2942 rdata->nr_pages = npages; in cifs_send_async_read()
/linux-4.1.27/arch/s390/kvm/
Dkvm-s390.c220 last_gfn = memslot->base_gfn + memslot->npages; in kvm_s390_sync_dirty_log()
2564 unsigned long npages) in kvm_arch_create_memslot() argument
2604 old->npages * PAGE_SIZE == mem->memory_size) in kvm_arch_commit_memory_region()
/linux-4.1.27/drivers/iommu/
Dintel-iommu.c4518 unsigned int npages; in intel_iommu_unmap() local
4534 npages = last_pfn - start_pfn + 1; in intel_iommu_unmap()
4546 npages, !freelist, 0); in intel_iommu_unmap()
/linux-4.1.27/drivers/staging/lustre/lustre/include/
Dlustre_net.h2412 unsigned npages, unsigned max_brw,
/linux-4.1.27/net/ipv4/
Dtcp_input.c4449 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); in tcp_send_rcvq() local
4451 data_len = npages << PAGE_SHIFT; in tcp_send_rcvq()