Searched refs:npages (Results 1 – 200 of 208) sorted by relevance

/linux-4.4.14/drivers/gpu/drm/ttm/
ttm_page_alloc.c
77 unsigned npages; member
276 static void ttm_pages_put(struct page *pages[], unsigned npages) in ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) in ttm_pages_put()
280 pr_err("Failed to set %d pages to wb!\n", npages); in ttm_pages_put()
281 for (i = 0; i < npages; ++i) in ttm_pages_put()
288 pool->npages -= freed_pages; in ttm_pool_update_free_locked()
430 count += _manager->pools[i].npages; in ttm_pool_shrink_count()
593 && count > pool->npages) { in ttm_page_pool_fill_locked()
611 pool->npages += alloc_size; in ttm_page_pool_fill_locked()
619 pool->npages += cpages; in ttm_page_pool_fill_locked()
[all …]
ttm_page_alloc_dma.c
382 struct page *pages[], unsigned npages) in ttm_dma_pages_put() argument
387 if (npages && !(pool->type & IS_CACHED) && in ttm_dma_pages_put()
388 set_pages_array_wb(pages, npages)) in ttm_dma_pages_put()
390 pool->dev_name, npages); in ttm_dma_pages_put()
942 unsigned count = 0, i, npages = 0; in ttm_dma_unpopulate() local
972 npages = pool->npages_free - _manager->options.max_size; in ttm_dma_unpopulate()
997 if (npages) in ttm_dma_unpopulate()
998 ttm_dma_page_pool_free(pool, npages, false); in ttm_dma_unpopulate()
ttm_bo.c
1187 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; in ttm_bo_acc_size() local
1191 size += PAGE_ALIGN(npages * sizeof(void *)); in ttm_bo_acc_size()
1201 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; in ttm_bo_dma_acc_size() local
1205 size += PAGE_ALIGN(npages * sizeof(void *)); in ttm_bo_dma_acc_size()
1206 size += PAGE_ALIGN(npages * sizeof(dma_addr_t)); in ttm_bo_dma_acc_size()
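Note: every TTM hit above derives npages from a byte size with the same idiom: round up to a page boundary, then shift right by PAGE_SHIFT; ttm_bo_acc_size() then sizes its per-page bookkeeping from the result. A minimal, self-contained sketch of that arithmetic (4 KiB pages assumed; an illustration, not code from the tree):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long bo_size = 10000;  /* buffer object size in bytes */
        unsigned long npages  = PAGE_ALIGN(bo_size) >> PAGE_SHIFT;

        /* ttm_bo_acc_size() reserves one pointer per page on top */
        printf("%lu bytes -> %lu pages (%zu bytes of page pointers)\n",
               bo_size, npages, npages * sizeof(void *));
        return 0;   /* prints: 10000 bytes -> 3 pages (24 bytes ...) */
    }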
/linux-4.4.14/arch/sparc/kernel/
iommu.c
158 unsigned long npages) in alloc_npages() argument
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, in alloc_npages()
204 int npages, nid; in dma_4u_alloc_coherent() local
233 npages = size >> IO_PAGE_SHIFT; in dma_4u_alloc_coherent()
235 while (npages--) { in dma_4u_alloc_coherent()
251 unsigned long order, npages; in dma_4u_free_coherent() local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; in dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); in dma_4u_free_coherent()
271 unsigned long flags, npages, oaddr; in dma_4u_map_page() local
283 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); in dma_4u_map_page()
[all …]
pci_sun4v.c
43 unsigned long npages; /* Number of pages in list. */ member
57 p->npages = 0; in iommu_batch_start()
68 unsigned long npages = p->npages; in iommu_batch_flush() local
70 while (npages != 0) { in iommu_batch_flush()
74 npages, prot, __pa(pglist)); in iommu_batch_flush()
81 npages, prot, __pa(pglist), num); in iommu_batch_flush()
86 npages -= num; in iommu_batch_flush()
91 p->npages = 0; in iommu_batch_flush()
100 if (p->entry + p->npages == entry) in iommu_batch_new_entry()
112 BUG_ON(p->npages >= PGLIST_NENTS); in iommu_batch_add()
[all …]
ldc.c
1017 unsigned long entry, unsigned long npages) in ldc_demap() argument
1024 for (i = 0; i < npages; i++) { in ldc_demap()
1950 unsigned long npages) in alloc_npages() argument
1955 npages, NULL, (unsigned long)-1, 0); in alloc_npages()
2084 unsigned long i, npages; in ldc_map_sg() local
2098 npages = err; in ldc_map_sg()
2104 base = alloc_npages(iommu, npages); in ldc_map_sg()
2130 unsigned long npages, pa; in ldc_map_single() local
2142 npages = pages_in_region(pa, len); in ldc_map_single()
2146 base = alloc_npages(iommu, npages); in ldc_map_single()
[all …]
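Note: dma_4u_map_page() above computes npages for an unaligned buffer by aligning the end up and the start down to the IOMMU page size (sparc64 uses 8 KiB IO pages). A self-contained sketch of that span calculation; the helper name is mine:

    #include <stdio.h>

    #define IO_PAGE_SHIFT 13                   /* 8 KiB, as on sparc64 */
    #define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    /* hypothetical helper: IOMMU pages touched by [oaddr, oaddr + sz) */
    static unsigned long io_pages_spanned(unsigned long oaddr, unsigned long sz)
    {
        return (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK))
                >> IO_PAGE_SHIFT;
    }

    int main(void)
    {
        /* 100 bytes straddling an 8 KiB boundary still cost two entries */
        printf("%lu\n", io_pages_spanned(0x1ff0, 100));   /* prints 2 */
        return 0;
    }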
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
55 s32 npages; member
166 s32 *npages, int boot) in mlx5_cmd_query_pages() argument
184 *npages = be32_to_cpu(out.num_pages); in mlx5_cmd_query_pages()
303 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, in give_pages() argument
313 inlen = sizeof(*in) + npages * sizeof(in->pas[0]); in give_pages()
322 for (i = 0; i < npages; i++) { in give_pages()
339 in->num_entries = cpu_to_be32(npages); in give_pages()
343 func_id, npages, err); in give_pages()
346 dev->priv.fw_pages += npages; in give_pages()
351 func_id, npages, out.hdr.status); in give_pages()
[all …]
alloc.c
72 buf->npages = 1; in mlx5_buf_alloc_node()
83 buf->npages *= 2; in mlx5_buf_alloc_node()
205 for (i = 0; i < buf->npages; i++) { in mlx5_fill_page_array()
eq.c
276 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); in mlx5_eq_int() local
279 func_id, npages); in mlx5_eq_int()
280 mlx5_core_req_pages_handler(dev, func_id, npages); in mlx5_eq_int()
356 inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; in mlx5_create_map_eq()
en_main.c
383 sizeof(u64) * rq->wq_ctrl.buf.npages; in mlx5e_enable_rq()
611 sizeof(u64) * sq->wq_ctrl.buf.npages; in mlx5e_enable_sq()
808 sizeof(u64) * cq->wq_ctrl.buf.npages; in mlx5e_enable_cq()
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_mem.c
81 int npages) in iwch_reregister_mem() argument
87 if (npages > mhp->attr.pbl_size) in iwch_reregister_mem()
109 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) in iwch_alloc_pbl() argument
112 npages << 3); in iwch_alloc_pbl()
117 mhp->attr.pbl_size = npages; in iwch_alloc_pbl()
128 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) in iwch_write_pbl() argument
131 mhp->attr.pbl_addr + (offset << 3), npages); in iwch_write_pbl()
138 int *npages, in build_phys_page_list() argument
176 *npages = 0; in build_phys_page_list()
178 *npages += (buffer_list[i].size + in build_phys_page_list()
[all …]
cxio_dbg.c
78 int size, npages; in cxio_dump_pbl() local
81 npages = (len + (1ULL << shift) - 1) >> shift; in cxio_dump_pbl()
82 size = npages * sizeof(u64); in cxio_dump_pbl()
93 __func__, m->addr, m->len, npages); in cxio_dump_pbl()
iwch_provider.h
81 u32 npages; member
347 int npages);
348 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
350 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
355 int *npages,
iwch_provider.c
491 int npages; in iwch_register_phys_mem() local
520 &total_size, &npages, &shift, &page_list); in iwch_register_phys_mem()
524 ret = iwch_alloc_pbl(mhp, npages); in iwch_register_phys_mem()
530 ret = iwch_write_pbl(mhp, page_list, npages, 0); in iwch_register_phys_mem()
543 mhp->attr.pbl_size = npages; in iwch_register_phys_mem()
573 int npages = 0; in iwch_reregister_phys_mem() local
599 &total_size, &npages, in iwch_reregister_phys_mem()
605 ret = iwch_reregister_mem(rhp, php, &mh, shift, npages); in iwch_reregister_phys_mem()
619 mhp->attr.pbl_size = npages; in iwch_reregister_phys_mem()
868 if (unlikely(mhp->npages == mhp->attr.pbl_size)) in iwch_set_page()
[all …]
iwch_qp.c
156 if (mhp->npages > T3_MAX_FASTREG_DEPTH) in build_memreg()
165 V_FR_PAGE_COUNT(mhp->npages) | in build_memreg()
170 for (i = 0; i < mhp->npages; i++, p++) { in build_memreg()
179 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG, in build_memreg()
186 *flit_cnt = 5 + mhp->npages; in build_memreg()
/linux-4.4.14/arch/powerpc/kernel/
iommu.c
178 unsigned long npages, in iommu_range_alloc() argument
185 int largealloc = npages > 15; in iommu_range_alloc()
198 if (unlikely(npages == 0)) { in iommu_range_alloc()
259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
285 end = n + npages; in iommu_range_alloc()
307 void *page, unsigned int npages, in iommu_alloc() argument
316 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
325 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
335 __iommu_free(tbl, ret, npages); in iommu_alloc()
350 unsigned int npages) in iommu_free_check() argument
[all …]
/linux-4.4.14/drivers/infiniband/hw/mthca/
mthca_memfree.c
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, in mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) in mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { in mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, in mthca_alloc_icm() argument
157 while (npages > 0) { in mthca_alloc_icm()
165 chunk->npages = 0; in mthca_alloc_icm()
170 while (1 << cur_order > npages) in mthca_alloc_icm()
175 &chunk->mem[chunk->npages], in mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], in mthca_alloc_icm()
182 ++chunk->npages; in mthca_alloc_icm()
[all …]
mthca_allocator.c
199 int npages, shift; in mthca_buf_alloc() local
206 npages = 1; in mthca_buf_alloc()
220 npages *= 2; in mthca_buf_alloc()
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_buf_alloc()
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_buf_alloc()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, in mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) in mthca_buf_alloc()
246 for (i = 0; i < npages; ++i) { in mthca_buf_alloc()
[all …]
mthca_memfree.h
53 int npages; member
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; member
mthca_eq.c
470 int npages; in mthca_create_eq() local
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; in mthca_create_eq()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mthca_create_eq()
487 for (i = 0; i < npages; ++i) in mthca_create_eq()
490 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mthca_create_eq()
499 for (i = 0; i < npages; ++i) { in mthca_create_eq()
519 dma_list, PAGE_SHIFT, npages, in mthca_create_eq()
520 0, npages * PAGE_SIZE, in mthca_create_eq()
571 for (i = 0; i < npages; ++i) in mthca_create_eq()
593 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / in mthca_free_eq() local
[all …]
mthca_provider.c
912 int npages; in mthca_reg_phys_mr() local
939 npages = 0; in mthca_reg_phys_mr()
941 npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift; in mthca_reg_phys_mr()
943 if (!npages) in mthca_reg_phys_mr()
946 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); in mthca_reg_phys_mr()
964 shift, npages); in mthca_reg_phys_mr()
968 page_list, shift, npages, in mthca_reg_phys_mr()
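Note: mthca_alloc_icm() above (and mlx4_alloc_icm() later in this listing) whittles npages down with progressively smaller high-order allocations: cap the chunk order so it never overshoots the pages still owed, and shrink the order when an allocation fails. A sketch of that loop shape with the real page allocator stubbed out; the constants and the always-fail-at-order-3 stub are illustrative only:

    #include <stdio.h>
    #include <stdbool.h>

    #define START_ORDER 3   /* first try 8-page chunks (illustrative) */

    /* stand-in for alloc_pages(); pretend order-3 requests always fail */
    static bool try_alloc_chunk(int order) { return order < 3; }

    static void alloc_icm(int npages)
    {
        int cur_order = START_ORDER;

        while (npages > 0) {
            while (1 << cur_order > npages)   /* don't overshoot the rest */
                --cur_order;
            if (try_alloc_chunk(cur_order)) {
                printf("got %d pages\n", 1 << cur_order);
                npages -= 1 << cur_order;
            } else if (cur_order-- == 0) {    /* shrink and retry; ...    */
                printf("out of memory\n");    /* ... order 0 failing is fatal */
                return;
            }
        }
    }

    int main(void) { alloc_icm(13); return 0; }  /* allocates 4+4+4+1 pages */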
/linux-4.4.14/drivers/infiniband/core/
umem.c
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { in __ib_umem_release()
92 unsigned long npages; in ib_umem_get() local
164 npages = ib_umem_num_pages(umem); in ib_umem_get()
168 locked = npages + current->mm->pinned_vm; in ib_umem_get()
178 if (npages == 0) { in ib_umem_get()
183 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); in ib_umem_get()
190 while (npages) { in ib_umem_get()
192 min_t(unsigned long, npages, in ib_umem_get()
199 umem->npages += ret; in ib_umem_get()
201 npages -= ret; in ib_umem_get()
[all …]
umem_odp.c
528 int j, k, ret = 0, start_idx, npages = 0; in ib_umem_odp_map_dma_pages() local
575 npages = get_user_pages(owning_process, owning_mm, user_virt, in ib_umem_odp_map_dma_pages()
581 if (npages < 0) in ib_umem_odp_map_dma_pages()
584 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); in ib_umem_odp_map_dma_pages()
585 user_virt += npages << PAGE_SHIFT; in ib_umem_odp_map_dma_pages()
587 for (j = 0; j < npages; ++j) { in ib_umem_odp_map_dma_pages()
599 for (++j; j < npages; ++j) in ib_umem_odp_map_dma_pages()
606 if (npages < 0 && k == start_idx) in ib_umem_odp_map_dma_pages()
607 ret = npages; in ib_umem_odp_map_dma_pages()
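Note: ib_umem_get() above charges the whole range against RLIMIT_MEMLOCK before pinning anything: npages plus the pages this mm has already pinned must stay under the limit. The usnic_uiom.c, mm_iommu_adjust_locked_vm() and vfio hits later in the listing follow the same shape. A userspace model of that admission check; the struct and field names here are mine:

    #include <stdio.h>
    #include <stdbool.h>

    struct mm_model {
        unsigned long pinned_vm;    /* pages this mm already has pinned */
        unsigned long lock_limit;   /* RLIMIT_MEMLOCK expressed in pages */
    };

    /* model of the ib_umem_get() check (the real code also waives it
     * for tasks with CAP_IPC_LOCK) */
    static bool may_pin(struct mm_model *mm, unsigned long npages)
    {
        unsigned long locked = npages + mm->pinned_vm;

        if (locked > mm->lock_limit)
            return false;            /* the kernel returns -ENOMEM here */
        mm->pinned_vm = locked;
        return true;
    }

    int main(void)
    {
        struct mm_model mm = { .pinned_vm = 100, .lock_limit = 16384 };

        printf("%d\n", may_pin(&mm, 16000));   /* 1: fits under the limit */
        printf("%d\n", may_pin(&mm, 16000));   /* 0: would exceed it */
        return 0;
    }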
/linux-4.4.14/arch/x86/kernel/
pci-calgary_64.c
204 unsigned long start_addr, unsigned int npages) in iommu_range_reserve() argument
216 end = index + npages; in iommu_range_reserve()
222 bitmap_set(tbl->it_map, index, npages); in iommu_range_reserve()
229 unsigned int npages) in iommu_range_alloc() argument
238 BUG_ON(npages == 0); in iommu_range_alloc()
243 npages, 0, boundary_size, 0); in iommu_range_alloc()
248 npages, 0, boundary_size, 0); in iommu_range_alloc()
259 tbl->it_hint = offset + npages; in iommu_range_alloc()
268 void *vaddr, unsigned int npages, int direction) in iommu_alloc() argument
273 entry = iommu_range_alloc(dev, tbl, npages); in iommu_alloc()
[all …]
tce_64.c
50 unsigned int npages, unsigned long uaddr, int direction) in tce_build() argument
62 while (npages--) { in tce_build()
75 void tce_free(struct iommu_table *tbl, long index, unsigned int npages) in tce_free() argument
81 while (npages--) { in tce_free()
amd_gart_64.c
217 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); in dma_map_area() local
224 iommu_page = alloc_iommu(dev, npages, align_mask); in dma_map_area()
234 for (i = 0; i < npages; i++) { in dma_map_area()
270 int npages; in gart_unmap_page() local
278 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in gart_unmap_page()
279 for (i = 0; i < npages; i++) { in gart_unmap_page()
282 free_iommu(iommu_page, npages); in gart_unmap_page()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
mr.c
197 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, in mlx4_mtt_init() argument
202 if (!npages) { in mlx4_mtt_init()
209 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
420 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
430 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_alloc_reserved()
530 int npages, int page_shift, struct mlx4_mr *mr) in mlx4_mr_alloc() argument
540 access, npages, page_shift, mr); in mlx4_mr_alloc()
592 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
597 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); in mlx4_mr_rereg_mem_write()
695 int start_index, int npages, u64 *page_list) in mlx4_write_mtt_chunk() argument
[all …]
icm.c
59 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, in mlx4_free_icm_pages()
62 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) in mlx4_free_icm_coherent()
127 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, in mlx4_alloc_icm() argument
153 while (npages > 0) { in mlx4_alloc_icm()
168 chunk->npages = 0; in mlx4_alloc_icm()
173 while (1 << cur_order > npages) in mlx4_alloc_icm()
178 &chunk->mem[chunk->npages], in mlx4_alloc_icm()
181 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], in mlx4_alloc_icm()
192 ++chunk->npages; in mlx4_alloc_icm()
[all …]
icm.h
52 int npages; member
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
eq.c
958 int npages; in mlx4_create_eq() local
970 npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; in mlx4_create_eq()
972 eq->page_list = kmalloc(npages * sizeof *eq->page_list, in mlx4_create_eq()
977 for (i = 0; i < npages; ++i) in mlx4_create_eq()
980 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); in mlx4_create_eq()
989 for (i = 0; i < npages; ++i) { in mlx4_create_eq()
1013 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); in mlx4_create_eq()
1017 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); in mlx4_create_eq()
1057 for (i = 0; i < npages; ++i) in mlx4_create_eq()
1082 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; in mlx4_free_eq() local
[all …]
alloc.c
593 buf->npages = 1; in mlx4_buf_alloc()
604 buf->npages *= 2; in mlx4_buf_alloc()
613 buf->npages = buf->nbufs; in mlx4_buf_alloc()
806 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, in mlx4_alloc_hwq_res()
resource_tracker.c
3194 int npages = vhcr->in_modifier; in mlx4_WRITE_MTT_wrapper() local
3197 err = get_containing_mtt(dev, slave, start, npages, &rmtt); in mlx4_WRITE_MTT_wrapper()
3208 for (i = 0; i < npages; ++i) in mlx4_WRITE_MTT_wrapper()
3211 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, in mlx4_WRITE_MTT_wrapper()
/linux-4.4.14/arch/sparc/mm/
iommu.c
177 static u32 iommu_get_one(struct device *dev, struct page *page, int npages) in iommu_get_one() argument
186 ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); in iommu_get_one()
194 for (i = 0; i < npages; i++) { in iommu_get_one()
202 iommu_flush_iotlb(iopte0, npages); in iommu_get_one()
210 int npages; in iommu_get_scsi_one() local
215 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; in iommu_get_scsi_one()
217 busa = iommu_get_one(dev, page, npages); in iommu_get_scsi_one()
283 static void iommu_release_one(struct device *dev, u32 busa, int npages) in iommu_release_one() argument
291 for (i = 0; i < npages; i++) { in iommu_release_one()
296 bit_map_clear(&iommu->usemap, ioptex, npages); in iommu_release_one()
[all …]
io-unit.c
96 int i, j, k, npages; in iounit_get_area() local
100 npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; in iounit_get_area()
103 switch (npages) { in iounit_get_area()
109 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); in iounit_get_area()
116 if (scan + npages > limit) { in iounit_get_area()
127 for (k = 1, scan++; k < npages; k++) in iounit_get_area()
131 scan -= npages; in iounit_get_area()
134 for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { in iounit_get_area()
srmmu.c
951 unsigned long npages; in srmmu_paging_init() local
957 npages = max_low_pfn - pfn_base; in srmmu_paging_init()
959 zones_size[ZONE_DMA] = npages; in srmmu_paging_init()
960 zholes_size[ZONE_DMA] = npages - pages_avail; in srmmu_paging_init()
962 npages = highend_pfn - max_low_pfn; in srmmu_paging_init()
963 zones_size[ZONE_HIGHMEM] = npages; in srmmu_paging_init()
964 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); in srmmu_paging_init()
/linux-4.4.14/arch/x86/kvm/
iommu.c
44 gfn_t base_gfn, unsigned long npages);
47 unsigned long npages) in kvm_pin_pages() argument
53 end_gfn = gfn + npages; in kvm_pin_pages()
65 static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) in kvm_unpin_pages() argument
69 for (i = 0; i < npages; ++i) in kvm_unpin_pages()
86 end_gfn = gfn + slot->npages; in kvm_iommu_map_pages()
274 gfn_t base_gfn, unsigned long npages) in kvm_iommu_put_pages() argument
282 end_gfn = base_gfn + npages; in kvm_iommu_put_pages()
318 kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); in kvm_iommu_unmap_pages()
paging_tmpl.h
144 int npages; in FNAME() local
149 npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page); in FNAME()
151 if (unlikely(npages != 1)) in FNAME()
mmu.c
1519 (memslot->npages << PAGE_SHIFT)); in kvm_handle_hva_range()
4540 memslot->base_gfn + memslot->npages - 1, in slot_handle_level()
4581 end = min(gfn_end, memslot->base_gfn + memslot->npages); in kvm_zap_gfn_range()
4951 nr_pages += memslot->npages; in kvm_mmu_calculate_mmu_pages()
x86.c
7709 if (WARN_ON(slot->npages)) in __x86_set_memory_region()
7721 if (!slot->npages) in __x86_set_memory_region()
7742 r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); in __x86_set_memory_region()
7803 unsigned long npages) in kvm_arch_create_memslot() argument
7812 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_arch_create_memslot()
7829 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_arch_create_memslot()
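Note: in the KVM hits a memslot is essentially the pair (base_gfn, npages), and every lookup, such as search_memslots() under include/linux further down, reduces to the containment test base_gfn <= gfn < base_gfn + npages. A minimal sketch with the struct trimmed to the two fields that matter:

    #include <stdio.h>
    #include <stdbool.h>

    typedef unsigned long long gfn_t;

    struct memslot { gfn_t base_gfn; unsigned long npages; };

    /* the predicate search_memslots() uses to match a guest frame */
    static bool slot_contains(const struct memslot *s, gfn_t gfn)
    {
        return gfn >= s->base_gfn && gfn < s->base_gfn + s->npages;
    }

    int main(void)
    {
        struct memslot slot = { .base_gfn = 0x100, .npages = 512 };

        printf("%d %d %d\n",
               slot_contains(&slot, 0x100),    /* 1: first frame */
               slot_contains(&slot, 0x2ff),    /* 1: last frame */
               slot_contains(&slot, 0x300));   /* 0: one past the end */
        return 0;
    }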
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
gk20a.c
139 int npages = nvkm_memory_size(memory) >> 12; in gk20a_instobj_cpu_map_dma() local
140 struct page *pages[npages]; in gk20a_instobj_cpu_map_dma()
146 for (i = 1; i < npages; i++) in gk20a_instobj_cpu_map_dma()
149 return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); in gk20a_instobj_cpu_map_dma()
160 int npages = nvkm_memory_size(memory) >> 12; in gk20a_instobj_cpu_map_iommu() local
162 return vmap(node->pages, npages, VM_MAP, in gk20a_instobj_cpu_map_iommu()
390 gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_dma() argument
403 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, in gk20a_instobj_ctor_dma()
420 node->r.length = (npages << PAGE_SHIFT) >> 12; in gk20a_instobj_ctor_dma()
431 gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_iommu() argument
[all …]
/linux-4.4.14/fs/nfs/
nfs3acl.c
157 unsigned int npages = 1 + ((args.len - 1) >> PAGE_SHIFT); in __nfs3_proc_setacls() local
161 args.pages[args.npages] = alloc_page(GFP_KERNEL); in __nfs3_proc_setacls()
162 if (args.pages[args.npages] == NULL) in __nfs3_proc_setacls()
164 args.npages++; in __nfs3_proc_setacls()
165 } while (args.npages < npages); in __nfs3_proc_setacls()
197 while (args.npages != 0) { in __nfs3_proc_setacls()
198 args.npages--; in __nfs3_proc_setacls()
199 __free_page(args.pages[args.npages]); in __nfs3_proc_setacls()
direct.c
274 static void nfs_direct_release_pages(struct page **pages, unsigned int npages) in nfs_direct_release_pages() argument
277 for (i = 0; i < npages; i++) in nfs_direct_release_pages()
494 unsigned npages, i; in nfs_direct_read_schedule_iovec() local
503 npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; in nfs_direct_read_schedule_iovec()
504 for (i = 0; i < npages; i++) { in nfs_direct_read_schedule_iovec()
527 nfs_direct_release_pages(pagevec, npages); in nfs_direct_read_schedule_iovec()
881 unsigned npages, i; in nfs_direct_write_schedule_iovec() local
890 npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; in nfs_direct_write_schedule_iovec()
891 for (i = 0; i < npages; i++) { in nfs_direct_write_schedule_iovec()
918 nfs_direct_release_pages(pagevec, npages); in nfs_direct_write_schedule_iovec()
fscache.c
372 unsigned npages = *nr_pages; in __nfs_readpages_from_fscache() local
376 nfs_i_fscache(inode), npages, inode); in __nfs_readpages_from_fscache()
383 if (*nr_pages < npages) in __nfs_readpages_from_fscache()
385 npages); in __nfs_readpages_from_fscache()
read.c
384 unsigned long npages; in nfs_readpages() local
422 npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> in nfs_readpages()
424 nfs_add_stats(inode, NFSIOS_READPAGES, npages); in nfs_readpages()
pagelist.c
34 p->npages = pagecount; in nfs_pgarray_set()
40 p->npages = 0; in nfs_pgarray_set()
dir.c
586 void nfs_readdir_free_pages(struct page **pages, unsigned int npages) in nfs_readdir_free_pages() argument
589 for (i = 0; i < npages; i++) in nfs_readdir_free_pages()
598 int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages) in nfs_readdir_alloc_pages() argument
602 for (i = 0; i < npages; i++) { in nfs_readdir_alloc_pages()
nfs4proc.c
4728 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); in __nfs4_get_acl_uncached() local
4733 if (npages == 0) in __nfs4_get_acl_uncached()
4734 npages = 1; in __nfs4_get_acl_uncached()
4735 if (npages > ARRAY_SIZE(pages)) in __nfs4_get_acl_uncached()
4738 for (i = 0; i < npages; i++) { in __nfs4_get_acl_uncached()
4749 args.acl_len = npages * PAGE_SIZE; in __nfs4_get_acl_uncached()
4752 __func__, buf, buflen, npages, args.acl_len); in __nfs4_get_acl_uncached()
4777 for (i = 0; i < npages; i++) in __nfs4_get_acl_uncached()
4834 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); in __nfs4_proc_set_acl() local
4839 if (npages > ARRAY_SIZE(pages)) in __nfs4_proc_set_acl()
/linux-4.4.14/arch/powerpc/kvm/
book3s_64_vio.c
104 long npages; in kvm_vm_ioctl_create_spapr_tce() local
114 npages = kvmppc_stt_npages(args->window_size); in kvm_vm_ioctl_create_spapr_tce()
116 stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), in kvm_vm_ioctl_create_spapr_tce()
125 for (i = 0; i < npages; i++) { in kvm_vm_ioctl_create_spapr_tce()
143 for (i = 0; i < npages; i++) in kvm_vm_ioctl_create_spapr_tce()
book3s_64_mmu_hv.c
179 unsigned long npages; in kvmppc_map_vrma() local
189 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
192 if (npages > 1ul << (40 - porder)) in kvmppc_map_vrma()
193 npages = 1ul << (40 - porder); in kvmppc_map_vrma()
195 if (npages > kvm->arch.hpt_mask + 1) in kvmppc_map_vrma()
196 npages = kvm->arch.hpt_mask + 1; in kvmppc_map_vrma()
203 for (i = 0; i < npages; ++i) { in kvmppc_map_vrma()
449 long index, ret, npages; in kvmppc_book3s_hv_page_fault() local
514 npages = get_user_pages_fast(hva, 1, writing, pages); in kvmppc_book3s_hv_page_fault()
515 if (npages < 1) { in kvmppc_book3s_hv_page_fault()
[all …]
book3s_hv_rm_mmu.c
408 long npages, int global, bool need_sync) in do_tlbies() argument
417 for (i = 0; i < npages; ++i) in do_tlbies()
425 for (i = 0; i < npages; ++i) in do_tlbies()
book3s.c
750 unsigned long npages) in kvmppc_core_create_memslot() argument
752 return kvm->arch.kvm_ops->create_memslot(slot, npages); in kvmppc_core_create_memslot()
book3s_pr.c
266 (memslot->npages << PAGE_SHIFT)); in do_kvm_unmap_hva()
1553 ga_end = ga + (memslot->npages << PAGE_SHIFT); in kvm_vm_ioctl_get_dirty_log_pr()
1596 unsigned long npages) in kvmppc_core_create_memslot_pr() argument
book3s_hv.c
2829 unsigned long npages) in kvmppc_core_create_memslot_hv() argument
2831 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); in kvmppc_core_create_memslot_hv()
2850 unsigned long npages = mem->memory_size >> PAGE_SHIFT; in kvmppc_core_commit_memory_region_hv() local
2854 if (npages && old->npages) { in kvmppc_core_commit_memory_region_hv()
powerpc.c
594 unsigned long npages) in kvm_arch_create_memslot() argument
596 return kvmppc_core_create_memslot(kvm, slot, npages); in kvm_arch_create_memslot()
e500_mmu_host.c
384 slot_end = slot_start + slot->npages; in kvmppc_e500_shadow_map()
/linux-4.4.14/arch/alpha/kernel/
pci_iommu.c
257 long npages, dma_ofs, i; in pci_map_single_1() local
300 npages = iommu_num_pages(paddr, size, PAGE_SIZE); in pci_map_single_1()
305 dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
313 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) in pci_map_single_1()
320 cpu_addr, size, npages, ret, __builtin_return_address(0)); in pci_map_single_1()
378 long dma_ofs, npages; in alpha_pci_unmap_page() local
411 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in alpha_pci_unmap_page()
415 iommu_arena_free(arena, dma_ofs, npages); in alpha_pci_unmap_page()
426 dma_addr, size, npages, __builtin_return_address(0)); in alpha_pci_unmap_page()
562 long npages, dma_ofs, i; in sg_fill() local
[all …]
/linux-4.4.14/arch/powerpc/platforms/pasemi/
iommu.c
89 long npages, unsigned long uaddr, in iobmap_build() argument
97 pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr); in iobmap_build()
103 while (npages--) { in iobmap_build()
118 long npages) in iobmap_free() argument
123 pr_debug("iobmap: free at: %lx, %lx\n", index, npages); in iobmap_free()
129 while (npages--) { in iobmap_free()
/linux-4.4.14/arch/powerpc/mm/
mmu_context_iommu.c
33 unsigned long npages, bool incr) in mm_iommu_adjust_locked_vm() argument
37 if (!npages) in mm_iommu_adjust_locked_vm()
43 locked = mm->locked_vm + npages; in mm_iommu_adjust_locked_vm()
48 mm->locked_vm += npages; in mm_iommu_adjust_locked_vm()
50 if (WARN_ON_ONCE(npages > mm->locked_vm)) in mm_iommu_adjust_locked_vm()
51 npages = mm->locked_vm; in mm_iommu_adjust_locked_vm()
52 mm->locked_vm -= npages; in mm_iommu_adjust_locked_vm()
58 npages << PAGE_SHIFT, in mm_iommu_adjust_locked_vm()
subpage-prot.c
61 int npages) in hpte_flush_range() argument
80 for (; npages > 0; --npages) { in hpte_flush_range()
hugetlbpage.c
361 unsigned long npages; in do_gpage_early_setup() local
375 if (sscanf(val, "%lu", &npages) <= 0) in do_gpage_early_setup()
376 npages = 0; in do_gpage_early_setup()
377 if (npages > MAX_NUMBER_GPAGES) { in do_gpage_early_setup()
381 npages, size / 1024); in do_gpage_early_setup()
382 npages = MAX_NUMBER_GPAGES; in do_gpage_early_setup()
384 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; in do_gpage_early_setup()
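Note: do_gpage_early_setup() above parses a gigantic-page count from the command line and clamps it to MAX_NUMBER_GPAGES. A sketch of that parse-and-clamp; the cap value is illustrative, not the real constant:

    #include <stdio.h>

    #define MAX_NUMBER_GPAGES 128   /* illustrative cap */

    static unsigned long parse_gpages(const char *val)
    {
        unsigned long npages = 0;

        if (sscanf(val, "%lu", &npages) <= 0)   /* unparsable: take none */
            npages = 0;
        if (npages > MAX_NUMBER_GPAGES)         /* too many: clamp */
            npages = MAX_NUMBER_GPAGES;
        return npages;
    }

    int main(void)
    {
        printf("%lu\n", parse_gpages("64"));    /* prints 64 */
        printf("%lu\n", parse_gpages("9999"));  /* prints 128 */
        return 0;
    }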
/linux-4.4.14/drivers/infiniband/hw/mlx5/
mr.c
140 int npages = 1 << ent->order; in add_keys() local
163 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); in add_keys()
684 int npages; in get_octo_len() local
687 npages = ALIGN(len + offset, page_size) >> ilog2(page_size); in get_octo_len()
688 return (npages + 1) / 2; in get_octo_len()
718 umrwr->npages = n; in prep_umr_reg_wqe()
760 u64 virt_addr, u64 len, int npages, in reg_umr() argument
796 size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); in reg_umr()
806 memset(pas + npages, 0, size - npages * sizeof(u64)); in reg_umr()
816 prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key, in reg_umr()
[all …]
odp.c
187 int npages = 0, ret = 0; in pagefault_single_data_segment() local
237 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, in pagefault_single_data_segment()
239 if (npages < 0) { in pagefault_single_data_segment()
240 ret = npages; in pagefault_single_data_segment()
244 if (npages > 0) { in pagefault_single_data_segment()
252 ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); in pagefault_single_data_segment()
264 u32 new_mappings = npages * PAGE_SIZE - in pagefault_single_data_segment()
289 return ret ? ret : npages; in pagefault_single_data_segment()
315 int ret = 0, npages = 0; in pagefault_data_segments() local
373 npages += ret; in pagefault_data_segments()
[all …]
srq.c
84 int npages; in create_srq_user() local
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, in create_srq_user()
160 int npages; in create_srq_kernel() local
185 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); in create_srq_kernel()
187 buf_size, page_shift, srq->buf.npages, npages); in create_srq_kernel()
188 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; in create_srq_kernel()
cq.c
619 int npages; in create_cq_user() local
653 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, in create_cq_user()
656 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); in create_cq_user()
719 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; in create_cq_kernel()
965 int npages; in resize_user() local
982 mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, in resize_user()
1115 npas = cq->resize_buf->buf.npages; in mlx5_ib_resize_cq()
mlx5_ib.h
255 unsigned int npages; member
333 int npages; member
553 int npages, int zap);
qp.c
602 int npages; in create_user_qp() local
658 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, in create_user_qp()
666 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); in create_user_qp()
770 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; in create_kernel_qp()
1853 static __be16 get_klm_octo(int npages) in get_klm_octo() argument
1855 return cpu_to_be16(ALIGN(npages, 8) / 2); in get_klm_octo()
1967 umr->klm_octowords = get_klm_octo(umrwr->npages); in set_reg_umr_segment()
/linux-4.4.14/fs/ramfs/
file-nommu.c
68 unsigned long npages, xpages, loop; in ramfs_nommu_expand_for_mapping() local
94 npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; in ramfs_nommu_expand_for_mapping()
99 for (loop = npages; loop < xpages; loop++) in ramfs_nommu_expand_for_mapping()
103 newsize = PAGE_SIZE * npages; in ramfs_nommu_expand_for_mapping()
108 for (loop = 0; loop < npages; loop++) { in ramfs_nommu_expand_for_mapping()
127 while (loop < npages) in ramfs_nommu_expand_for_mapping()
/linux-4.4.14/lib/
iommu-common.c
99 unsigned long npages, in iommu_tbl_range_alloc() argument
112 bool largealloc = (large_pool && npages > iommu_large_alloc); in iommu_tbl_range_alloc()
120 if (unlikely(npages == 0)) { in iommu_tbl_range_alloc()
185 n = iommu_area_alloc(iommu->map, limit, start, npages, shift, in iommu_tbl_range_alloc()
215 end = n + npages; in iommu_tbl_range_alloc()
252 unsigned long npages, unsigned long entry) in iommu_tbl_range_free() argument
263 bitmap_clear(iommu->map, entry, npages); in iommu_tbl_range_free()
iov_iter.c
754 int npages = 0; in iov_iter_npages() local
761 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) in iov_iter_npages()
763 if (npages >= maxpages) in iov_iter_npages()
766 npages++; in iov_iter_npages()
767 if (npages >= maxpages) in iov_iter_npages()
771 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) in iov_iter_npages()
773 if (npages >= maxpages) in iov_iter_npages()
777 return npages; in iov_iter_npages()
/linux-4.4.14/drivers/infiniband/hw/usnic/
usnic_uiom.c
109 unsigned long npages; in usnic_uiom_get_pages() local
129 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; in usnic_uiom_get_pages()
133 locked = npages + current->mm->locked_vm; in usnic_uiom_get_pages()
146 while (npages) { in usnic_uiom_get_pages()
148 min_t(unsigned long, npages, in usnic_uiom_get_pages()
155 npages -= ret; in usnic_uiom_get_pages()
220 int npages; in __usnic_uiom_reg_release() local
226 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; in __usnic_uiom_reg_release()
228 vpn_last = vpn_start + npages - 1; in __usnic_uiom_reg_release()
342 unsigned long npages; in usnic_uiom_reg_get() local
[all …]
/linux-4.4.14/crypto/
af_alg.c
398 int npages, i; in af_alg_make_sg() local
404 npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; in af_alg_make_sg()
405 if (WARN_ON(npages == 0)) in af_alg_make_sg()
408 sg_init_table(sgl->sg, npages + 1); in af_alg_make_sg()
410 for (i = 0, len = n; i < npages; i++) { in af_alg_make_sg()
418 sg_mark_end(sgl->sg + npages - 1); in af_alg_make_sg()
419 sgl->npages = npages; in af_alg_make_sg()
427 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); in af_alg_link_sg()
428 sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); in af_alg_link_sg()
436 for (i = 0; i < sgl->npages; i++) in af_alg_free_sg()
/linux-4.4.14/arch/powerpc/sysdev/
dart_iommu.c
164 long npages, unsigned long uaddr, in dart_build() argument
172 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); in dart_build()
179 l = npages; in dart_build()
195 while (npages--) in dart_build()
204 static void dart_free(struct iommu_table *tbl, long index, long npages) in dart_free() argument
213 DBG("dart: free at: %lx, %lx\n", index, npages); in dart_free()
217 while (npages--) in dart_free()
/linux-4.4.14/arch/tile/kernel/
module.c
43 int npages; in module_alloc() local
45 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; in module_alloc()
46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); in module_alloc()
49 for (; i < npages; ++i) { in module_alloc()
58 area->nr_pages = npages; in module_alloc()
/linux-4.4.14/arch/powerpc/platforms/pseries/
iommu.c
153 long npages, unsigned long uaddr, in tce_build_pSeries() argument
168 while (npages--) { in tce_build_pSeries()
183 static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) in tce_free_pSeries() argument
189 while (npages--) in tce_free_pSeries()
209 long npages, unsigned long uaddr, in tce_build_pSeriesLP() argument
217 long tcenum_start = tcenum, npages_start = npages; in tce_build_pSeriesLP()
224 while (npages--) { in tce_build_pSeriesLP()
231 (npages_start - (npages + 1))); in tce_build_pSeriesLP()
252 long npages, unsigned long uaddr, in tce_buildmulti_pSeriesLP() argument
261 long tcenum_start = tcenum, npages_start = npages; in tce_buildmulti_pSeriesLP()
[all …]
/linux-4.4.14/fs/freevxfs/
vxfs_lookup.c
109 u_long npages, page, nblocks, pblocks, block; in vxfs_find_entry() local
114 npages = dir_pages(ip); in vxfs_find_entry()
118 for (page = 0; page < npages; page++) { in vxfs_find_entry()
236 u_long page, npages, block, pblocks, nblocks, offset; in vxfs_readdir() local
254 npages = dir_pages(ip); in vxfs_readdir()
262 for (; page < npages; page++, block = 0) { in vxfs_readdir()
/linux-4.4.14/fs/sysv/
dir.c
68 unsigned long npages = dir_pages(inode); in sysv_readdir() local
79 for ( ; n < npages; n++, offset = 0) { in sysv_readdir()
132 unsigned long npages = dir_pages(dir); in sysv_find_entry() local
139 if (start >= npages) in sysv_find_entry()
160 if (++n >= npages) in sysv_find_entry()
179 unsigned long npages = dir_pages(dir); in sysv_add_link() local
186 for (n = 0; n <= npages; n++) { in sysv_add_link()
287 unsigned long i, npages = dir_pages(inode); in sysv_empty_dir() local
289 for (i = 0; i < npages; i++) { in sysv_empty_dir()
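Note: the sysv hits above, vxfs just before them, and the minix, ufs, ext2, exofs and nilfs2 sections below all share one readdir/lookup shape: npages = dir_pages(inode) converts the directory's byte size into page-cache pages, then the code walks those pages one by one. A sketch with dir_pages() expanded to its definition and the page-cache access stubbed out:

    #include <stdio.h>

    #define PAGE_CACHE_SHIFT 12
    #define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

    /* dir_pages(): page-cache pages needed for an i_size-byte directory */
    static unsigned long dir_pages(unsigned long i_size)
    {
        return (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    }

    int main(void)
    {
        unsigned long i_size = 9300;               /* directory size in bytes */
        unsigned long npages = dir_pages(i_size);

        for (unsigned long n = 0; n < npages; n++)
            printf("scan dir page %lu\n", n);      /* real code maps + parses */
        return 0;                                  /* 9300 bytes -> 3 pages */
    }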
/linux-4.4.14/arch/arm/kernel/
process.c
369 unsigned int npages) in sigpage_addr() argument
379 last = TASK_SIZE - (npages << PAGE_SHIFT); in sigpage_addr()
410 unsigned long npages; in arch_setup_additional_pages() local
420 npages = 1; /* for sigpage */ in arch_setup_additional_pages()
421 npages += vdso_total_pages; in arch_setup_additional_pages()
424 hint = sigpage_addr(mm, npages); in arch_setup_additional_pages()
425 addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); in arch_setup_additional_pages()
/linux-4.4.14/drivers/gpu/drm/udl/
udl_dmabuf.c
221 int npages; in udl_prime_create() local
223 npages = size / PAGE_SIZE; in udl_prime_create()
226 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); in udl_prime_create()
231 obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in udl_prime_create()
233 DRM_ERROR("obj pages is NULL %d\n", npages); in udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); in udl_prime_create()
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c
173 static void enc_pools_release_free_pages(long npages) in enc_pools_release_free_pages() argument
178 LASSERT(npages > 0); in enc_pools_release_free_pages()
179 LASSERT(npages <= page_pools.epp_free_pages); in enc_pools_release_free_pages()
185 page_pools.epp_free_pages -= npages; in enc_pools_release_free_pages()
186 page_pools.epp_total_pages -= npages; in enc_pools_release_free_pages()
196 while (npages--) { in enc_pools_release_free_pages()
275 int npages_to_npools(unsigned long npages) in npages_to_npools() argument
277 return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL); in npages_to_npools()
ptlrpc_internal.h
60 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
client.c
102 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, in ptlrpc_new_bulk() argument
108 desc = kzalloc(offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]), in ptlrpc_new_bulk()
115 desc->bd_max_iov = npages; in ptlrpc_new_bulk()
140 unsigned npages, unsigned max_brw, in ptlrpc_prep_bulk_imp() argument
147 desc = ptlrpc_new_bulk(npages, max_brw, type, portal); in ptlrpc_prep_bulk_imp()
/linux-4.4.14/fs/qnx6/
dir.c
117 unsigned long npages = dir_pages(inode); in qnx6_readdir() local
126 for ( ; !done && n < npages; n++, start = 0) { in qnx6_readdir()
219 unsigned long npages = dir_pages(dir); in qnx6_find_entry() local
226 if (npages == 0) in qnx6_find_entry()
229 if (start >= npages) in qnx6_find_entry()
261 if (++n >= npages) in qnx6_find_entry()
/linux-4.4.14/drivers/gpu/drm/msm/
msm_gem.c
43 int npages) in get_pages_vram() argument
51 p = drm_malloc_ab(npages, sizeof(struct page *)); in get_pages_vram()
56 npages, 0, DRM_MM_SEARCH_DEFAULT); in get_pages_vram()
63 for (i = 0; i < npages; i++) { in get_pages_vram()
79 int npages = obj->size >> PAGE_SHIFT; in get_pages() local
84 p = get_pages_vram(obj, npages); in get_pages()
92 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); in get_pages()
670 int ret, npages; in msm_gem_import() local
686 npages = size / PAGE_SIZE; in msm_gem_import()
690 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); in msm_gem_import()
[all …]
msm_gem_prime.c
26 int npages = obj->size >> PAGE_SHIFT; in msm_gem_prime_get_sg_table() local
31 return drm_prime_pages_to_sg(msm_obj->pages, npages); in msm_gem_prime_get_sg_table()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
mem.c
396 struct c4iw_mr *mhp, int shift, int npages) in reregister_mem() argument
401 if (npages > mhp->attr.pbl_size) in reregister_mem()
421 static int alloc_pbl(struct c4iw_mr *mhp, int npages) in alloc_pbl() argument
424 npages << 3); in alloc_pbl()
429 mhp->attr.pbl_size = npages; in alloc_pbl()
436 u64 *total_size, int *npages, in build_phys_page_list() argument
473 *npages = 0; in build_phys_page_list()
475 *npages += (buffer_list[i].size + in build_phys_page_list()
478 if (!*npages) in build_phys_page_list()
481 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); in build_phys_page_list()
[all …]
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_user_sdma.c
276 unsigned long addr, int tlen, int npages) in ipath_user_sdma_pin_pages() argument
282 ret = get_user_pages_fast(addr, npages, 0, pages); in ipath_user_sdma_pin_pages()
283 if (ret != npages) { in ipath_user_sdma_pin_pages()
293 for (j = 0; j < npages; j++) { in ipath_user_sdma_pin_pages()
330 const int npages = ipath_user_sdma_num_pages(iov + idx); in ipath_user_sdma_pin_pkt() local
335 npages); in ipath_user_sdma_pin_pkt()
354 unsigned long niov, int npages) in ipath_user_sdma_init_payload() argument
358 if (npages >= ARRAY_SIZE(pkt->addr)) in ipath_user_sdma_init_payload()
415 int npages = 0; in ipath_user_sdma_queue_pkts() local
491 npages++; in ipath_user_sdma_queue_pkts()
[all …]
/linux-4.4.14/arch/x86/include/asm/
tce.h
42 unsigned int npages, unsigned long uaddr, int direction);
43 extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
/linux-4.4.14/fs/nilfs2/
dir.c
261 unsigned long npages = dir_pages(inode); in nilfs_readdir() local
267 for ( ; n < npages; n++, offset = 0) { in nilfs_readdir()
326 unsigned long npages = dir_pages(dir); in nilfs_find_entry() local
331 if (npages == 0) in nilfs_find_entry()
338 if (start >= npages) in nilfs_find_entry()
361 if (++n >= npages) in nilfs_find_entry()
441 unsigned long npages = dir_pages(dir); in nilfs_add_link() local
452 for (n = 0; n <= npages; n++) { in nilfs_add_link()
616 unsigned long i, npages = dir_pages(inode); in nilfs_empty_dir() local
618 for (i = 0; i < npages; i++) { in nilfs_empty_dir()
/linux-4.4.14/include/linux/
iommu-common.h
43 unsigned long npages,
49 u64 dma_addr, unsigned long npages,
kvm_host.h
300 unsigned long npages; member
310 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; in kvm_dirty_bitmap_bytes()
476 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
573 unsigned long npages);
925 gfn < memslots[slot].base_gfn + memslots[slot].npages) in search_memslots()
938 gfn < memslots[start].base_gfn + memslots[start].npages) { in search_memslots()
efi.h
1077 static inline void memrange_efi_to_native(u64 *addr, u64 *npages) in memrange_efi_to_native() argument
1079 *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); in memrange_efi_to_native()
nfs_xdr.h
775 unsigned int npages; member
1369 unsigned int npages; /* Max length of pagevec */ member
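Note: memrange_efi_to_native() above converts a range counted in fixed 4 KiB EFI pages into native-page units: round the end up and the start down to native page frames (the in-tree helper also masks *addr down to the native page base; that line is elided in the excerpt). A worked sketch assuming a 64 KiB native page, as on some arm64 configurations:

    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12            /* EFI pages are always 4 KiB */
    #define PAGE_SHIFT     16            /* assume a 64 KiB native page */
    #define PAGE_SIZE      (1ULL << PAGE_SHIFT)
    #define PFN_UP(x)      (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
    #define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)

    static void memrange_efi_to_native(unsigned long long *addr,
                                       unsigned long long *npages)
    {
        *npages = PFN_UP(*addr + (*npages << EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
        *addr  &= ~(PAGE_SIZE - 1);      /* callers then map the aligned base */
    }

    int main(void)
    {
        unsigned long long addr = 0xF000, npages = 3;  /* 3 EFI pages = 12 KiB */

        memrange_efi_to_native(&addr, &npages);
        printf("addr=%#llx npages=%llu\n", addr, npages);
        return 0;   /* addr=0 npages=2: the 12 KiB straddle two 64 KiB frames */
    }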
/linux-4.4.14/arch/arm64/kernel/
efi.c
166 u64 paddr, npages, size; in reserve_regions() local
173 npages = md->num_pages; in reserve_regions()
179 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, in reserve_regions()
183 memrange_efi_to_native(&paddr, &npages); in reserve_regions()
184 size = npages << PAGE_SHIFT; in reserve_regions()
/linux-4.4.14/fs/minix/
dir.c
86 unsigned long npages = dir_pages(inode); in minix_readdir() local
98 for ( ; n < npages; n++, offset = 0) { in minix_readdir()
158 unsigned long npages = dir_pages(dir); in minix_find_entry() local
166 for (n = 0; n < npages; n++) { in minix_find_entry()
207 unsigned long npages = dir_pages(dir); in minix_add_link() local
222 for (n = 0; n <= npages; n++) { in minix_add_link()
363 unsigned long i, npages = dir_pages(inode); in minix_empty_dir() local
368 for (i = 0; i < npages; i++) { in minix_empty_dir()
/linux-4.4.14/fs/ufs/
dir.c
256 unsigned long npages = dir_pages(dir); in ufs_find_entry() local
263 if (npages == 0 || namelen > UFS_MAXNAMLEN) in ufs_find_entry()
271 if (start >= npages) in ufs_find_entry()
294 if (++n >= npages) in ufs_find_entry()
320 unsigned long npages = dir_pages(dir); in ufs_add_link() local
333 for (n = 0; n <= npages; n++) { in ufs_add_link()
437 unsigned long npages = dir_pages(inode); in ufs_readdir() local
447 for ( ; n < npages; n++, offset = 0) { in ufs_readdir()
608 unsigned long i, npages = dir_pages(inode); in ufs_empty_dir() local
610 for (i = 0; i < npages; i++) { in ufs_empty_dir()
/linux-4.4.14/fs/ext2/
dir.c
292 unsigned long npages = dir_pages(inode); in ext2_readdir() local
303 for ( ; n < npages; n++, offset = 0) { in ext2_readdir()
368 unsigned long npages = dir_pages(dir); in ext2_find_entry() local
374 if (npages == 0) in ext2_find_entry()
381 if (start >= npages) in ext2_find_entry()
406 if (++n >= npages) in ext2_find_entry()
492 unsigned long npages = dir_pages(dir); in ext2_add_link() local
503 for (n = 0; n <= npages; n++) { in ext2_add_link()
668 unsigned long i, npages = dir_pages(inode); in ext2_empty_dir() local
671 for (i = 0; i < npages; i++) { in ext2_empty_dir()
/linux-4.4.14/fs/exofs/
dir.c
242 unsigned long npages = dir_pages(inode); in exofs_readdir() local
249 for ( ; n < npages; n++, offset = 0) { in exofs_readdir()
310 unsigned long npages = dir_pages(dir); in exofs_find_entry() local
315 if (npages == 0) in exofs_find_entry()
321 if (start >= npages) in exofs_find_entry()
345 if (++n >= npages) in exofs_find_entry()
435 unsigned long npages = dir_pages(dir); in exofs_add_link() local
441 for (n = 0; n <= npages; n++) { in exofs_add_link()
612 unsigned long i, npages = dir_pages(inode); in exofs_empty_dir() local
614 for (i = 0; i < npages; i++) { in exofs_empty_dir()
/linux-4.4.14/drivers/gpu/drm/exynos/
exynos_drm_gem.c
552 int npages; in exynos_drm_gem_prime_get_sg_table() local
554 npages = exynos_gem->size >> PAGE_SHIFT; in exynos_drm_gem_prime_get_sg_table()
556 return drm_prime_pages_to_sg(exynos_gem->pages, npages); in exynos_drm_gem_prime_get_sg_table()
565 int npages; in exynos_drm_gem_prime_import_sg_table() local
576 npages = exynos_gem->size >> PAGE_SHIFT; in exynos_drm_gem_prime_import_sg_table()
577 exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *)); in exynos_drm_gem_prime_import_sg_table()
584 npages); in exynos_drm_gem_prime_import_sg_table()
exynos_drm_gem.h
139 unsigned int npages,
145 unsigned int npages,
exynos_drm_g2d.c
418 unsigned int npages, offset; in g2d_userptr_get_dma_addr() local
467 npages = (end - start) >> PAGE_SHIFT; in g2d_userptr_get_dma_addr()
468 g2d_userptr->vec = frame_vector_create(npages); in g2d_userptr_get_dma_addr()
474 ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec); in g2d_userptr_get_dma_addr()
475 if (ret != npages) { in g2d_userptr_get_dma_addr()
495 npages, offset, size, GFP_KERNEL); in g2d_userptr_get_dma_addr()
515 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { in g2d_userptr_get_dma_addr()
516 g2d->current_pool += npages << PAGE_SHIFT; in g2d_userptr_get_dma_addr()
/linux-4.4.14/virt/kvm/
kvm_main.c
526 free->npages = 0; in kvm_free_memslot()
728 if (!new->npages) { in update_memslots()
729 WARN_ON(!mslots[i].npages); in update_memslots()
730 if (mslots[i].npages) in update_memslots()
733 if (!mslots[i].npages) in update_memslots()
739 if (!mslots[i + 1].npages) in update_memslots()
755 if (new->npages) { in update_memslots()
823 unsigned long npages; in __kvm_set_memory_region() local
857 npages = mem->memory_size >> PAGE_SHIFT; in __kvm_set_memory_region()
859 if (npages > KVM_MEM_MAX_NR_PAGES) in __kvm_set_memory_region()
[all …]
/linux-4.4.14/drivers/gpu/drm/
drm_gem.c
466 int i, npages; in drm_gem_get_pages() local
477 npages = obj->size >> PAGE_SHIFT; in drm_gem_get_pages()
479 pages = drm_malloc_ab(npages, sizeof(struct page *)); in drm_gem_get_pages()
483 for (i = 0; i < npages; i++) { in drm_gem_get_pages()
519 int i, npages; in drm_gem_put_pages() local
527 npages = obj->size >> PAGE_SHIFT; in drm_gem_put_pages()
529 for (i = 0; i < npages; i++) { in drm_gem_put_pages()
/linux-4.4.14/drivers/gpu/drm/omapdrm/
omap_gem.c
230 int npages = obj->size >> PAGE_SHIFT; in omap_gem_attach_pages() local
246 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
252 for (i = 0; i < npages; i++) { in omap_gem_attach_pages()
257 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL); in omap_gem_attach_pages()
284 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_detach_pages() local
285 for (i = 0; i < npages; i++) { in omap_gem_detach_pages()
664 uint32_t npages = obj->size >> PAGE_SHIFT; in omap_gem_roll() local
667 if (roll > npages) { in omap_gem_roll()
682 ret = tiler_pin(omap_obj->block, pages, npages, roll, true); in omap_gem_roll()
716 int i, npages = obj->size >> PAGE_SHIFT; in omap_gem_dma_sync() local
[all …]
omap_fbdev.c
51 int npages; in pan_worker() local
54 npages = fbi->fix.line_length >> PAGE_SHIFT; in pan_worker()
55 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages); in pan_worker()
omap_dmm_tiler.c
206 struct page **pages, uint32_t npages, uint32_t roll) in dmm_txn_append() argument
238 if (n >= npages) in dmm_txn_append()
239 n -= npages; in dmm_txn_append()
306 uint32_t npages, uint32_t roll, bool wait) in fill() argument
322 dmm_txn_append(txn, &p_area, pages, npages, roll); in fill()
339 uint32_t npages, uint32_t roll, bool wait) in tiler_pin() argument
343 ret = fill(&block->area, pages, npages, roll, wait); in tiler_pin()
omap_dmm_tiler.h
92 uint32_t npages, uint32_t roll, bool wait);
/linux-4.4.14/arch/x86/platform/efi/
efi.c
539 u64 addr, npages; in efi_set_executable() local
542 npages = md->num_pages; in efi_set_executable()
544 memrange_efi_to_native(&addr, &npages); in efi_set_executable()
547 set_memory_x(addr, npages); in efi_set_executable()
549 set_memory_nx(addr, npages); in efi_set_executable()
571 u64 npages; in efi_memory_uc() local
573 npages = round_up(size, page_shift) / page_shift; in efi_memory_uc()
574 memrange_efi_to_native(&addr, &npages); in efi_memory_uc()
575 set_memory_uc(addr, npages); in efi_memory_uc()
efi_64.c
148 unsigned npages; in efi_setup_page_tables() local
186 npages = (_end - _text) >> PAGE_SHIFT; in efi_setup_page_tables()
189 if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) { in efi_setup_page_tables()
/linux-4.4.14/drivers/staging/lustre/lnet/lnet/
router.c
1220 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) in lnet_destroy_rtrbuf() argument
1222 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); in lnet_destroy_rtrbuf()
1224 while (--npages >= 0) in lnet_destroy_rtrbuf()
1225 __free_page(rb->rb_kiov[npages].kiov_page); in lnet_destroy_rtrbuf()
1233 int npages = rbp->rbp_npages; in lnet_new_rtrbuf() local
1234 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); in lnet_new_rtrbuf()
1245 for (i = 0; i < npages; i++) { in lnet_new_rtrbuf()
1268 int npages = rbp->rbp_npages; in lnet_rtrpool_free_bufs() local
1284 lnet_destroy_rtrbuf(rb, npages); in lnet_rtrpool_free_bufs()
1329 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages) in lnet_rtrpool_init() argument
[all …]
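Note: lnet_new_rtrbuf() above (like ptlrpc_new_bulk() in the previous section) sizes a variable-length structure with offsetof(type, array[npages]), so the header and the per-page array come from a single allocation. A minimal sketch of that idiom using a flexible array member; the struct here is a stand-in, not the real lnet_rtrbuf_t:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct rtrbuf {
        int   rb_npages;
        void *rb_kiov[];             /* flexible array, one slot per page */
    };

    int main(void)
    {
        int npages = 16;
        /* header plus npages trailing slots, sized in one expression */
        size_t sz = offsetof(struct rtrbuf, rb_kiov[npages]);
        struct rtrbuf *rb = calloc(1, sz);

        if (!rb)
            return 1;
        rb->rb_npages = npages;
        printf("allocated %zu bytes for %d pages\n", sz, rb->rb_npages);
        free(rb);
        return 0;
    }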
/linux-4.4.14/drivers/staging/lustre/lustre/obdecho/
echo_client.c
169 struct page **pages, int npages, int async);
1175 struct page **pages, int npages, int async) in cl_echo_object_brw() argument
1209 offset + npages * PAGE_CACHE_SIZE - 1, in cl_echo_object_brw()
1215 for (i = 0; i < npages; i++) { in cl_echo_object_brw()
1552 u32 npages; in echo_client_kbrw() local
1578 npages = count >> PAGE_CACHE_SHIFT; in echo_client_kbrw()
1583 pga = kcalloc(npages, sizeof(*pga), GFP_NOFS); in echo_client_kbrw()
1587 pages = kcalloc(npages, sizeof(*pages), GFP_NOFS); in echo_client_kbrw()
1594 i < npages; in echo_client_kbrw()
1617 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); in echo_client_kbrw()
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/
nouveau_prime.c
34 int npages = nvbo->bo.num_pages; in nouveau_gem_prime_get_sg_table() local
36 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); in nouveau_gem_prime_get_sg_table()
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_user_sdma.c
665 unsigned long addr, int tlen, int npages) in qib_user_sdma_pin_pages() argument
671 while (npages) { in qib_user_sdma_pin_pages()
672 if (npages > 8) in qib_user_sdma_pin_pages()
675 j = npages; in qib_user_sdma_pin_pages()
705 npages -= j; in qib_user_sdma_pin_pages()
729 const int npages = qib_user_sdma_num_pages(iov + idx); in qib_user_sdma_pin_pkt() local
733 iov[idx].iov_len, npages); in qib_user_sdma_pin_pkt()
765 unsigned long niov, int npages) in qib_user_sdma_init_payload() argument
770 npages >= ARRAY_SIZE(pkt->addr)) in qib_user_sdma_init_payload()
831 int npages = 0; in qib_user_sdma_queue_pkts() local
[all …]
qib_keys.c
364 if (mr->npages > mrg->max_segs) in qib_reg_mr()
368 if (mr->ibmr.length > ps * mr->npages) in qib_reg_mr()
379 for (i = 0; i < mr->npages; i++) { in qib_reg_mr()
qib_mr.c
359 if (unlikely(mr->npages == mr->mr.max_segs)) in qib_set_page()
362 mr->pages[mr->npages++] = addr; in qib_set_page()
373 mr->npages = 0; in qib_map_mr_sg()
qib_verbs.h
333 u32 npages; member
/linux-4.4.14/drivers/lightnvm/
rrpc.c
625 sector_t laddr, uint8_t npages) in rrpc_end_io_write() argument
632 for (i = 0; i < npages; i++) { in rrpc_end_io_write()
647 uint8_t npages = rqd->nr_pages; in rrpc_end_io() local
648 sector_t laddr = rrpc_get_laddr(rqd->bio) - npages; in rrpc_end_io()
651 rrpc_end_io_write(rrpc, rrqd, laddr, npages); in rrpc_end_io()
659 if (npages > 1) in rrpc_end_io()
670 struct nvm_rq *rqd, unsigned long flags, int npages) in rrpc_read_ppalist_rq() argument
683 for (i = 0; i < npages; i++) { in rrpc_read_ppalist_rq()
734 struct nvm_rq *rqd, unsigned long flags, int npages) in rrpc_write_ppalist_rq() argument
747 for (i = 0; i < npages; i++) { in rrpc_write_ppalist_rq()
[all …]
/linux-4.4.14/drivers/vfio/
vfio_iommu_spapr_tce.c
34 static long try_increment_locked_vm(long npages) in try_increment_locked_vm() argument
41 if (!npages) in try_increment_locked_vm()
45 locked = current->mm->locked_vm + npages; in try_increment_locked_vm()
50 current->mm->locked_vm += npages; in try_increment_locked_vm()
53 npages << PAGE_SHIFT, in try_increment_locked_vm()
63 static void decrement_locked_vm(long npages) in decrement_locked_vm() argument
65 if (!current || !current->mm || !npages) in decrement_locked_vm()
69 if (WARN_ON_ONCE(npages > current->mm->locked_vm)) in decrement_locked_vm()
70 npages = current->mm->locked_vm; in decrement_locked_vm()
71 current->mm->locked_vm -= npages; in decrement_locked_vm()
[all …]
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
amdgpu_prime.c
35 int npages = bo->tbo.num_pages; in amdgpu_gem_prime_get_sg_table() local
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in amdgpu_gem_prime_get_sg_table()
amdgpu_cgs.c
85 int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT; in amdgpu_cgs_gmap_kmem() local
87 struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); in amdgpu_cgs_gmap_kmem()
/linux-4.4.14/drivers/gpu/drm/radeon/
radeon_prime.c
35 int npages = bo->tbo.num_pages; in radeon_gem_prime_get_sg_table() local
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in radeon_gem_prime_get_sg_table()
/linux-4.4.14/arch/powerpc/include/asm/
iommu.h
53 long index, long npages,
69 long index, long npages);
294 unsigned long npages);
kvm_book3s_64.h
388 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
kvm_ppc.h
182 unsigned long npages);
262 unsigned long npages);
/linux-4.4.14/drivers/gpu/drm/i915/
i915_gem_userptr.c
571 const int npages = obj->base.size >> PAGE_SHIFT; in __i915_gem_userptr_get_pages_worker() local
578 pvec = kmalloc(npages*sizeof(struct page *), in __i915_gem_userptr_get_pages_worker()
581 pvec = drm_malloc_ab(npages, sizeof(struct page *)); in __i915_gem_userptr_get_pages_worker()
586 while (pinned < npages) { in __i915_gem_userptr_get_pages_worker()
589 npages - pinned, in __i915_gem_userptr_get_pages_worker()
602 if (pinned == npages) { in __i915_gem_userptr_get_pages_worker()
603 ret = __i915_gem_userptr_set_pages(obj, pvec, npages); in __i915_gem_userptr_get_pages_worker()
i915_cmd_parser.c
866 int npages = last_page - first_page; in vmap_batch() local
869 pages = drm_malloc_ab(npages, sizeof(*pages)); in vmap_batch()
878 if (i == npages) in vmap_batch()
/linux-4.4.14/drivers/staging/android/ion/
ion_heap.c
36 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; in ion_heap_map_kernel() local
37 struct page **pages = vmalloc(sizeof(struct page *) * npages); in ion_heap_map_kernel()
52 BUG_ON(i >= npages); in ion_heap_map_kernel()
56 vaddr = vmap(pages, npages, VM_MAP, pgprot); in ion_heap_map_kernel()
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/
module.c
114 static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, in kportal_memhog_alloc() argument
126 if (npages < 0) in kportal_memhog_alloc()
129 if (npages == 0) in kportal_memhog_alloc()
142 while (ldu->ldu_memhog_pages < npages && in kportal_memhog_alloc()
157 while (ldu->ldu_memhog_pages < npages && in kportal_memhog_alloc()
/linux-4.4.14/drivers/xen/
privcmd.c
210 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || in mmap_gfn_range()
211 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) in mmap_gfn_range()
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) in mmap_gfn_range()
221 msg->mfn, msg->npages, in mmap_gfn_range()
227 st->va += msg->npages << PAGE_SHIFT; in mmap_gfn_range()
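mmap_gfn_range() above refuses any npages that would overflow when shifted into a byte length before it computes msg->npages << PAGE_SHIFT. A self-contained sketch of that guard (the uint64_t byte-length type and the demo values are assumptions; the LONG_MAX >> PAGE_SHIFT bound is the one in the snippet):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Convert a page count to a byte length, refusing counts that would
 * overflow when shifted, as mmap_gfn_range() does before using the value. */
static int npages_to_bytes(uint64_t npages, uint64_t *bytes)
{
	if (npages > (LONG_MAX >> PAGE_SHIFT))
		return -1;
	*bytes = npages << PAGE_SHIFT;
	return 0;
}

int main(void)
{
	uint64_t len;

	if (npages_to_bytes(UINT64_MAX / 2, &len))
		puts("rejected: page count would overflow");
	if (!npages_to_bytes(256, &len))
		printf("256 pages = %llu bytes\n", (unsigned long long)len);
	return 0;
}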
/linux-4.4.14/arch/s390/pci/
Dpci_dma.c335 int npages, ret; in s390_dma_unmap_pages() local
337 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); in s390_dma_unmap_pages()
339 ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, in s390_dma_unmap_pages()
347 atomic64_add(npages, &zdev->unmapped_pages); in s390_dma_unmap_pages()
349 dma_free_iommu(zdev, iommu_page_index, npages); in s390_dma_unmap_pages()
/linux-4.4.14/fs/fuse/
Dfuse_i.h791 struct fuse_req *fuse_request_alloc(unsigned npages);
793 struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
804 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
806 unsigned npages);
Ddev.c39 unsigned npages) in fuse_request_init() argument
42 memset(pages, 0, sizeof(*pages) * npages); in fuse_request_init()
43 memset(page_descs, 0, sizeof(*page_descs) * npages); in fuse_request_init()
50 req->max_pages = npages; in fuse_request_init()
54 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) in __fuse_request_alloc() argument
61 if (npages <= FUSE_REQ_INLINE_PAGES) { in __fuse_request_alloc()
65 pages = kmalloc(sizeof(struct page *) * npages, flags); in __fuse_request_alloc()
67 npages, flags); in __fuse_request_alloc()
77 fuse_request_init(req, pages, page_descs, npages); in __fuse_request_alloc()
82 struct fuse_req *fuse_request_alloc(unsigned npages) in fuse_request_alloc() argument
[all …]
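__fuse_request_alloc() keeps the page and descriptor arrays inline in the request when npages is at most FUSE_REQ_INLINE_PAGES and only kmalloc()s larger arrays, sparing a second allocation in the common case. A hedged userspace sketch of that shape (the struct layout, the INLINE_PAGES threshold of 4, and the helper names are illustrative, not fuse's definitions):

#include <stdlib.h>

#define INLINE_PAGES 4   /* stand-in for FUSE_REQ_INLINE_PAGES */

struct page;             /* opaque in this sketch */

struct request {
	struct page **pages;              /* points at inline_pages[] or heap */
	struct page *inline_pages[INLINE_PAGES];
	unsigned max_pages;
};

static struct request *request_alloc(unsigned npages)
{
	struct request *req = calloc(1, sizeof(*req));

	if (!req)
		return NULL;

	if (npages <= INLINE_PAGES) {
		req->pages = req->inline_pages;   /* no extra allocation */
	} else {
		req->pages = calloc(npages, sizeof(*req->pages));
		if (!req->pages) {
			free(req);
			return NULL;
		}
	}
	req->max_pages = npages;
	return req;
}

static void request_free(struct request *req)
{
	if (req->pages != req->inline_pages)
		free(req->pages);
	free(req);
}

int main(void)
{
	struct request *small = request_alloc(2);
	struct request *big = request_alloc(64);

	if (small)
		request_free(small);
	if (big)
		request_free(big);
	return 0;
}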
Dfile.c1267 unsigned npages; in fuse_get_user_pages() local
1281 npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE; in fuse_get_user_pages()
1284 fuse_page_descs_length_init(req, req->num_pages, npages); in fuse_get_user_pages()
1286 req->num_pages += npages; in fuse_get_user_pages()
/linux-4.4.14/arch/parisc/mm/
Dinit.c296 unsigned long npages; in setup_bootmem() local
299 npages = pmem_ranges[i].pages; in setup_bootmem()
304 (start_pfn + npages) ); in setup_bootmem()
307 (npages << PAGE_SHIFT) ); in setup_bootmem()
309 if ((start_pfn + npages) > max_pfn) in setup_bootmem()
310 max_pfn = start_pfn + npages; in setup_bootmem()
/linux-4.4.14/arch/powerpc/platforms/cell/
Diommu.c167 static int tce_build_cell(struct iommu_table *tbl, long index, long npages, in tce_build_cell() argument
201 for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) in tce_build_cell()
206 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_build_cell()
209 index, npages, direction, base_pte); in tce_build_cell()
213 static void tce_free_cell(struct iommu_table *tbl, long index, long npages) in tce_free_cell() argument
221 pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); in tce_free_cell()
235 for (i = 0; i < npages; i++) in tce_free_cell()
240 invalidate_tce_cache(window->iommu, io_pte, npages); in tce_free_cell()
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dmr.c481 int npages, u64 iova) in mlx4_ib_map_phys_fmr() argument
486 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
543 if (unlikely(mr->npages == mr->max_pages)) in mlx4_set_page()
546 mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); in mlx4_set_page()
558 mr->npages = 0; in mlx4_ib_map_mr_sg()
Dmlx4_ib.h138 u32 npages; member
769 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
Dsrq.c165 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, in mlx4_ib_create_srq()
Dcq.c111 err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift, in mlx4_ib_alloc_cq_buf()
/linux-4.4.14/fs/f2fs/
Ddebug.c134 unsigned npages; in update_mem_info() local
200 npages = NODE_MAPPING(sbi)->nrpages; in update_mem_info()
201 si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; in update_mem_info()
202 npages = META_MAPPING(sbi)->nrpages; in update_mem_info()
203 si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; in update_mem_info()
Ddir.c219 unsigned long npages = dir_blocks(dir); in f2fs_find_entry() local
237 if (npages == 0) in f2fs_find_entry()
826 unsigned long npages = dir_blocks(inode); in f2fs_readdir() local
852 if (npages - n > 1 && !ra_has_index(ra, n)) in f2fs_readdir()
854 min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); in f2fs_readdir()
856 for (; n < npages; n++) { in f2fs_readdir()
Df2fs.h1294 static inline struct bio *f2fs_bio_alloc(int npages) in f2fs_bio_alloc() argument
1299 bio = bio_alloc(GFP_NOIO, npages); in f2fs_bio_alloc()
1301 bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); in f2fs_bio_alloc()
Ddata.c89 int npages, bool is_read) in __bio_alloc() argument
93 bio = f2fs_bio_alloc(npages); in __bio_alloc()
Dsegment.c1630 int npages = npages_for_summary_flush(sbi, true); in restore_curseg_summaries() local
1632 if (npages >= 2) in restore_curseg_summaries()
1633 ra_meta_pages(sbi, start_sum_block(sbi), npages, in restore_curseg_summaries()
/linux-4.4.14/arch/powerpc/platforms/powernv/
Dpci.h205 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
208 extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
Dpci-ioda.c1641 unsigned long index, unsigned long npages, bool rm) in pnv_pci_ioda1_tce_invalidate() argument
1656 npages - 1); in pnv_pci_ioda1_tce_invalidate()
1693 long npages, unsigned long uaddr, in pnv_ioda1_tce_build() argument
1697 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, in pnv_ioda1_tce_build()
1701 pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false); in pnv_ioda1_tce_build()
1721 long npages) in pnv_ioda1_tce_free() argument
1723 pnv_tce_free(tbl, index, npages); in pnv_ioda1_tce_free()
1726 pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false); in pnv_ioda1_tce_free()
1753 unsigned long index, unsigned long npages) in pnv_pci_ioda2_do_tce_invalidate() argument
1764 end |= ((index + npages - 1) << shift); in pnv_pci_ioda2_do_tce_invalidate()
[all …]
Dpci.c596 int pnv_tce_build(struct iommu_table *tbl, long index, long npages, in pnv_tce_build() argument
607 for (i = 0; i < npages; i++) { in pnv_tce_build()
639 void pnv_tce_free(struct iommu_table *tbl, long index, long npages) in pnv_tce_free() argument
643 for (i = 0; i < npages; i++) { in pnv_tce_free()
/linux-4.4.14/arch/parisc/kernel/
Dinventory.c135 unsigned long npages; in pagezero_memconfig() local
150 npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT); in pagezero_memconfig()
151 set_pmem_entry(pmem_ranges,0UL,npages); in pagezero_memconfig()
/linux-4.4.14/net/sunrpc/auth_gss/
Dgss_rpc_upcall.c218 for (i = 0; i < arg->npages && arg->pages[i]; i++) in gssp_free_receive_pages()
226 arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE); in gssp_alloc_receive_pages()
227 arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL); in gssp_alloc_receive_pages()
Dgss_rpc_xdr.h151 unsigned int npages; member
Dgss_rpc_xdr.c784 arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); in gssx_enc_accept_sec_context()
/linux-4.4.14/net/sunrpc/xprtrdma/
Drpc_rdma.c367 int i, npages, curlen; in rpcrdma_inline_pullup() local
399 npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT; in rpcrdma_inline_pullup()
400 for (i = 0; copy_len && i < npages; i++) { in rpcrdma_inline_pullup()
620 int i, npages, curlen, olen; in rpcrdma_inline_fixup() local
647 npages = PAGE_ALIGN(page_base + in rpcrdma_inline_fixup()
649 for (; i < npages; i++) { in rpcrdma_inline_fixup()
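Several hits in this listing compute the same quantity, the number of pages touched by a region that starts at a sub-page offset: rpcrdma_inline_pullup() writes it PAGE_ALIGN(page_base + copy_len) >> PAGE_SHIFT, while svc_tcp_restore_pages() and get_iovec_page_array() further down use (len + PAGE_SIZE - 1) >> PAGE_SHIFT. The two roundings agree; a tiny check under the usual 4 KiB page assumption:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long off, len;

	for (off = 0; off < PAGE_SIZE; off += 511) {
		for (len = 1; len < 4 * PAGE_SIZE; len += 777) {
			unsigned long a = PAGE_ALIGN(off + len) >> PAGE_SHIFT;
			unsigned long b = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
			assert(a == b);   /* both are ceil((off + len) / PAGE_SIZE) */
		}
	}
	printf("both forms round %lu-byte pages identically\n", PAGE_SIZE);
	return 0;
}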
/linux-4.4.14/arch/mips/kvm/
Dmips.c195 unsigned long npages) in kvm_arch_create_memslot() argument
214 unsigned long npages = 0; in kvm_arch_commit_memory_region() local
224 npages = mem->memory_size >> PAGE_SHIFT; in kvm_arch_commit_memory_region()
226 if (npages) { in kvm_arch_commit_memory_region()
227 kvm->arch.guest_pmap_npages = npages; in kvm_arch_commit_memory_region()
229 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); in kvm_arch_commit_memory_region()
237 npages, kvm->arch.guest_pmap); in kvm_arch_commit_memory_region()
240 for (i = 0; i < npages; i++) in kvm_arch_commit_memory_region()
995 ga_end = ga + (memslot->npages << PAGE_SHIFT); in kvm_vm_ioctl_get_dirty_log()
/linux-4.4.14/drivers/staging/rdma/hfi1/
Duser_sdma.c176 unsigned npages; member
963 pageidx == iovec->npages && in user_sdma_send_pkts()
1047 iovec->npages = num_user_pages(&iovec->iov); in pin_vector_pages()
1048 iovec->pages = kcalloc(iovec->npages, sizeof(*iovec->pages), in pin_vector_pages()
1060 iovec->npages, 0, iovec->pages); in pin_vector_pages()
1064 if (pinned != iovec->npages) { in pin_vector_pages()
1066 iovec->npages); in pin_vector_pages()
1087 for (i = 0; i < iovec->npages; i++) in unpin_vector_pages()
1092 iovec->npages = 0; in unpin_vector_pages()
1424 if (req->iovs[i].npages && req->iovs[i].pages) in user_sdma_free_request()
Dfile_ops.c1573 unsigned tid, mapped = 0, npages, ngroups, exp_groups, in exp_tid_setup() local
1588 npages = num_user_pages(vaddr, tinfo->length); in exp_tid_setup()
1589 if (!npages) { in exp_tid_setup()
1594 npages * PAGE_SIZE)) { in exp_tid_setup()
1596 (void *)vaddr, npages); in exp_tid_setup()
1619 mapped < npages && idx <= uctxt->tidmapcnt;) { in exp_tid_setup()
1640 ngroups = ((npages - mapped) / dd->rcv_entries.group_size) + in exp_tid_setup()
1641 !!((npages - mapped) % dd->rcv_entries.group_size); in exp_tid_setup()
1675 (npages - mapped)); in exp_tid_setup()
/linux-4.4.14/include/uapi/xen/
Dprivcmd.h52 __u64 npages; member
/linux-4.4.14/arch/x86/entry/vdso/
Dvma.c30 int npages = (image->size) / PAGE_SIZE; in init_vdso_image() local
33 for (i = 0; i < npages; i++) in init_vdso_image()
/linux-4.4.14/include/rdma/
Dib_umem.h57 int npages; member
/linux-4.4.14/net/sunrpc/
Dsvcsock.c939 unsigned int i, len, npages; in svc_tcp_restore_pages() local
944 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in svc_tcp_restore_pages()
945 for (i = 0; i < npages; i++) { in svc_tcp_restore_pages()
958 unsigned int i, len, npages; in svc_tcp_save_pages() local
963 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in svc_tcp_save_pages()
964 for (i = 0; i < npages; i++) { in svc_tcp_save_pages()
972 unsigned int i, len, npages; in svc_tcp_clear_pages() local
977 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in svc_tcp_clear_pages()
978 for (i = 0; i < npages; i++) { in svc_tcp_clear_pages()
/linux-4.4.14/include/crypto/
Dif_alg.h68 unsigned int npages; member
/linux-4.4.14/fs/ceph/
Dfile.c72 int ret = 0, idx, npages; in dio_get_pages_alloc() local
76 npages = calc_pages_for(align, nbytes); in dio_get_pages_alloc()
77 pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL); in dio_get_pages_alloc()
79 pages = vmalloc(sizeof(*pages) * npages); in dio_get_pages_alloc()
84 for (idx = 0; idx < npages; ) { in dio_get_pages_alloc()
87 npages - idx, &start); in dio_get_pages_alloc()
97 *num_pages = npages; in dio_get_pages_alloc()
99 dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align); in dio_get_pages_alloc()
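dio_get_pages_alloc() tries kmalloc() for the page-pointer array and falls back to vmalloc() when that fails, the same trade the drbd_bitmap and i915 hits make for large arrays. A loose userspace model (malloc stands in for both allocators, and the size threshold that simulates kmalloc failure is purely illustrative):

#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_MAX 4096   /* illustrative threshold, not the kernel's */

/* Model of the kmalloc-then-vmalloc fallback: try the cheap contiguous
 * allocator first, fall back to the large-array allocator on failure. */
static void *alloc_page_array(size_t npages, int *vmalloced)
{
	void *p = NULL;

	if (npages * sizeof(void *) <= KMALLOC_MAX)
		p = malloc(npages * sizeof(void *));   /* "kmalloc" */
	if (!p) {
		p = malloc(npages * sizeof(void *));   /* "vmalloc" */
		*vmalloced = 1;
	} else {
		*vmalloced = 0;
	}
	return p;
}

int main(void)
{
	int vm;
	void *a = alloc_page_array(16, &vm);

	printf("16 pages via %s\n", vm ? "vmalloc" : "kmalloc");
	void *b = alloc_page_array(1UL << 20, &vm);
	printf("1M pages via %s\n", vm ? "vmalloc" : "kmalloc");
	free(a);
	free(b);
	return 0;
}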
/linux-4.4.14/fs/nfs/blocklayout/
Dblocklayout.c240 header->page_array.npages, f_offset, in bl_read_pagelist()
252 for (i = pg_index; i < header->page_array.npages; i++) { in bl_read_pagelist()
285 header->page_array.npages - i, in bl_read_pagelist()
398 for (i = pg_index; i < header->page_array.npages; i++) { in bl_write_pagelist()
412 bio = do_add_page_to_bio(bio, header->page_array.npages - i, in bl_write_pagelist()
/linux-4.4.14/fs/jfs/
Djfs_dmap.c4048 #define BMAPPGTOLEV(npages) \ argument
4049 (((npages) <= 3 + MAXL0PAGES) ? 0 : \
4050 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
4056 s64 npages, ndmaps; in dbMapFileSizeToMapSize() local
4061 npages = nblocks >> JFS_SBI(sb)->l2nbperpage; in dbMapFileSizeToMapSize()
4062 level = BMAPPGTOLEV(npages); in dbMapFileSizeToMapSize()
4069 npages--; /* skip the first global control page */ in dbMapFileSizeToMapSize()
4071 npages -= (2 - level); in dbMapFileSizeToMapSize()
4072 npages--; /* skip top level's control page */ in dbMapFileSizeToMapSize()
4076 complete = (u32) npages / factor; in dbMapFileSizeToMapSize()
[all …]
Djfs_logmgr.c2386 int npages = 0; in lmLogFormat() local
2397 npages = logSize >> sbi->l2nbperpage; in lmLogFormat()
2417 logsuper->size = cpu_to_le32(npages); in lmLogFormat()
2455 lp->h.page = lp->t.page = cpu_to_le32(npages - 3); in lmLogFormat()
2474 for (lspn = 0; lspn < npages - 3; lspn++) { in lmLogFormat()
Djfs_xtree.c2576 int nb, npages, nblks; in xtRelocate() local
2676 npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE; in xtRelocate()
2686 offset += nb, pno++, npages--) { in xtRelocate()
2691 if (rc = cmRead(ip, offset, npages, &cp)) in xtRelocate()
/linux-4.4.14/arch/x86/kernel/cpu/
Dperf_event_intel_pt.c683 unsigned long idx, npages, wakeup; in pt_buffer_reset_markers() local
701 npages = handle->size >> PAGE_SHIFT; in pt_buffer_reset_markers()
705 npages++; in pt_buffer_reset_markers()
707 idx = (head >> PAGE_SHIFT) + npages; in pt_buffer_reset_markers()
714 idx = (head >> PAGE_SHIFT) + npages - 1; in pt_buffer_reset_markers()
/linux-4.4.14/drivers/block/drbd/
Ddrbd_bitmap.c640 struct page **npages, **opages = NULL; in drbd_bm_resize() local
697 npages = b->bm_pages; in drbd_bm_resize()
700 npages = NULL; in drbd_bm_resize()
702 npages = bm_realloc_pages(b, want); in drbd_bm_resize()
705 if (!npages) { in drbd_bm_resize()
719 b->bm_pages = npages; in drbd_bm_resize()
742 if (opages != npages) in drbd_bm_resize()
/linux-4.4.14/drivers/usb/mon/
Dmon_bin.c220 static int mon_alloc_buff(struct mon_pgmap *map, int npages);
221 static void mon_free_buff(struct mon_pgmap *map, int npages);
1305 static int mon_alloc_buff(struct mon_pgmap *map, int npages) in mon_alloc_buff() argument
1310 for (n = 0; n < npages; n++) { in mon_alloc_buff()
1323 static void mon_free_buff(struct mon_pgmap *map, int npages) in mon_free_buff() argument
1327 for (n = 0; n < npages; n++) in mon_free_buff()
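mon_alloc_buff() and mon_free_buff() pair a fill loop with an unwind: when an allocation fails partway through npages, the pages already taken must be released before reporting failure. A self-contained sketch of that unwind (calloc stands in for get_zeroed_page()):

#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Allocate npages buffers, freeing the partial set on mid-loop failure. */
static void **alloc_buff(int npages)
{
	void **map = calloc(npages, sizeof(*map));
	int n;

	if (!map)
		return NULL;

	for (n = 0; n < npages; n++) {
		map[n] = calloc(1, PAGE_SIZE);    /* stands in for get_zeroed_page() */
		if (!map[n]) {
			while (--n >= 0)          /* unwind what we already hold */
				free(map[n]);
			free(map);
			return NULL;
		}
	}
	return map;
}

static void free_buff(void **map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free(map[n]);
	free(map);
}

int main(void)
{
	void **buf = alloc_buff(8);

	if (buf)
		free_buff(buf, 8);
	return 0;
}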
/linux-4.4.14/include/linux/mlx4/
Ddevice.h617 int npages; member
1067 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
1073 int npages, int page_shift, struct mlx4_mr *mr);
1081 int start_index, int npages, u64 *page_list);
1361 int npages, u64 iova, u32 *lkey, u32 *rkey);
1478 u64 iova, u64 size, int npages,
/linux-4.4.14/drivers/staging/lustre/lustre/llite/
Ddir.c157 int npages; in ll_dir_filler() local
173 for (npages = 1; npages < max_pages; npages++) { in ll_dir_filler()
177 page_pool[npages] = page; in ll_dir_filler()
182 op_data->op_npages = npages; in ll_dir_filler()
204 CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages); in ll_dir_filler()
206 for (i = 1; i < npages; i++) { in ll_dir_filler()
Drw26.c216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) in ll_free_user_pages() argument
220 for (i = 0; i < npages; i++) { in ll_free_user_pages()
/linux-4.4.14/sound/pci/emu10k1/
Dmemory.c103 static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) in search_empty_map_area() argument
106 int max_size = npages; in search_empty_map_area()
116 if (size == npages) { in search_empty_map_area()
/linux-4.4.14/arch/arm/kvm/
Dmmu.c358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
749 phys_addr_t size = PAGE_SIZE * memslot->npages; in stage2_unmap_memslot()
1165 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
1486 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
1758 if (memslot->base_gfn + memslot->npages >= in kvm_arch_prepare_memory_region()
1835 unsigned long npages) in kvm_arch_create_memslot() argument
1861 phys_addr_t size = slot->npages << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
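The arm/kvm hits repeatedly turn a memslot's (base_gfn, npages) pair into a guest-physical byte range before flushing, write-protecting, or unmapping it. A one-function sketch of that conversion (the struct and field types are simplified):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct memslot {
	uint64_t base_gfn;   /* first guest frame number */
	uint64_t npages;     /* slot length in pages */
};

static void slot_range(const struct memslot *s, uint64_t *start, uint64_t *end)
{
	*start = s->base_gfn << PAGE_SHIFT;
	*end   = (s->base_gfn + s->npages) << PAGE_SHIFT;   /* exclusive */
}

int main(void)
{
	struct memslot s = { .base_gfn = 0x100, .npages = 512 };
	uint64_t start, end;

	slot_range(&s, &start, &end);
	printf("slot spans [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}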
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
Do2iblnd.c1092 int npages = p->ibp_npages; in kiblnd_free_pages() local
1095 for (i = 0; i < npages; i++) { in kiblnd_free_pages()
1100 LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); in kiblnd_free_pages()
1103 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) in kiblnd_alloc_pages() argument
1109 offsetof(kib_pages_t, ibp_pages[npages])); in kiblnd_alloc_pages()
1111 CERROR("Can't allocate descriptor for %d pages\n", npages); in kiblnd_alloc_pages()
1115 memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages])); in kiblnd_alloc_pages()
1116 p->ibp_npages = npages; in kiblnd_alloc_pages()
1118 for (i = 0; i < npages; i++) { in kiblnd_alloc_pages()
1123 CERROR("Can't allocate page %d of %d\n", i, npages); in kiblnd_alloc_pages()
[all …]
Do2iblnd.h919 int npages, __u64 iov, kib_fmr_t *fmr);
935 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
Do2iblnd_cb.c562 int npages; in kiblnd_fmr_map_tx() local
573 for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { in kiblnd_fmr_map_tx()
576 pages[npages++] = (rd->rd_frags[i].rf_addr & in kiblnd_fmr_map_tx()
584 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr); in kiblnd_fmr_map_tx()
586 CERROR("Can't map %d pages: %d\n", npages, rc); in kiblnd_fmr_map_tx()
/linux-4.4.14/drivers/nvme/host/
Dnvme.h105 int npages; /* In the PRP list. 0 means small pool in use */ member
Dpci.c426 iod->npages = -1; in iod_init()
473 if (iod->npages == 0) in nvme_free_iod()
475 for (i = 0; i < iod->npages; i++) { in nvme_free_iod()
687 iod->npages = 0; in nvme_setup_prps()
690 iod->npages = 1; in nvme_setup_prps()
696 iod->npages = -1; in nvme_setup_prps()
708 list[iod->npages++] = prp_list; in nvme_setup_prps()
881 iod->npages = 0; in nvme_queue_rq()
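In the nvme driver, iod->npages counts the PRP-list pages backing a request (0 when the small pool suffices, -1 before setup). How many list pages a transfer needs follows from the PRP layout: one 8-byte entry per data page, with the last slot of every full list page chaining to the next. A small model of that arithmetic (the chaining assumption and 4 KiB page size are mine, not taken from the snippet):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PRPS_PER_PAGE (PAGE_SIZE / sizeof(unsigned long long))   /* 512 slots */

/* Pages of PRP-list storage needed for a transfer of nprps data pages,
 * assuming the last slot of each full list page chains to the next. */
static unsigned long prp_list_pages(unsigned long nprps)
{
	if (nprps <= 2)       /* both PRPs fit in the command itself */
		return 0;
	nprps -= 1;           /* first data page uses the command's PRP1 */
	return (nprps + PRPS_PER_PAGE - 2) / (PRPS_PER_PAGE - 1);
}

int main(void)
{
	printf("8 KiB I/O (2 pages): %lu list pages\n", prp_list_pages(2));
	printf("2 MiB I/O (512 pages): %lu list pages\n", prp_list_pages(512));
	printf("4 MiB I/O (1024 pages): %lu list pages\n", prp_list_pages(1024));
	return 0;
}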
/linux-4.4.14/drivers/infiniband/hw/nes/
Dnes_verbs.h85 u32 npages; member
Dnes_verbs.c470 if (unlikely(nesmr->npages == nesmr->max_pages)) in nes_set_page()
473 nesmr->pages[nesmr->npages++] = cpu_to_le64(addr); in nes_set_page()
484 nesmr->npages = 0; in nes_map_mr_sg()
3409 if (mr->npages > (NES_4K_PBL_CHUNK_SIZE / sizeof(u64))) { in nes_post_send()
3459 mr->npages * 8); in nes_post_send()
3468 mr->npages, in nes_post_send()
/linux-4.4.14/drivers/infiniband/ulp/srp/
Dib_srp.h303 unsigned int npages; member
Dib_srp.c1288 if (state->npages == 0) in srp_map_finish_fmr()
1291 if (state->npages == 1 && target->global_mr) { in srp_map_finish_fmr()
1298 state->npages, io_addr); in srp_map_finish_fmr()
1309 state->npages = 0; in srp_map_finish_fmr()
1392 if (state->npages == dev->max_pages_per_mr || offset != 0) { in srp_map_sg_entry()
1400 if (!state->npages) in srp_map_sg_entry()
1402 state->pages[state->npages++] = dma_addr & dev->mr_page_mask; in srp_map_sg_entry()
1534 state.npages = 1; in srp_map_idb()
/linux-4.4.14/fs/
Dsplice.c1454 unsigned long off, npages; in get_iovec_page_array() local
1491 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; in get_iovec_page_array()
1492 if (npages > pipe_buffers - buffers) in get_iovec_page_array()
1493 npages = pipe_buffers - buffers; in get_iovec_page_array()
1495 error = get_user_pages_fast((unsigned long)base, npages, in get_iovec_page_array()
1528 if (error < npages || buffers == pipe_buffers) in get_iovec_page_array()
/linux-4.4.14/drivers/edac/
Di5100_edac.c857 const unsigned long npages = i5100_npages(mci, i); in i5100_init_csrows() local
861 if (!npages) in i5100_init_csrows()
867 dimm->nr_pages = npages; in i5100_init_csrows()
877 chan, rank, (long)PAGES_TO_MiB(npages)); in i5100_init_csrows()
Dsb_edac.c932 unsigned i, j, banks, ranks, rows, cols, npages; in get_dimm_config() local
1010 npages = MiB_TO_PAGES(size); in get_dimm_config()
1014 size, npages, in get_dimm_config()
1017 dimm->nr_pages = npages; in get_dimm_config()
Di7core_edac.c595 u32 size, npages; in get_dimm_config() local
615 npages = MiB_TO_PAGES(size); in get_dimm_config()
617 dimm->nr_pages = npages; in get_dimm_config()
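The three EDAC drivers above convert DIMM sizes between MiB and page counts; with 4 KiB pages each direction is a shift by 20 - PAGE_SHIFT = 8 bits. A two-macro sketch of what MiB_TO_PAGES()/PAGES_TO_MiB() evaluate to under that page-size assumption:

#include <stdio.h>

#define PAGE_SHIFT 12
#define MiB_TO_PAGES(mb)  ((mb) << (20 - PAGE_SHIFT))
#define PAGES_TO_MiB(pg)  ((pg) >> (20 - PAGE_SHIFT))

int main(void)
{
	unsigned long npages = MiB_TO_PAGES(4096UL);   /* a 4 GiB DIMM */

	printf("4096 MiB = %lu pages = %lu MiB back\n",
	       npages, PAGES_TO_MiB(npages));
	return 0;
}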
/linux-4.4.14/include/linux/mlx5/
Ddriver.h295 int npages; member
712 gfp_t flags, int npages);
743 s32 npages);
/linux-4.4.14/fs/afs/
Ddir.c93 __be16 npages; member
143 if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) { in afs_dir_check_page()
146 ntohs(dbuf->blocks[0].pagehdr.npages)); in afs_dir_check_page()
/linux-4.4.14/net/rds/
Dib_rdma.c321 int npages) in rds_ib_alloc_fmr() argument
327 if (npages <= RDS_FMR_8K_MSG_SIZE) in rds_ib_alloc_fmr()
Dib.h352 int npages);
/linux-4.4.14/mm/
Dnommu.c1517 unsigned long npages; in split_vma() local
1542 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1548 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1559 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
/linux-4.4.14/drivers/staging/lustre/lnet/selftest/
Dframework.c1101 sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, in sfw_alloc_pages() argument
1105 LASSERT(npages > 0 && npages <= LNET_MAX_IOV); in sfw_alloc_pages()
1107 rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); in sfw_alloc_pages()
Dselftest.h428 int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
/linux-4.4.14/drivers/scsi/cxgbi/
Dlibcxgbi.c1389 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> in ddp_make_gl() local
1401 npages * (sizeof(dma_addr_t) + in ddp_make_gl()
1405 "xfer %u, %u pages, OOM.\n", xferlen, npages); in ddp_make_gl()
1410 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); in ddp_make_gl()
1412 gl->pages = (struct page **)&gl->phys_addr[npages]; in ddp_make_gl()
1413 gl->nelem = npages; in ddp_make_gl()
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
Docrdma.h197 u32 npages; member
Docrdma_verbs.c2178 fast_reg->num_sges = mr->npages; in ocrdma_build_reg()
2182 for (i = 0; i < mr->npages; i++) { in ocrdma_build_reg()
3236 if (unlikely(mr->npages == mr->hwmr.num_pbes)) in ocrdma_set_page()
3239 mr->pages[mr->npages++] = addr; in ocrdma_set_page()
3250 mr->npages = 0; in ocrdma_map_mr_sg()
/linux-4.4.14/net/core/
Dskbuff.c4463 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; in alloc_skb_with_frags() local
4474 if (npages > MAX_SKB_FRAGS) in alloc_skb_with_frags()
4486 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
4488 for (i = 0; npages > 0; i++) { in alloc_skb_with_frags()
4492 if (npages >= 1 << order) { in alloc_skb_with_frags()
4514 npages -= 1 << order; in alloc_skb_with_frags()
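alloc_skb_with_frags() covers npages with as few allocations as it can, trying the largest order that still fits and stepping down as the remainder shrinks (the kernel version also steps down when an allocation fails, which this model omits). A userspace walk of the order descent:

#include <stdio.h>

#define MAX_ORDER 3   /* illustrative cap on compound-page order */

/* Walk orders high-to-low, covering npages with the fewest chunks. */
static void cover_pages(int npages)
{
	int order = MAX_ORDER;

	while (npages > 0) {
		while (order && (1 << order) > npages)
			order--;      /* don't overshoot what's left */
		printf("alloc order-%d chunk (%d pages)\n", order, 1 << order);
		npages -= 1 << order;
	}
}

int main(void)
{
	cover_pages(11);   /* 8 + 2 + 1 */
	return 0;
}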
/linux-4.4.14/drivers/vhost/
Dscsi.c641 unsigned int npages = 0, offset, nbytes; in vhost_scsi_map_to_sgl() local
669 sg_set_page(sg, pages[npages], nbytes, offset); in vhost_scsi_map_to_sgl()
673 npages++; in vhost_scsi_map_to_sgl()
/linux-4.4.14/fs/cifs/
Dfile.c2906 unsigned int npages, rsize, credits; in cifs_send_async_read() local
2926 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); in cifs_send_async_read()
2929 rdata = cifs_readdata_alloc(npages, in cifs_send_async_read()
2937 rc = cifs_read_allocate_pages(rdata, npages); in cifs_send_async_read()
2942 rdata->nr_pages = npages; in cifs_send_async_read()
/linux-4.4.14/arch/s390/kvm/
Dkvm-s390.c275 last_gfn = memslot->base_gfn + memslot->npages; in kvm_s390_sync_dirty_log()
2678 unsigned long npages) in kvm_arch_create_memslot() argument
2719 old->npages * PAGE_SIZE == mem->memory_size) in kvm_arch_commit_memory_region()
/linux-4.4.14/drivers/staging/lustre/lustre/include/
Dlustre_net.h2391 unsigned npages, unsigned max_brw,
