Searched refs: npages (Results 1 - 186 of 186), sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_mem.c
81 int npages) iwch_reregister_mem()
87 if (npages > mhp->attr.pbl_size) iwch_reregister_mem()
109 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) iwch_alloc_pbl() argument
112 npages << 3); iwch_alloc_pbl()
117 mhp->attr.pbl_size = npages; iwch_alloc_pbl()
128 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) iwch_write_pbl() argument
131 mhp->attr.pbl_addr + (offset << 3), npages); iwch_write_pbl()
138 int *npages, build_phys_page_list()
176 *npages = 0; build_phys_page_list()
178 *npages += (buffer_list[i].size + build_phys_page_list()
181 if (!*npages) build_phys_page_list()
184 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); build_phys_page_list()
199 *npages); build_phys_page_list()
78 iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php, struct iwch_mr *mhp, int shift, int npages) iwch_reregister_mem() argument
134 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) build_phys_page_list() argument
cxio_dbg.c
78 int size, npages; cxio_dump_pbl() local
81 npages = (len + (1ULL << shift) - 1) >> shift; cxio_dump_pbl()
82 size = npages * sizeof(u64); cxio_dump_pbl()
93 __func__, m->addr, m->len, npages); cxio_dump_pbl()
iwch_provider.h
345 int npages);
346 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages);
348 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset);
353 int *npages,
iwch_provider.c
481 int npages; iwch_register_phys_mem() local
510 &total_size, &npages, &shift, &page_list); iwch_register_phys_mem()
514 ret = iwch_alloc_pbl(mhp, npages); iwch_register_phys_mem()
520 ret = iwch_write_pbl(mhp, page_list, npages, 0); iwch_register_phys_mem()
533 mhp->attr.pbl_size = npages; iwch_register_phys_mem()
563 int npages = 0; iwch_reregister_phys_mem() local
589 &total_size, &npages, iwch_reregister_phys_mem()
595 ret = iwch_reregister_mem(rhp, php, &mh, shift, npages); iwch_reregister_phys_mem()
609 mhp->attr.pbl_size = npages; iwch_reregister_phys_mem()
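
Every cxgb3 hit above sizes the physical buffer list the same way: round a byte length up to whole pages of (1 << shift) bytes, as cxio_dump_pbl() does and as build_phys_page_list() accumulates per buffer. A minimal stand-alone sketch of that arithmetic (illustrative only, not the kernel code):

#include <stdio.h>
#include <stdint.h>

/* Round a byte length up to whole pages of (1 << shift) bytes, matching
 * npages = (len + (1ULL << shift) - 1) >> shift in cxio_dump_pbl(). */
static uint64_t npages_for_len(uint64_t len, unsigned int shift)
{
        return (len + (1ULL << shift) - 1) >> shift;
}

int main(void)
{
        /* 10000 bytes with 4 KiB pages (shift = 12) span 3 pages. */
        printf("%llu\n", (unsigned long long)npages_for_len(10000, 12));
        return 0;
}

The same ceil-division recurs throughout the rest of the results, often spelled DIV_ROUND_UP(len, PAGE_SIZE).
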
/linux-4.1.27/arch/sparc/kernel/
pci_sun4v.c
43 unsigned long npages; /* Number of pages in list. */ member in struct:iommu_batch
57 p->npages = 0; iommu_batch_start()
68 unsigned long npages = p->npages; iommu_batch_flush() local
70 while (npages != 0) { iommu_batch_flush()
74 npages, prot, __pa(pglist)); iommu_batch_flush()
81 npages, prot, __pa(pglist), num); iommu_batch_flush()
86 npages -= num; iommu_batch_flush()
91 p->npages = 0; iommu_batch_flush()
100 if (p->entry + p->npages == entry) iommu_batch_new_entry()
112 BUG_ON(p->npages >= PGLIST_NENTS); iommu_batch_add()
114 p->pglist[p->npages++] = phys_page; iommu_batch_add()
115 if (p->npages == PGLIST_NENTS) iommu_batch_add()
126 BUG_ON(p->npages >= PGLIST_NENTS); iommu_batch_end()
135 unsigned long flags, order, first_page, npages, n; dma_4v_alloc_coherent() local
147 npages = size >> IO_PAGE_SHIFT; dma_4v_alloc_coherent()
159 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, dma_4v_alloc_coherent()
176 for (n = 0; n < npages; n++) { dma_4v_alloc_coherent()
190 iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE); dma_4v_alloc_coherent()
198 unsigned long npages) dma_4v_iommu_demap()
207 npages); dma_4v_iommu_demap()
210 npages -= num; dma_4v_iommu_demap()
211 } while (npages != 0); dma_4v_iommu_demap()
220 unsigned long order, npages, entry; dma_4v_free_coherent() local
223 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; dma_4v_free_coherent()
228 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_free_coherent()
229 iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); dma_4v_free_coherent()
241 unsigned long flags, npages, oaddr; dma_4v_map_page() local
253 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); dma_4v_map_page()
254 npages >>= IO_PAGE_SHIFT; dma_4v_map_page()
256 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, dma_4v_map_page()
273 for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { dma_4v_map_page()
291 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); dma_4v_map_page()
301 unsigned long npages; dma_4v_unmap_page() local
315 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); dma_4v_unmap_page()
316 npages >>= IO_PAGE_SHIFT; dma_4v_unmap_page()
319 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_unmap_page()
320 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); dma_4v_unmap_page()
364 unsigned long paddr, npages, entry, out_entry = 0, slen; for_each_sg() local
374 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); for_each_sg()
375 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, for_each_sg()
382 " npages %lx\n", iommu, paddr, npages); for_each_sg()
394 while (npages--) { for_each_sg()
448 unsigned long vaddr, npages; for_each_sg() local
451 npages = iommu_num_pages(s->dma_address, s->dma_length, for_each_sg()
453 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, for_each_sg()
489 unsigned long npages; dma_4v_unmap_sg() local
495 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); dma_4v_unmap_sg()
497 dma_4v_iommu_demap(&devhandle, entry, npages); dma_4v_unmap_sg()
498 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, dma_4v_unmap_sg()
197 dma_4v_iommu_demap(void *demap_arg, unsigned long entry, unsigned long npages) dma_4v_iommu_demap() argument
iommu.c
158 unsigned long npages) alloc_npages()
162 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, alloc_npages()
204 int npages, nid; dma_4u_alloc_coherent() local
233 npages = size >> IO_PAGE_SHIFT; dma_4u_alloc_coherent()
235 while (npages--) { dma_4u_alloc_coherent()
251 unsigned long order, npages; dma_4u_free_coherent() local
253 npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; dma_4u_free_coherent()
256 iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE); dma_4u_free_coherent()
271 unsigned long flags, npages, oaddr; dma_4u_map_page() local
283 npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); dma_4u_map_page()
284 npages >>= IO_PAGE_SHIFT; dma_4u_map_page()
286 base = alloc_npages(dev, iommu, npages); dma_4u_map_page()
307 for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) dma_4u_map_page()
321 u32 vaddr, unsigned long ctx, unsigned long npages, strbuf_flush()
356 for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE) strbuf_flush()
382 "vaddr[%08x] ctx[%lx] npages[%ld]\n", strbuf_flush()
383 vaddr, ctx, npages); strbuf_flush()
393 unsigned long flags, npages, ctx, i; dma_4u_unmap_page() local
404 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); dma_4u_unmap_page()
405 npages >>= IO_PAGE_SHIFT; dma_4u_unmap_page()
420 npages, direction); dma_4u_unmap_page()
423 for (i = 0; i < npages; i++) dma_4u_unmap_page()
429 iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE); dma_4u_unmap_page()
479 unsigned long paddr, npages, entry, out_entry = 0, slen; for_each_sg() local
490 npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); for_each_sg()
491 entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, for_each_sg()
498 " npages %lx\n", iommu, paddr, npages); for_each_sg()
511 while (npages--) { for_each_sg()
559 unsigned long vaddr, npages, entry, j; for_each_sg() local
563 npages = iommu_num_pages(s->dma_address, s->dma_length, for_each_sg()
570 for (j = 0; j < npages; j++) for_each_sg()
573 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, for_each_sg()
630 unsigned long npages, entry; dma_4u_unmap_sg() local
636 npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); dma_4u_unmap_sg()
645 npages, direction); dma_4u_unmap_sg()
647 for (i = 0; i < npages; i++) dma_4u_unmap_sg()
650 iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, dma_4u_unmap_sg()
666 unsigned long flags, ctx, npages; dma_4u_sync_single_for_cpu() local
676 npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); dma_4u_sync_single_for_cpu()
677 npages >>= IO_PAGE_SHIFT; dma_4u_sync_single_for_cpu()
693 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); dma_4u_sync_single_for_cpu()
704 unsigned long flags, ctx, npages, i; dma_4u_sync_sg_for_cpu() local
737 npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
739 strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
156 alloc_npages(struct device *dev, struct iommu *iommu, unsigned long npages) alloc_npages() argument
320 strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, enum dma_data_direction direction) strbuf_flush() argument
ldc.c
1017 unsigned long entry, unsigned long npages) ldc_demap()
1024 for (i = 0; i < npages; i++) { ldc_demap()
1950 unsigned long npages) alloc_npages()
1955 npages, NULL, (unsigned long)-1, 0); alloc_npages()
2084 unsigned long i, npages; ldc_map_sg() local
2097 npages = err; ldc_map_sg()
2103 base = alloc_npages(iommu, npages); ldc_map_sg()
2128 unsigned long npages, pa; ldc_map_single() local
2140 npages = pages_in_region(pa, len); ldc_map_single()
2144 base = alloc_npages(iommu, npages); ldc_map_single()
2166 unsigned long npages, entry; free_npages() local
2168 npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT; free_npages()
2171 ldc_demap(iommu, id, cookie, entry, npages); free_npages()
2172 iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry); free_npages()
1016 ldc_demap(struct ldc_iommu *iommu, unsigned long id, u64 cookie, unsigned long entry, unsigned long npages) ldc_demap() argument
1949 alloc_npages(struct ldc_iommu *iommu, unsigned long npages) alloc_npages() argument
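
A recurring idiom in the sparc IOMMU hits above (dma_4v_map_page(), dma_4u_map_page(), dma_4v_unmap_page()) is deriving npages from a possibly unaligned (address, size) pair: align the end up, align the start down, shift by the page order. A hedged user-space sketch; the 8 KiB IO_PAGE_SHIFT is assumed here for illustration:

#include <stdio.h>

#define IO_PAGE_SHIFT 13UL                       /* assumption: 8 KiB IOMMU pages */
#define IO_PAGE_SIZE  (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK  (~(IO_PAGE_SIZE - 1))
#define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

/* Pages touched by an unaligned (oaddr, sz) region, as in dma_4v_map_page(). */
static unsigned long pages_spanned(unsigned long oaddr, unsigned long sz)
{
        return (IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT;
}

int main(void)
{
        /* A 100-byte buffer straddling an 8 KiB boundary needs two pages. */
        printf("%lu\n", pages_spanned(0x1ff0, 100));
        return 0;
}
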
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
55 s32 npages; member in struct:mlx5_pages_req
166 s32 *npages, int boot) mlx5_cmd_query_pages()
184 *npages = be32_to_cpu(out.num_pages); mlx5_cmd_query_pages()
278 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, give_pages() argument
289 inlen = sizeof(*in) + npages * sizeof(in->pas[0]); give_pages()
297 for (i = 0; i < npages; i++) { give_pages()
314 in->num_entries = cpu_to_be32(npages); give_pages()
317 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", give_pages()
318 func_id, npages, err); give_pages()
321 dev->priv.fw_pages += npages; give_pages()
326 mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", give_pages()
327 func_id, npages, out.hdr.status); give_pages()
359 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, reclaim_pages() argument
374 outlen = sizeof(*out) + npages * sizeof(out->pas[0]); reclaim_pages()
382 in.num_entries = cpu_to_be32(npages); reclaim_pages()
383 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); reclaim_pages()
389 dev->priv.fw_pages -= npages; reclaim_pages()
416 if (req->npages < 0) pages_work_handler()
417 err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); pages_work_handler()
418 else if (req->npages > 0) pages_work_handler()
419 err = give_pages(dev, req->func_id, req->npages, 1); pages_work_handler()
423 req->npages < 0 ? "reclaim" : "give", err); pages_work_handler()
429 s32 npages) mlx5_core_req_pages_handler()
441 req->npages = npages; mlx5_core_req_pages_handler()
449 s32 uninitialized_var(npages); mlx5_satisfy_startup_pages()
452 err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); mlx5_satisfy_startup_pages()
457 npages, boot ? "boot" : "init", func_id); mlx5_satisfy_startup_pages()
459 return give_pages(dev, func_id, npages, 0); mlx5_satisfy_startup_pages()
165 mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot) mlx5_cmd_query_pages() argument
428 mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages) mlx5_core_req_pages_handler() argument
alloc.c
58 buf->npages = 1; mlx5_buf_alloc()
69 buf->npages *= 2; mlx5_buf_alloc()
76 buf->npages = buf->nbufs; mlx5_buf_alloc()
232 for (i = 0; i < buf->npages; i++) { mlx5_fill_page_array()
eq.c
276 s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); mlx5_eq_int() local
278 mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", mlx5_eq_int()
279 func_id, npages); mlx5_eq_int()
280 mlx5_core_req_pages_handler(dev, func_id, npages); mlx5_eq_int()
356 inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages; mlx5_create_map_eq()
/linux-4.1.27/drivers/gpu/drm/ttm/
ttm_page_alloc.c
70 * @npages: Number of pages in pool.
77 unsigned npages; member in struct:ttm_page_pool
276 static void ttm_pages_put(struct page *pages[], unsigned npages) ttm_pages_put() argument
279 if (set_pages_array_wb(pages, npages)) ttm_pages_put()
280 pr_err("Failed to set %d pages to wb!\n", npages); ttm_pages_put()
281 for (i = 0; i < npages; ++i) ttm_pages_put()
288 pool->npages -= freed_pages; ttm_pool_update_free_locked()
430 count += _manager->pools[i].npages; ttm_pool_shrink_count()
593 && count > pool->npages) { ttm_page_pool_fill_locked()
611 pool->npages += alloc_size; ttm_page_pool_fill_locked()
619 pool->npages += cpages; ttm_page_pool_fill_locked()
644 if (count >= pool->npages) { ttm_page_pool_get_pages()
647 count -= pool->npages; ttm_page_pool_get_pages()
648 pool->npages = 0; ttm_page_pool_get_pages()
653 if (count <= pool->npages/2) { ttm_page_pool_get_pages()
660 i = pool->npages + 1; ttm_page_pool_get_pages()
668 pool->npages -= count; ttm_page_pool_get_pages()
676 static void ttm_put_pages(struct page **pages, unsigned npages, int flags, ttm_put_pages() argument
685 for (i = 0; i < npages; i++) { ttm_put_pages()
697 for (i = 0; i < npages; i++) { ttm_put_pages()
703 pool->npages++; ttm_put_pages()
707 npages = 0; ttm_put_pages()
708 if (pool->npages > _manager->options.max_size) { ttm_put_pages()
709 npages = pool->npages - _manager->options.max_size; ttm_put_pages()
712 if (npages < NUM_PAGES_TO_ALLOC) ttm_put_pages()
713 npages = NUM_PAGES_TO_ALLOC; ttm_put_pages()
716 if (npages) ttm_put_pages()
717 ttm_page_pool_free(pool, npages, false); ttm_put_pages()
724 static int ttm_get_pages(struct page **pages, unsigned npages, int flags, ttm_get_pages() argument
745 for (r = 0; r < npages; ++r) { ttm_get_pages()
763 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages); ttm_get_pages()
780 if (npages > 0) { ttm_get_pages()
785 r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages); ttm_get_pages()
807 pool->npages = pool->nfrees = 0; ttm_page_pool_init_locked()
936 p->nfrees, p->npages); ttm_page_alloc_debugfs()
ttm_page_alloc_dma.c
379 struct page *pages[], unsigned npages) ttm_dma_pages_put()
384 if (npages && !(pool->type & IS_CACHED) && ttm_dma_pages_put()
385 set_pages_array_wb(pages, npages)) ttm_dma_pages_put()
387 pool->dev_name, npages); ttm_dma_pages_put()
939 unsigned count = 0, i, npages = 0; ttm_dma_unpopulate() local
963 npages = count; ttm_dma_unpopulate()
965 npages = pool->npages_free - _manager->options.max_size; ttm_dma_unpopulate()
968 if (npages < NUM_PAGES_TO_ALLOC) ttm_dma_unpopulate()
969 npages = NUM_PAGES_TO_ALLOC; ttm_dma_unpopulate()
995 if (npages) ttm_dma_unpopulate()
996 ttm_dma_page_pool_free(pool, npages, false); ttm_dma_unpopulate()
378 ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, struct page *pages[], unsigned npages) ttm_dma_pages_put() argument
ttm_bo.c
1183 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; ttm_bo_acc_size() local
1187 size += PAGE_ALIGN(npages * sizeof(void *)); ttm_bo_acc_size()
1197 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; ttm_bo_dma_acc_size() local
1201 size += PAGE_ALIGN(npages * sizeof(void *)); ttm_bo_dma_acc_size()
1202 size += PAGE_ALIGN(npages * sizeof(dma_addr_t)); ttm_bo_dma_acc_size()
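
In ttm_put_pages() above, the pool is only trimmed once pool->npages exceeds the configured maximum, and then at least NUM_PAGES_TO_ALLOC pages are freed in one go, presumably so each batch of work is worthwhile. A small sketch of just that accounting; the constant's value below is made up for the example:

#include <stdio.h>

#define NUM_PAGES_TO_ALLOC 64   /* illustrative; the real value lives in ttm_page_alloc.c */

/* How many pages a ttm_put_pages()-style trim would hand to the bulk-free path. */
static unsigned int pool_excess(unsigned int pool_npages, unsigned int max_size)
{
        unsigned int npages = 0;

        if (pool_npages > max_size) {
                npages = pool_npages - max_size;
                if (npages < NUM_PAGES_TO_ALLOC)
                        npages = NUM_PAGES_TO_ALLOC;
        }
        return npages;
}

int main(void)
{
        printf("%u\n", pool_excess(1030, 1024));  /* 6, rounded up to 64 */
        printf("%u\n", pool_excess(900, 1024));   /* 0: pool is under the cap */
        return 0;
}
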
/linux-4.1.27/include/linux/
iommu-common.h
42 unsigned long npages,
48 u64 dma_addr, unsigned long npages,
nvme.h
135 int npages; /* In the PRP list. 0 means small pool in use */ member in struct:nvme_iod
kvm_host.h
286 unsigned long npages; member in struct:kvm_memory_slot
296 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; kvm_dirty_bitmap_bytes()
439 memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
520 unsigned long npages);
820 gfn < memslots[slot].base_gfn + memslots[slot].npages) search_memslots()
833 gfn < memslots[start].base_gfn + memslots[start].npages) { search_memslots()
efi.h
1040 static inline void memrange_efi_to_native(u64 *addr, u64 *npages) memrange_efi_to_native() argument
1042 *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); memrange_efi_to_native()
nfs_xdr.h
713 unsigned int npages; member in struct:nfs3_setaclargs
1311 unsigned int npages; /* Max length of pagevec */ member in struct:nfs_page_array
mm.h
1862 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
icm.c
59 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, mlx4_free_icm_pages()
62 for (i = 0; i < chunk->npages; ++i) mlx4_free_icm_pages()
71 for (i = 0; i < chunk->npages; ++i) mlx4_free_icm_coherent()
127 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, mlx4_alloc_icm() argument
153 while (npages > 0) { mlx4_alloc_icm()
168 chunk->npages = 0; mlx4_alloc_icm()
173 while (1 << cur_order > npages) mlx4_alloc_icm()
178 &chunk->mem[chunk->npages], mlx4_alloc_icm()
181 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], mlx4_alloc_icm()
192 ++chunk->npages; mlx4_alloc_icm()
196 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { mlx4_alloc_icm()
198 chunk->npages, mlx4_alloc_icm()
205 if (chunk->npages == MLX4_ICM_CHUNK_LEN) mlx4_alloc_icm()
208 npages -= 1 << cur_order; mlx4_alloc_icm()
213 chunk->npages, mlx4_alloc_icm()
328 for (i = 0; i < chunk->npages; ++i) { mlx4_table_find()
mr.c
197 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, mlx4_mtt_init() argument
202 if (!npages) { mlx4_mtt_init()
209 for (mtt->order = 0, i = 1; i < npages; i <<= 1) mlx4_mtt_init()
420 u64 iova, u64 size, u32 access, int npages, mlx4_mr_alloc_reserved()
430 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); mlx4_mr_alloc_reserved()
530 int npages, int page_shift, struct mlx4_mr *mr) mlx4_mr_alloc()
540 access, npages, page_shift, mr); mlx4_mr_alloc()
592 u64 iova, u64 size, int npages, mlx4_mr_rereg_mem_write()
597 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); mlx4_mr_rereg_mem_write()
695 int start_index, int npages, u64 *page_list) mlx4_write_mtt_chunk()
709 npages * sizeof (u64), DMA_TO_DEVICE); mlx4_write_mtt_chunk()
711 for (i = 0; i < npages; ++i) mlx4_write_mtt_chunk()
715 npages * sizeof (u64), DMA_TO_DEVICE); mlx4_write_mtt_chunk()
721 int start_index, int npages, u64 *page_list) __mlx4_write_mtt()
733 chunk = min_t(int, max_mtts_first_page, npages); __mlx4_write_mtt()
735 while (npages > 0) { __mlx4_write_mtt()
739 npages -= chunk; __mlx4_write_mtt()
743 chunk = min_t(int, mtts_per_page, npages); __mlx4_write_mtt()
749 int start_index, int npages, u64 *page_list) mlx4_write_mtt()
766 while (npages > 0) { mlx4_write_mtt()
768 npages); mlx4_write_mtt()
780 npages -= chunk; mlx4_write_mtt()
788 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); mlx4_write_mtt()
799 page_list = kmalloc(buf->npages * sizeof *page_list, mlx4_buf_write_mtt()
804 for (i = 0; i < buf->npages; ++i) mlx4_buf_write_mtt()
810 err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); mlx4_buf_write_mtt()
973 int npages, u64 iova) mlx4_check_fmr()
977 if (npages > fmr->max_pages) mlx4_check_fmr()
988 for (i = 0; i < npages; ++i) { mlx4_check_fmr()
1000 int npages, u64 iova, u32 *lkey, u32 *rkey) mlx4_map_phys_fmr()
1005 err = mlx4_check_fmr(fmr, page_list, npages, iova); mlx4_map_phys_fmr()
1021 npages * sizeof(u64), DMA_TO_DEVICE); mlx4_map_phys_fmr()
1023 for (i = 0; i < npages; ++i) mlx4_map_phys_fmr()
1027 npages * sizeof(u64), DMA_TO_DEVICE); mlx4_map_phys_fmr()
1031 fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); mlx4_map_phys_fmr()
419 mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) mlx4_mr_alloc_reserved() argument
529 mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int npages, int page_shift, struct mlx4_mr *mr) mlx4_mr_alloc() argument
591 mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, u64 iova, u64 size, int npages, int page_shift, struct mlx4_mpt_entry *mpt_entry) mlx4_mr_rereg_mem_write() argument
694 mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) mlx4_write_mtt_chunk() argument
720 __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) __mlx4_write_mtt() argument
748 mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int start_index, int npages, u64 *page_list) mlx4_write_mtt() argument
972 mlx4_check_fmr(struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova) mlx4_check_fmr() argument
999 mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, int npages, u64 iova, u32 *lkey, u32 *rkey) mlx4_map_phys_fmr() argument
icm.h
52 int npages; member in struct:mlx4_icm_chunk
70 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
eq.c
926 int npages; mlx4_create_eq() local
938 npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; mlx4_create_eq()
940 eq->page_list = kmalloc(npages * sizeof *eq->page_list, mlx4_create_eq()
945 for (i = 0; i < npages; ++i) mlx4_create_eq()
948 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); mlx4_create_eq()
957 for (i = 0; i < npages; ++i) { mlx4_create_eq()
981 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); mlx4_create_eq()
985 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); mlx4_create_eq()
1025 for (i = 0; i < npages; ++i) mlx4_create_eq()
1050 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; mlx4_free_eq() local
1060 for (i = 0; i < npages; ++i) mlx4_free_eq()
alloc.c
593 buf->npages = 1; mlx4_buf_alloc()
604 buf->npages *= 2; mlx4_buf_alloc()
613 buf->npages = buf->nbufs; mlx4_buf_alloc()
806 err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, mlx4_alloc_hwq_res()
mlx4.h
997 int start_index, int npages, u64 *page_list);
resource_tracker.c
3021 int npages = vhcr->in_modifier; mlx4_WRITE_MTT_wrapper() local
3024 err = get_containing_mtt(dev, slave, start, npages, &rmtt); mlx4_WRITE_MTT_wrapper()
3035 for (i = 0; i < npages; ++i) mlx4_WRITE_MTT_wrapper()
3038 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, mlx4_WRITE_MTT_wrapper()
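
mlx4_mtt_init() in mr.c above sizes the MTT by rounding npages up to the next power of two and keeping its log2 as the order; the listing shows the loop header, and the obvious body is a single increment. The loop in isolation, with sample values:

#include <stdio.h>

/* Order of the smallest power-of-two MTT segment that can hold npages entries,
 * mirroring the for (order = 0, i = 1; i < npages; i <<= 1) loop in mlx4_mtt_init(). */
static int mtt_order(int npages)
{
        int order = 0;
        int i;

        for (i = 1; i < npages; i <<= 1)
                ++order;
        return order;
}

int main(void)
{
        printf("%d %d %d\n", mtt_order(1), mtt_order(5), mtt_order(8));  /* 0 3 3 */
        return 0;
}
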
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
sec_bulk.c
174 static void enc_pools_release_free_pages(long npages) enc_pools_release_free_pages() argument
179 LASSERT(npages > 0); enc_pools_release_free_pages()
180 LASSERT(npages <= page_pools.epp_free_pages); enc_pools_release_free_pages()
186 page_pools.epp_free_pages -= npages; enc_pools_release_free_pages()
187 page_pools.epp_total_pages -= npages; enc_pools_release_free_pages()
197 while (npages--) { enc_pools_release_free_pages()
276 int npages_to_npools(unsigned long npages) npages_to_npools() argument
278 return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL); npages_to_npools()
306 * merge @npools pointed by @pools which contains @npages new pages
312 static void enc_pools_insert(struct page ***pools, int npools, int npages) enc_pools_insert() argument
318 LASSERT(npages > 0); enc_pools_insert()
319 LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages); enc_pools_insert()
320 LASSERT(npages_to_npools(npages) == npools); enc_pools_insert()
338 ng_idx = (npages - 1) % PAGES_PER_POOL; enc_pools_insert()
366 end_npools = (page_pools.epp_total_pages + npages + PAGES_PER_POOL - 1) enc_pools_insert()
380 page_pools.epp_total_pages += npages; enc_pools_insert()
381 page_pools.epp_free_pages += npages; enc_pools_insert()
387 CDEBUG(D_SEC, "add %d pages to total %lu\n", npages, enc_pools_insert()
393 static int enc_pools_add_pages(int npages) enc_pools_add_pages() argument
400 if (npages < PTLRPC_MAX_BRW_PAGES) enc_pools_add_pages()
401 npages = PTLRPC_MAX_BRW_PAGES; enc_pools_add_pages()
405 if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages) enc_pools_add_pages()
406 npages = page_pools.epp_max_pages - page_pools.epp_total_pages; enc_pools_add_pages()
407 LASSERT(npages > 0); enc_pools_add_pages()
411 npools = npages_to_npools(npages); enc_pools_add_pages()
421 for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) { enc_pools_add_pages()
430 LASSERT(alloced == npages); enc_pools_add_pages()
432 enc_pools_insert(pools, npools, npages); enc_pools_add_pages()
433 CDEBUG(D_SEC, "added %d pages into pools\n", npages); enc_pools_add_pages()
442 CERROR("Failed to allocate %d enc pages\n", npages); enc_pools_add_pages()
ptlrpc_internal.h
56 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
client.c
100 struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw, ptlrpc_new_bulk() argument
106 OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages])); ptlrpc_new_bulk()
112 desc->bd_max_iov = npages; ptlrpc_new_bulk()
129 * can fit \a npages * pages. \a type is bulk type. \a portal is where
135 unsigned npages, unsigned max_brw, ptlrpc_prep_bulk_imp()
142 desc = ptlrpc_new_bulk(npages, max_brw, type, portal); ptlrpc_prep_bulk_imp()
134 ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, unsigned npages, unsigned max_brw, unsigned type, unsigned portal) ptlrpc_prep_bulk_imp() argument
/linux-4.1.27/arch/powerpc/kvm/
book3s_64_vio.c
104 long npages; kvm_vm_ioctl_create_spapr_tce() local
114 npages = kvmppc_stt_npages(args->window_size); kvm_vm_ioctl_create_spapr_tce()
116 stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), kvm_vm_ioctl_create_spapr_tce()
125 for (i = 0; i < npages; i++) { kvm_vm_ioctl_create_spapr_tce()
143 for (i = 0; i < npages; i++) kvm_vm_ioctl_create_spapr_tce()
book3s_64_mmu_hv.c
178 unsigned long npages; kvmppc_map_vrma() local
188 npages = memslot->npages >> (porder - PAGE_SHIFT); kvmppc_map_vrma()
191 if (npages > 1ul << (40 - porder)) kvmppc_map_vrma()
192 npages = 1ul << (40 - porder); kvmppc_map_vrma()
194 if (npages > kvm->arch.hpt_mask + 1) kvmppc_map_vrma()
195 npages = kvm->arch.hpt_mask + 1; kvmppc_map_vrma()
202 for (i = 0; i < npages; ++i) { kvmppc_map_vrma()
448 long index, ret, npages; kvmppc_book3s_hv_page_fault() local
513 npages = get_user_pages_fast(hva, 1, writing, pages); kvmppc_book3s_hv_page_fault()
514 if (npages < 1) { kvmppc_book3s_hv_page_fault()
660 memslot->npages * sizeof(*memslot->arch.rmap)); kvm_for_each_memslot()
684 (memslot->npages << PAGE_SHIFT)); kvm_for_each_memslot()
796 for (n = memslot->npages; n; --n) { kvmppc_core_flush_memslot_hv()
1013 gfn >= memslot->base_gfn + memslot->npages) harvest_vpa_dirty()
1030 for (i = 0; i < memslot->npages; ++i) { kvmppc_hv_get_dirty_log()
1031 int npages = kvm_test_clear_dirty_npages(kvm, rmapp); kvmppc_hv_get_dirty_log() local
1033 * Note that if npages > 0 then i must be a multiple of npages, kvmppc_hv_get_dirty_log()
1037 if (npages && map) kvmppc_hv_get_dirty_log()
1038 for (j = i; npages; ++j, --npages) kvmppc_hv_get_dirty_log()
1061 int npages; kvmppc_pin_guest_page() local
1070 npages = get_user_pages_fast(hva, 1, 1, pages); kvmppc_pin_guest_page()
1071 if (npages < 1) kvmppc_pin_guest_page()
book3s_hv_rm_mmu.c
376 long npages, int global, bool need_sync) do_tlbies()
385 for (i = 0; i < npages; ++i) do_tlbies()
393 for (i = 0; i < npages; ++i) do_tlbies()
375 do_tlbies(struct kvm *kvm, unsigned long *rbvalues, long npages, int global, bool need_sync) do_tlbies() argument
book3s.c
748 unsigned long npages) kvmppc_core_create_memslot()
750 return kvm->arch.kvm_ops->create_memslot(slot, npages); kvmppc_core_create_memslot()
747 kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvmppc_core_create_memslot() argument
book3s_pr.c
266 (memslot->npages << PAGE_SHIFT)); kvm_for_each_memslot()
1551 ga_end = ga + (memslot->npages << PAGE_SHIFT); kvm_vm_ioctl_get_dirty_log_pr()
1593 unsigned long npages) kvmppc_core_create_memslot_pr()
1592 kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, unsigned long npages) kvmppc_core_create_memslot_pr() argument
powerpc.c
591 unsigned long npages) kvm_arch_create_memslot()
593 return kvmppc_core_create_memslot(kvm, slot, npages); kvm_arch_create_memslot()
590 kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvm_arch_create_memslot() argument
book3s_hv.c
2372 unsigned long npages) kvmppc_core_create_memslot_hv()
2374 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); kvmppc_core_create_memslot_hv()
2392 unsigned long npages = mem->memory_size >> PAGE_SHIFT; kvmppc_core_commit_memory_region_hv() local
2395 if (npages && old->npages) { kvmppc_core_commit_memory_region_hv()
2371 kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, unsigned long npages) kvmppc_core_create_memslot_hv() argument
e500_mmu_host.c
384 slot_end = slot_start + slot->npages; kvmppc_e500_shadow_map()
booke.c
1780 unsigned long npages) kvmppc_core_create_memslot()
1779 kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvmppc_core_create_memslot() argument
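
Across the KVM hits, a memory slot is essentially the pair (base_gfn, npages): kvmppc_e500_shadow_map() computes slot_end = slot_start + slot->npages, and search_memslots() in kvm_host.h tests gfn < base_gfn + npages. A minimal sketch of that containment check (struct and names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

typedef unsigned long long gfn_t;

/* Hypothetical stand-in for the (base_gfn, npages) part of struct kvm_memory_slot. */
struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
};

static bool gfn_in_slot(const struct memslot *slot, gfn_t gfn)
{
        return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
}

int main(void)
{
        struct memslot slot = { .base_gfn = 0x100, .npages = 16 };

        printf("%d %d\n", gfn_in_slot(&slot, 0x10f), gfn_in_slot(&slot, 0x110));  /* 1 0 */
        return 0;
}
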
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_allocator.c
199 int npages, shift; mthca_buf_alloc() local
206 npages = 1; mthca_buf_alloc()
220 npages *= 2; mthca_buf_alloc()
223 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); mthca_buf_alloc()
227 for (i = 0; i < npages; ++i) mthca_buf_alloc()
231 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; mthca_buf_alloc()
234 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); mthca_buf_alloc()
238 buf->page_list = kmalloc(npages * sizeof *buf->page_list, mthca_buf_alloc()
243 for (i = 0; i < npages; ++i) mthca_buf_alloc()
246 for (i = 0; i < npages; ++i) { mthca_buf_alloc()
261 dma_list, shift, npages, mthca_buf_alloc()
mthca_memfree.c
69 pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, mthca_free_icm_pages()
72 for (i = 0; i < chunk->npages; ++i) mthca_free_icm_pages()
81 for (i = 0; i < chunk->npages; ++i) { mthca_free_icm_coherent()
137 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, mthca_alloc_icm() argument
157 while (npages > 0) { mthca_alloc_icm()
165 chunk->npages = 0; mthca_alloc_icm()
170 while (1 << cur_order > npages) mthca_alloc_icm()
175 &chunk->mem[chunk->npages], mthca_alloc_icm()
178 ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], mthca_alloc_icm()
182 ++chunk->npages; mthca_alloc_icm()
186 else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) { mthca_alloc_icm()
188 chunk->npages, mthca_alloc_icm()
195 if (chunk->npages == MTHCA_ICM_CHUNK_LEN) mthca_alloc_icm()
198 npages -= 1 << cur_order; mthca_alloc_icm()
208 chunk->npages, mthca_alloc_icm()
298 for (i = 0; i < chunk->npages; ++i) { mthca_table_find()
526 int npages; mthca_init_user_db_tab() local
532 npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; mthca_init_user_db_tab()
533 db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL); mthca_init_user_db_tab()
538 for (i = 0; i < npages; ++i) { mthca_init_user_db_tab()
590 start = dev->db_tab->npages - 1; mthca_alloc_db()
713 dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; mthca_init_db_tab()
715 dev->db_tab->min_group2 = dev->db_tab->npages - 1; mthca_init_db_tab()
717 dev->db_tab->page = kmalloc(dev->db_tab->npages * mthca_init_db_tab()
725 for (i = 0; i < dev->db_tab->npages; ++i) mthca_init_db_tab()
744 for (i = 0; i < dev->db_tab->npages; ++i) { mthca_cleanup_db_tab()
mthca_memfree.h
53 int npages; member in struct:mthca_icm_chunk
82 struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
145 int npages; member in struct:mthca_db_table
mthca_eq.c
470 int npages; mthca_create_eq() local
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; mthca_create_eq()
482 eq->page_list = kmalloc(npages * sizeof *eq->page_list, mthca_create_eq()
487 for (i = 0; i < npages; ++i) mthca_create_eq()
490 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); mthca_create_eq()
499 for (i = 0; i < npages; ++i) { mthca_create_eq()
519 dma_list, PAGE_SHIFT, npages, mthca_create_eq()
520 0, npages * PAGE_SIZE, mthca_create_eq()
571 for (i = 0; i < npages; ++i) mthca_create_eq()
593 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / mthca_free_eq() local
619 for (i = 0; i < npages; ++i) mthca_free_eq()
mthca_provider.c
904 int npages; mthca_reg_phys_mr() local
931 npages = 0; mthca_reg_phys_mr()
933 npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift; mthca_reg_phys_mr()
935 if (!npages) mthca_reg_phys_mr()
938 page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); mthca_reg_phys_mr()
952 "in PD %x; shift %d, npages %d.\n", mthca_reg_phys_mr()
956 shift, npages); mthca_reg_phys_mr()
960 page_list, shift, npages, mthca_reg_phys_mr()
/linux-4.1.27/arch/powerpc/kernel/
iommu.c
178 unsigned long npages, iommu_range_alloc()
185 int largealloc = npages > 15; iommu_range_alloc()
198 if (unlikely(npages == 0)) { iommu_range_alloc()
259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, iommu_range_alloc()
285 end = n + npages; iommu_range_alloc()
307 void *page, unsigned int npages, iommu_alloc()
316 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); iommu_alloc()
325 build_fail = ppc_md.tce_build(tbl, entry, npages, iommu_alloc()
335 __iommu_free(tbl, ret, npages); iommu_alloc()
350 unsigned int npages) iommu_free_check()
357 if (((free_entry + npages) > tbl->it_size) || iommu_free_check()
397 unsigned int npages) __iommu_free()
408 if (!iommu_free_check(tbl, dma_addr, npages)) __iommu_free()
411 ppc_md.tce_free(tbl, entry, npages); __iommu_free()
414 bitmap_clear(tbl->it_map, free_entry, npages); __iommu_free()
419 unsigned int npages) iommu_free()
421 __iommu_free(tbl, dma_addr, npages); iommu_free()
460 unsigned long vaddr, npages, entry, slen; for_each_sg() local
470 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); for_each_sg()
475 entry = iommu_range_alloc(dev, tbl, npages, &handle, for_each_sg()
484 "vaddr %lx npages %lu\n", tbl, vaddr, for_each_sg()
485 npages); for_each_sg()
495 npages, entry, dma_addr); for_each_sg()
498 build_fail = ppc_md.tce_build(tbl, entry, npages, for_each_sg()
559 unsigned long vaddr, npages; for_each_sg() local
562 npages = iommu_num_pages(s->dma_address, s->dma_length, for_each_sg()
564 __iommu_free(tbl, vaddr, npages); for_each_sg()
588 unsigned int npages; ppc_iommu_unmap_sg() local
593 npages = iommu_num_pages(dma_handle, sg->dma_length, ppc_iommu_unmap_sg()
595 __iommu_free(tbl, dma_handle, npages); ppc_iommu_unmap_sg()
764 unsigned int npages, align; iommu_map_page() local
770 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); iommu_map_page()
778 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, iommu_map_page()
784 "vaddr %p npages %d\n", tbl, vaddr, iommu_map_page()
785 npages); iommu_map_page()
798 unsigned int npages; iommu_unmap_page() local
803 npages = iommu_num_pages(dma_handle, size, iommu_unmap_page()
805 iommu_free(tbl, dma_handle, npages); iommu_unmap_page()
932 unsigned long npages) iommu_tce_clear_param_check()
945 if ((ioba + npages) > (tbl->it_offset + tbl->it_size)) iommu_tce_clear_param_check()
176 iommu_range_alloc(struct device *dev, struct iommu_table *tbl, unsigned long npages, unsigned long *handle, unsigned long mask, unsigned int align_order) iommu_range_alloc() argument
306 iommu_alloc(struct device *dev, struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, unsigned long mask, unsigned int align_order, struct dma_attrs *attrs) iommu_alloc() argument
349 iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) iommu_free_check() argument
396 __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) __iommu_free() argument
418 iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) iommu_free() argument
930 iommu_tce_clear_param_check(struct iommu_table *tbl, unsigned long ioba, unsigned long tce_value, unsigned long npages) iommu_tce_clear_param_check() argument
/linux-4.1.27/arch/sparc/mm/
iommu.c
177 static u32 iommu_get_one(struct device *dev, struct page *page, int npages) iommu_get_one() argument
186 ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page)); iommu_get_one()
194 for (i = 0; i < npages; i++) { iommu_get_one()
202 iommu_flush_iotlb(iopte0, npages); iommu_get_one()
210 int npages; iommu_get_scsi_one() local
215 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_get_scsi_one()
217 busa = iommu_get_one(dev, page, npages); iommu_get_scsi_one()
283 static void iommu_release_one(struct device *dev, u32 busa, int npages) iommu_release_one() argument
291 for (i = 0; i < npages; i++) { iommu_release_one()
296 bit_map_clear(&iommu->usemap, ioptex, npages); iommu_release_one()
302 int npages; iommu_release_scsi_one() local
305 npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_scsi_one()
306 iommu_release_one(dev, vaddr & PAGE_MASK, npages); iommu_release_scsi_one()
io-unit.c
96 int i, j, k, npages; iounit_get_area() local
100 npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_get_area()
103 switch (npages) { iounit_get_area()
109 IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages)); iounit_get_area()
116 if (scan + npages > limit) { iounit_get_area()
127 for (k = 1, scan++; k < npages; k++) iounit_get_area()
131 scan -= npages; iounit_get_area()
134 for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) { iounit_get_area()
srmmu.c
951 unsigned long npages; srmmu_paging_init() local
957 npages = max_low_pfn - pfn_base; srmmu_paging_init()
959 zones_size[ZONE_DMA] = npages; srmmu_paging_init()
960 zholes_size[ZONE_DMA] = npages - pages_avail; srmmu_paging_init()
962 npages = highend_pfn - max_low_pfn; srmmu_paging_init()
963 zones_size[ZONE_HIGHMEM] = npages; srmmu_paging_init()
964 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages(); srmmu_paging_init()
/linux-4.1.27/arch/x86/kvm/
iommu.c
44 gfn_t base_gfn, unsigned long npages);
47 unsigned long npages) kvm_pin_pages()
53 end_gfn = gfn + npages; kvm_pin_pages()
65 static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) kvm_unpin_pages() argument
69 for (i = 0; i < npages; ++i) kvm_unpin_pages()
86 end_gfn = gfn + slot->npages; kvm_iommu_map_pages()
272 gfn_t base_gfn, unsigned long npages) kvm_iommu_put_pages()
280 end_gfn = base_gfn + npages; kvm_iommu_put_pages()
316 kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages); kvm_iommu_unmap_pages()
46 kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, unsigned long npages) kvm_pin_pages() argument
271 kvm_iommu_put_pages(struct kvm *kvm, gfn_t base_gfn, unsigned long npages) kvm_iommu_put_pages() argument
paging_tmpl.h
152 int npages; cmpxchg_gpte() local
157 npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page); cmpxchg_gpte()
159 if (unlikely(npages != 1)) cmpxchg_gpte()
mmu.c
1428 (memslot->npages << PAGE_SHIFT)); kvm_for_each_memslot()
4385 last_gfn = memslot->base_gfn + memslot->npages - 1; kvm_mmu_slot_remove_write_access()
4476 last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1, kvm_mmu_zap_collapsible_sptes()
4506 last_gfn = memslot->base_gfn + memslot->npages - 1; kvm_mmu_slot_leaf_clear_dirty()
4544 last_gfn = memslot->base_gfn + memslot->npages - 1; kvm_mmu_slot_largepage_remove_write_access()
4582 last_gfn = memslot->base_gfn + memslot->npages - 1; kvm_mmu_slot_set_dirty()
4831 nr_pages += memslot->npages; kvm_mmu_calculate_mmu_pages()
x86.c
7534 unsigned long npages) kvm_arch_create_memslot()
7543 lpages = gfn_to_index(slot->base_gfn + npages - 1, kvm_arch_create_memslot()
7560 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) kvm_arch_create_memslot()
7617 userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, kvm_arch_prepare_memory_region()
7692 old->npages * PAGE_SIZE); kvm_arch_commit_memory_region()
7533 kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvm_arch_create_memslot() argument
/linux-4.1.27/fs/sysv/
dir.c
73 unsigned long npages = dir_pages(inode); sysv_readdir() local
84 for ( ; n < npages; n++, offset = 0) { sysv_readdir()
137 unsigned long npages = dir_pages(dir); sysv_find_entry() local
144 if (start >= npages) sysv_find_entry()
165 if (++n >= npages) sysv_find_entry()
184 unsigned long npages = dir_pages(dir); sysv_add_link() local
191 for (n = 0; n <= npages; n++) { sysv_add_link()
292 unsigned long i, npages = dir_pages(inode); sysv_empty_dir() local
294 for (i = 0; i < npages; i++) { sysv_empty_dir()
/linux-4.1.27/fs/nfs/
nfs3acl.c
157 unsigned int npages = 1 + ((args.len - 1) >> PAGE_SHIFT); __nfs3_proc_setacls() local
161 args.pages[args.npages] = alloc_page(GFP_KERNEL); __nfs3_proc_setacls()
162 if (args.pages[args.npages] == NULL) __nfs3_proc_setacls()
164 args.npages++; __nfs3_proc_setacls()
165 } while (args.npages < npages); __nfs3_proc_setacls()
197 while (args.npages != 0) { __nfs3_proc_setacls()
198 args.npages--; __nfs3_proc_setacls()
199 __free_page(args.pages[args.npages]); __nfs3_proc_setacls()
fscache.c
372 unsigned npages = *nr_pages; __nfs_readpages_from_fscache() local
376 nfs_i_fscache(inode), npages, inode); __nfs_readpages_from_fscache()
383 if (*nr_pages < npages) __nfs_readpages_from_fscache()
385 npages); __nfs_readpages_from_fscache()
direct.c
271 static void nfs_direct_release_pages(struct page **pages, unsigned int npages) nfs_direct_release_pages() argument
274 for (i = 0; i < npages; i++) nfs_direct_release_pages()
491 unsigned npages, i; nfs_direct_read_schedule_iovec() local
500 npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; nfs_direct_read_schedule_iovec()
501 for (i = 0; i < npages; i++) { nfs_direct_read_schedule_iovec()
524 nfs_direct_release_pages(pagevec, npages); nfs_direct_read_schedule_iovec()
878 unsigned npages, i; nfs_direct_write_schedule_iovec() local
887 npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; nfs_direct_write_schedule_iovec()
888 for (i = 0; i < npages; i++) { nfs_direct_write_schedule_iovec()
915 nfs_direct_release_pages(pagevec, npages); nfs_direct_write_schedule_iovec()
read.c
377 unsigned long npages; nfs_readpages() local
415 npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> nfs_readpages()
417 nfs_add_stats(inode, NFSIOS_READPAGES, npages); nfs_readpages()
dir.c
586 void nfs_readdir_free_pagearray(struct page **pages, unsigned int npages) nfs_readdir_free_pagearray() argument
589 for (i = 0; i < npages; i++) nfs_readdir_free_pagearray()
595 unsigned int npages) nfs_readdir_free_large_page()
597 nfs_readdir_free_pagearray(pages, npages); nfs_readdir_free_large_page()
605 int nfs_readdir_large_page(struct page **pages, unsigned int npages) nfs_readdir_large_page() argument
609 for (i = 0; i < npages; i++) { nfs_readdir_large_page()
594 nfs_readdir_free_large_page(void *ptr, struct page **pages, unsigned int npages) nfs_readdir_free_large_page() argument
pagelist.c
34 p->npages = pagecount; nfs_pgarray_set()
40 p->npages = 0; nfs_pgarray_set()
nfs4proc.c
4574 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); __nfs4_get_acl_uncached() local
4579 if (npages == 0) __nfs4_get_acl_uncached()
4580 npages = 1; __nfs4_get_acl_uncached()
4581 if (npages > ARRAY_SIZE(pages)) __nfs4_get_acl_uncached()
4584 for (i = 0; i < npages; i++) { __nfs4_get_acl_uncached()
4595 args.acl_len = npages * PAGE_SIZE; __nfs4_get_acl_uncached()
4598 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", __nfs4_get_acl_uncached()
4599 __func__, buf, buflen, npages, args.acl_len); __nfs4_get_acl_uncached()
4624 for (i = 0; i < npages; i++) __nfs4_get_acl_uncached()
4681 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); __nfs4_proc_set_acl() local
4686 if (npages > ARRAY_SIZE(pages)) __nfs4_proc_set_acl()
nfs3xdr.c
1342 if (args->npages != 0) nfs3_xdr_enc_setacl3args()
/linux-4.1.27/arch/arm64/kernel/
efi.c
171 u64 paddr, npages, size; reserve_regions() local
178 npages = md->num_pages; reserve_regions()
184 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, reserve_regions()
188 memrange_efi_to_native(&paddr, &npages); reserve_regions()
189 size = npages << PAGE_SHIFT; reserve_regions()
237 u64 paddr, npages, size; efi_virtmap_init() local
246 npages = md->num_pages; efi_virtmap_init()
247 memrange_efi_to_native(&paddr, &npages); efi_virtmap_init()
248 size = npages << PAGE_SHIFT; efi_virtmap_init()
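
reserve_regions() and efi_virtmap_init() above both go through the memrange_efi_to_native() helper listed earlier under include/linux/efi.h: EFI memory descriptors always count 4 KiB pages, so the range must be widened to whole native pages before it is mapped. A user-space rendering of that formula, assuming 64 KiB native pages for the example (the in-kernel helper also page-aligns *addr, which this sketch leaves out):

#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SHIFT 12UL                /* EFI always uses 4 KiB pages */
#define PAGE_SHIFT     16UL                /* assumption: 64 KiB native pages, e.g. arm64 */
#define PAGE_SIZE      (1UL << PAGE_SHIFT)
#define PFN_UP(x)      (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)

/* The npages formula from efi.h in the listing, applied to a copy of the range. */
static uint64_t efi_range_to_native_npages(uint64_t addr, uint64_t efi_npages)
{
        return PFN_UP(addr + (efi_npages << EFI_PAGE_SHIFT)) - PFN_DOWN(addr);
}

int main(void)
{
        /* An 8 KiB EFI range starting 4 KiB into a 64 KiB page fits in one native page. */
        printf("%llu\n", (unsigned long long)efi_range_to_native_npages(0x1000, 2));
        return 0;
}
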
/linux-4.1.27/drivers/infiniband/hw/mlx5/
mr.c
140 int npages = 1 << ent->order; add_keys() local
163 in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2); add_keys()
675 int npages; get_octo_len() local
678 npages = ALIGN(len + offset, page_size) >> ilog2(page_size); get_octo_len()
679 return (npages + 1) / 2; get_octo_len()
710 umrwr->npages = n; prep_umr_reg_wqe()
752 u64 virt_addr, u64 len, int npages, reg_umr()
787 size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); reg_umr()
797 memset(pas + npages, 0, size - npages * sizeof(u64)); reg_umr()
807 prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift, reg_umr()
847 int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, mlx5_ib_update_mtt() argument
872 npages += start_page_index & page_index_mask; mlx5_ib_update_mtt()
876 pages_to_map = ALIGN(npages, page_index_alignment); mlx5_ib_update_mtt()
908 npages = min_t(size_t, mlx5_ib_update_mtt()
914 start_page_index, npages, pas, mlx5_ib_update_mtt()
918 memset(pas + npages, 0, size - npages * sizeof(u64)); mlx5_ib_update_mtt()
927 sg.length = ALIGN(npages * sizeof(u64), mlx5_ib_update_mtt()
936 umrwr->npages = sg.length / sizeof(u64); mlx5_ib_update_mtt()
970 int npages, int page_shift, reg_create()
985 inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2; reg_create()
1040 int npages; mlx5_ib_reg_user_mr() local
1054 mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order); mlx5_ib_reg_user_mr()
1055 if (!npages) { mlx5_ib_reg_user_mr()
1061 mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", mlx5_ib_reg_user_mr()
1062 npages, ncont, order, page_shift); mlx5_ib_reg_user_mr()
1089 mr->npages = npages; mlx5_ib_reg_user_mr()
1090 atomic_add(npages, &dev->mdev->priv.reg_pages); mlx5_ib_reg_user_mr()
1191 int npages = mr->npages; mlx5_ib_dereg_mr() local
1209 atomic_sub(npages, &dev->mdev->priv.reg_pages); mlx5_ib_dereg_mr()
1220 atomic_sub(npages, &dev->mdev->priv.reg_pages); mlx5_ib_dereg_mr()
751 reg_umr(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags) reg_umr() argument
968 reg_create(struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, int page_shift, int access_flags) reg_create() argument
srq.c
84 int npages; create_srq_user() local
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, create_srq_user()
160 int npages; create_srq_kernel() local
185 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); create_srq_kernel()
186 mlx5_ib_dbg(dev, "buf_size %d, page_shift %d, npages %d, calc npages %d\n", create_srq_kernel()
187 buf_size, page_shift, srq->buf.npages, npages); create_srq_kernel()
188 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * npages; create_srq_kernel()
odp.c
194 int npages = 0, ret = 0; pagefault_single_data_segment() local
244 npages = ib_umem_odp_map_dma_pages(mr->umem, io_virt, bcnt, pagefault_single_data_segment()
246 if (npages < 0) { pagefault_single_data_segment()
247 ret = npages; pagefault_single_data_segment()
251 if (npages > 0) { pagefault_single_data_segment()
259 ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0); pagefault_single_data_segment()
271 u32 new_mappings = npages * PAGE_SIZE - pagefault_single_data_segment()
296 return ret ? ret : npages; pagefault_single_data_segment()
322 int ret = 0, npages = 0; pagefault_data_segments() local
380 npages += ret; pagefault_data_segments()
383 return ret < 0 ? ret : npages; pagefault_data_segments()
cq.c
612 int npages; create_cq_user() local
646 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, create_cq_user()
648 mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", create_cq_user()
649 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); create_cq_user()
712 *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages; create_cq_kernel()
952 int npages; resize_user() local
969 mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, resize_user()
1102 npas = cq->resize_buf->buf.npages; mlx5_ib_resize_cq()
mlx5_ib.h
256 unsigned int npages; member in struct:mlx5_umr_wr
324 int npages; member in struct:mlx5_ib_mr
572 int npages, int zap);
585 int npages, u64 iova);
qp.c
610 int npages; create_user_qp() local
666 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, create_user_qp()
673 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n", create_user_qp()
674 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); create_user_qp()
778 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; create_kernel_qp()
1865 static __be16 get_klm_octo(int npages) get_klm_octo() argument
1867 return cpu_to_be16(ALIGN(npages, 8) / 2); get_klm_octo()
1977 umr->klm_octowords = get_klm_octo(umrwr->npages); set_reg_umr_segment()
/linux-4.1.27/drivers/infiniband/core/
umem.c
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { __ib_umem_release()
92 unsigned long npages; ib_umem_get() local
164 npages = ib_umem_num_pages(umem); ib_umem_get()
168 locked = npages + current->mm->pinned_vm; ib_umem_get()
178 if (npages == 0) { ib_umem_get()
183 ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); ib_umem_get()
190 while (npages) { ib_umem_get()
192 min_t(unsigned long, npages, ib_umem_get()
199 umem->npages += ret; ib_umem_get()
201 npages -= ret; ib_umem_get()
216 umem->npages,
umem_odp.c
528 int j, k, ret = 0, start_idx, npages = 0; ib_umem_odp_map_dma_pages() local
575 npages = get_user_pages(owning_process, owning_mm, user_virt, ib_umem_odp_map_dma_pages()
581 if (npages < 0) ib_umem_odp_map_dma_pages()
584 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); ib_umem_odp_map_dma_pages()
585 user_virt += npages << PAGE_SHIFT; ib_umem_odp_map_dma_pages()
587 for (j = 0; j < npages; ++j) { ib_umem_odp_map_dma_pages()
599 for (++j; j < npages; ++j) ib_umem_odp_map_dma_pages()
606 if (npages < 0 && k == start_idx) ib_umem_odp_map_dma_pages()
607 ret = npages; ib_umem_odp_map_dma_pages()
/linux-4.1.27/crypto/
af_alg.c
395 int npages, i; af_alg_make_sg() local
401 npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; af_alg_make_sg()
402 if (WARN_ON(npages == 0)) af_alg_make_sg()
405 sg_init_table(sgl->sg, npages + 1); af_alg_make_sg()
407 for (i = 0, len = n; i < npages; i++) { af_alg_make_sg()
415 sg_mark_end(sgl->sg + npages - 1); af_alg_make_sg()
416 sgl->npages = npages; af_alg_make_sg()
424 sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); af_alg_link_sg()
425 sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); af_alg_link_sg()
433 for (i = 0; i < sgl->npages; i++) af_alg_free_sg()
/linux-4.1.27/arch/x86/kernel/
pci-calgary_64.c
204 unsigned long start_addr, unsigned int npages) iommu_range_reserve()
216 end = index + npages; iommu_range_reserve()
222 bitmap_set(tbl->it_map, index, npages); iommu_range_reserve()
229 unsigned int npages) iommu_range_alloc()
238 BUG_ON(npages == 0); iommu_range_alloc()
243 npages, 0, boundary_size, 0); iommu_range_alloc()
248 npages, 0, boundary_size, 0); iommu_range_alloc()
259 tbl->it_hint = offset + npages; iommu_range_alloc()
268 void *vaddr, unsigned int npages, int direction) iommu_alloc()
273 entry = iommu_range_alloc(dev, tbl, npages); iommu_alloc()
277 npages, tbl); iommu_alloc()
285 tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK, iommu_alloc()
291 unsigned int npages) iommu_free()
307 BUG_ON(entry + npages > tbl->it_size); iommu_free()
309 tce_free(tbl, entry, npages); iommu_free()
313 bitmap_clear(tbl->it_map, entry, npages); iommu_free()
353 unsigned int npages; for_each_sg() local
360 npages = iommu_num_pages(dma, dmalen, PAGE_SIZE); for_each_sg()
361 iommu_free(tbl, dma, npages); for_each_sg()
372 unsigned int npages; calgary_map_sg() local
380 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE); for_each_sg()
382 entry = iommu_range_alloc(dev, tbl, npages); for_each_sg()
392 tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir); for_each_sg()
414 unsigned int npages; calgary_map_page() local
418 npages = iommu_num_pages(uaddr, size, PAGE_SIZE); calgary_map_page()
420 return iommu_alloc(dev, tbl, vaddr, npages, dir); calgary_map_page()
428 unsigned int npages; calgary_unmap_page() local
430 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); calgary_unmap_page()
431 iommu_free(tbl, dma_addr, npages); calgary_unmap_page()
439 unsigned int npages, order; calgary_alloc_coherent() local
443 npages = size >> PAGE_SHIFT; calgary_alloc_coherent()
455 mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL); calgary_alloc_coherent()
471 unsigned int npages; calgary_free_coherent() local
475 npages = size >> PAGE_SHIFT; calgary_free_coherent()
477 iommu_free(tbl, dma_handle, npages); calgary_free_coherent()
730 unsigned int npages; calgary_reserve_regions() local
741 npages = ((1024 - 640) * 1024) >> PAGE_SHIFT; calgary_reserve_regions()
744 npages = (1 * 1024 * 1024) >> PAGE_SHIFT; calgary_reserve_regions()
746 iommu_range_reserve(tbl, start, npages); calgary_reserve_regions()
1546 unsigned int npages; calgary_fixup_one_tce_space() local
1563 npages = resource_size(r) >> PAGE_SHIFT; calgary_fixup_one_tce_space()
1564 npages++; calgary_fixup_one_tce_space()
1566 iommu_range_reserve(tbl, r->start, npages); calgary_fixup_one_tce_space()
203 iommu_range_reserve(struct iommu_table *tbl, unsigned long start_addr, unsigned int npages) iommu_range_reserve() argument
227 iommu_range_alloc(struct device *dev, struct iommu_table *tbl, unsigned int npages) iommu_range_alloc() argument
267 iommu_alloc(struct device *dev, struct iommu_table *tbl, void *vaddr, unsigned int npages, int direction) iommu_alloc() argument
290 iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages) iommu_free() argument
tce_64.c
50 unsigned int npages, unsigned long uaddr, int direction) tce_build()
62 while (npages--) { tce_build()
75 void tce_free(struct iommu_table *tbl, long index, unsigned int npages) tce_free() argument
81 while (npages--) { tce_free()
49 tce_build(struct iommu_table *tbl, unsigned long index, unsigned int npages, unsigned long uaddr, int direction) tce_build() argument
amd_gart_64.c
217 unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); dma_map_area() local
224 iommu_page = alloc_iommu(dev, npages, align_mask); dma_map_area()
234 for (i = 0; i < npages; i++) { dma_map_area()
270 int npages; gart_unmap_page() local
278 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); gart_unmap_page()
279 for (i = 0; i < npages; i++) { gart_unmap_page()
282 free_iommu(iommu_page, npages); gart_unmap_page()
/linux-4.1.27/arch/x86/include/asm/
tce.h
42 unsigned int npages, unsigned long uaddr, int direction);
43 extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
/linux-4.1.27/lib/
iommu-common.c
103 unsigned long npages, iommu_tbl_range_alloc()
116 bool largealloc = (large_pool && npages > iommu_large_alloc); iommu_tbl_range_alloc()
124 if (unlikely(npages == 0)) { iommu_tbl_range_alloc()
183 * (index + npages) < num_tsb_entries iommu_tbl_range_alloc()
189 n = iommu_area_alloc(iommu->map, limit, start, npages, shift, iommu_tbl_range_alloc()
219 end = n + npages; iommu_tbl_range_alloc()
256 unsigned long npages, unsigned long entry) iommu_tbl_range_free()
267 bitmap_clear(iommu->map, entry, npages); iommu_tbl_range_free()
101 iommu_tbl_range_alloc(struct device *dev, struct iommu_map_table *iommu, unsigned long npages, unsigned long *handle, unsigned long mask, unsigned int align_order) iommu_tbl_range_alloc() argument
255 iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr, unsigned long npages, unsigned long entry) iommu_tbl_range_free() argument
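The iommu-common.c hits above carve npages consecutive entries out of a bitmap (via iommu_area_alloc) and release them with bitmap_clear(). A toy first-fit version of that range allocator, assuming a fixed 64-entry table and ignoring the large-pool and hint logic visible in the excerpt:

#include <stdio.h>
#include <string.h>

#define TABLE_ENTRIES 64

static unsigned char map[TABLE_ENTRIES]; /* one byte per entry for clarity */

/* First-fit search for npages consecutive free entries; returns the
 * starting index or -1 if the table cannot satisfy the request. */
static long range_alloc(unsigned long npages)
{
        unsigned long start, i;

        if (npages == 0 || npages > TABLE_ENTRIES)
                return -1;

        for (start = 0; start + npages <= TABLE_ENTRIES; start++) {
                for (i = 0; i < npages; i++)
                        if (map[start + i])
                                break;
                if (i == npages) {
                        memset(&map[start], 1, npages);
                        return (long)start;
                }
        }
        return -1;
}

static void range_free(unsigned long entry, unsigned long npages)
{
        memset(&map[entry], 0, npages);
}

int main(void)
{
        long a = range_alloc(8);
        long b = range_alloc(4);

        printf("a=%ld b=%ld\n", a, b);     /* a=0 b=8                   */
        range_free(a, 8);
        printf("c=%ld\n", range_alloc(2)); /* c=0: freed range is reused */
        return 0;
}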
H A Diov_iter.c754 int npages = 0; iov_iter_npages() local
761 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) iov_iter_npages()
763 if (npages >= maxpages) iov_iter_npages()
766 npages++; iov_iter_npages()
767 if (npages >= maxpages) iov_iter_npages()
771 npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) iov_iter_npages()
773 if (npages >= maxpages) iov_iter_npages()
777 return npages; iov_iter_npages()
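iov_iter_npages() above counts, per segment, the pages between the segment's first and last byte and stops once maxpages is reached. A standalone sketch of that per-segment arithmetic with made-up iovec values (4 KiB pages assumed):

#include <stdio.h>
#include <sys/uio.h>

#define PAGE_SIZE 4096UL /* assumption */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pages touched by an iovec array, clamped to maxpages. */
static int iovec_npages(const struct iovec *iov, int n, int maxpages)
{
        int npages = 0;
        int i;

        for (i = 0; i < n; i++) {
                unsigned long p = (unsigned long)iov[i].iov_base;

                npages += DIV_ROUND_UP(p + iov[i].iov_len, PAGE_SIZE)
                          - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        }
        return npages;
}

int main(void)
{
        struct iovec iov[2] = {
                { (void *)0x10010, 32 }, /* stays inside one page      */
                { (void *)0x20ff0, 32 }, /* crosses into a second page */
        };

        printf("%d\n", iovec_npages(iov, 2, 16)); /* prints 3 */
        return 0;
}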
/linux-4.1.27/arch/tile/kernel/
H A Dmodule.c43 int npages; module_alloc() local
45 npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; module_alloc()
46 pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); module_alloc()
49 for (; i < npages; ++i) { module_alloc()
58 area->nr_pages = npages; module_alloc()
/linux-4.1.27/drivers/infiniband/hw/usnic/
H A Dusnic_uiom.c109 unsigned long npages; usnic_uiom_get_pages() local
129 npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; usnic_uiom_get_pages()
133 locked = npages + current->mm->locked_vm; usnic_uiom_get_pages()
146 while (npages) { usnic_uiom_get_pages()
148 min_t(unsigned long, npages, usnic_uiom_get_pages()
155 npages -= ret; usnic_uiom_get_pages()
220 int npages; __usnic_uiom_reg_release() local
226 npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; __usnic_uiom_reg_release()
228 vpn_last = vpn_start + npages - 1; __usnic_uiom_reg_release()
342 unsigned long npages; usnic_uiom_reg_get() local
357 npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; usnic_uiom_reg_get()
359 vpn_last = vpn_start + npages - 1; usnic_uiom_reg_get()
/linux-4.1.27/fs/minix/
H A Ddir.c91 unsigned long npages = dir_pages(inode); minix_readdir() local
103 for ( ; n < npages; n++, offset = 0) { minix_readdir()
163 unsigned long npages = dir_pages(dir); minix_find_entry() local
171 for (n = 0; n < npages; n++) { minix_find_entry()
212 unsigned long npages = dir_pages(dir); minix_add_link() local
227 for (n = 0; n <= npages; n++) { minix_add_link()
368 unsigned long i, npages = dir_pages(inode); minix_empty_dir() local
373 for (i = 0; i < npages; i++) { minix_empty_dir()
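The directory readers above (minix here; ext2, ufs, nilfs and exofs appear further down) derive npages from the directory inode's size and then walk it page by page. A sketch of that size-to-page-count step and loop shape, assuming the usual round-up-by-page-cache-size definition of dir_pages():

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL /* assumption: 4 KiB page cache pages */

/* Same round-up the fs helpers use: a one-byte directory occupies one page. */
static unsigned long dir_pages(unsigned long i_size)
{
        return (i_size + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE;
}

int main(void)
{
        unsigned long npages = dir_pages(10000); /* 3 pages */
        unsigned long n;

        for (n = 0; n < npages; n++)
                printf("would read and scan directory page %lu\n", n);
        return 0;
}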
/linux-4.1.27/drivers/gpu/drm/udl/
H A Dudl_dmabuf.c221 int npages; udl_prime_create() local
223 npages = size / PAGE_SIZE; udl_prime_create()
226 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); udl_prime_create()
231 obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); udl_prime_create()
233 DRM_ERROR("obj pages is NULL %d\n", npages); udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); udl_prime_create()
/linux-4.1.27/fs/qnx6/
H A Ddir.c122 unsigned long npages = dir_pages(inode); qnx6_readdir() local
131 for ( ; !done && n < npages; n++, start = 0) { qnx6_readdir()
224 unsigned long npages = dir_pages(dir); qnx6_find_entry() local
231 if (npages == 0) qnx6_find_entry()
234 if (start >= npages) qnx6_find_entry()
266 if (++n >= npages) qnx6_find_entry()
/linux-4.1.27/fs/ramfs/
H A Dfile-nommu.c68 unsigned long npages, xpages, loop; ramfs_nommu_expand_for_mapping() local
93 npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; ramfs_nommu_expand_for_mapping()
98 for (loop = npages; loop < xpages; loop++) ramfs_nommu_expand_for_mapping()
102 newsize = PAGE_SIZE * npages; ramfs_nommu_expand_for_mapping()
107 for (loop = 0; loop < npages; loop++) { ramfs_nommu_expand_for_mapping()
126 while (loop < npages) ramfs_nommu_expand_for_mapping()
/linux-4.1.27/arch/powerpc/platforms/pasemi/
H A Diommu.c89 long npages, unsigned long uaddr, iobmap_build()
97 pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr); iobmap_build()
103 while (npages--) { iobmap_build()
118 long npages) iobmap_free()
123 pr_debug("iobmap: free at: %lx, %lx\n", index, npages); iobmap_free()
129 while (npages--) { iobmap_free()
88 iobmap_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) iobmap_build() argument
117 iobmap_free(struct iommu_table *tbl, long index, long npages) iobmap_free() argument
/linux-4.1.27/fs/freevxfs/
H A Dvxfs_lookup.c116 u_long npages, page, nblocks, pblocks, block; vxfs_find_entry() local
121 npages = dir_pages(ip); vxfs_find_entry()
125 for (page = 0; page < npages; page++) { vxfs_find_entry()
243 u_long page, npages, block, pblocks, nblocks, offset; vxfs_readdir() local
261 npages = dir_pages(ip); vxfs_readdir()
269 for (; page < npages; page++, block = 0) { vxfs_readdir()
/linux-4.1.27/arch/powerpc/platforms/pseries/
H A Diommu.c84 long npages, unsigned long uaddr, tce_build_pSeries()
99 while (npages--) { tce_build_pSeries()
114 static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) tce_free_pSeries() argument
120 while (npages--) tce_free_pSeries()
140 long npages, unsigned long uaddr, tce_build_pSeriesLP()
148 long tcenum_start = tcenum, npages_start = npages; tce_build_pSeriesLP()
155 while (npages--) { tce_build_pSeriesLP()
162 (npages_start - (npages + 1))); tce_build_pSeriesLP()
183 long npages, unsigned long uaddr, tce_buildmulti_pSeriesLP()
192 long tcenum_start = tcenum, npages_start = npages; tce_buildmulti_pSeriesLP()
196 if (npages == 1) { tce_buildmulti_pSeriesLP()
197 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, tce_buildmulti_pSeriesLP()
213 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, tce_buildmulti_pSeriesLP()
230 limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE); tce_buildmulti_pSeriesLP()
242 npages -= limit; tce_buildmulti_pSeriesLP()
244 } while (npages > 0 && !rc); tce_buildmulti_pSeriesLP()
251 (npages_start - (npages + limit))); tce_buildmulti_pSeriesLP()
258 printk("\tnpages = 0x%llx\n", (u64)npages); tce_buildmulti_pSeriesLP()
265 static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) tce_free_pSeriesLP() argument
269 while (npages--) { tce_free_pSeriesLP()
284 static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) tce_freemulti_pSeriesLP() argument
288 rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages); tce_freemulti_pSeriesLP()
294 printk("\tnpages = 0x%llx\n", (u64)npages); tce_freemulti_pSeriesLP()
83 tce_build_pSeries(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) tce_build_pSeries() argument
139 tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) tce_build_pSeriesLP() argument
182 tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) tce_buildmulti_pSeriesLP() argument
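The pSeries TCE routines above share one loop shape: build writes npages consecutive translation entries starting at an index, advancing the mapped address by one page per entry, while free clears the same range. A minimal in-memory model of that pattern, with a hypothetical flat table standing in for the hypervisor calls:

#include <stdio.h>
#include <stdint.h>

#define TCE_PAGE_SIZE 4096UL    /* assumption: 4 KiB TCE pages  */
#define TCE_READ      0x1UL     /* illustrative permission bits */
#define TCE_WRITE     0x2UL

static uint64_t tce_table[256]; /* stand-in for the real table  */

static void tce_build(long index, long npages, unsigned long uaddr)
{
        while (npages--) {
                tce_table[index++] = (uaddr & ~(TCE_PAGE_SIZE - 1)) |
                                     TCE_READ | TCE_WRITE;
                uaddr += TCE_PAGE_SIZE;
        }
}

static void tce_free(long index, long npages)
{
        while (npages--)
                tce_table[index++] = 0;
}

int main(void)
{
        tce_build(4, 3, 0x12345000UL);
        printf("entry 5 = 0x%llx\n", (unsigned long long)tce_table[5]);
        tce_free(4, 3);
        printf("entry 5 = 0x%llx\n", (unsigned long long)tce_table[5]);
        return 0;
}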
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
H A Dgk20a.c204 struct nvkm_oclass *oclass, u32 npages, u32 align, gk20a_instobj_ctor_dma()
218 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, gk20a_instobj_ctor_dma()
234 node->r.length = (npages << PAGE_SHIFT) >> 12; gk20a_instobj_ctor_dma()
246 struct nvkm_oclass *oclass, u32 npages, u32 align, gk20a_instobj_ctor_iommu()
256 sizeof(*node) + sizeof(node->pages[0]) * npages, gk20a_instobj_ctor_iommu()
263 for (i = 0; i < npages; i++) { gk20a_instobj_ctor_iommu()
275 ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages, gk20a_instobj_ctor_iommu()
284 for (i = 0; i < npages; i++) { gk20a_instobj_ctor_iommu()
317 for (i = 0; i < npages && node->pages[i] != NULL; i++) gk20a_instobj_ctor_iommu()
203 gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, u32 npages, u32 align, struct gk20a_instobj_priv **_node) gk20a_instobj_ctor_dma() argument
245 gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, u32 npages, u32 align, struct gk20a_instobj_priv **_node) gk20a_instobj_ctor_iommu() argument
/linux-4.1.27/fs/exofs/
H A Ddir.c248 unsigned long npages = dir_pages(inode); exofs_readdir() local
255 for ( ; n < npages; n++, offset = 0) { exofs_readdir()
316 unsigned long npages = dir_pages(dir); exofs_find_entry() local
321 if (npages == 0) exofs_find_entry()
327 if (start >= npages) exofs_find_entry()
351 if (++n >= npages) exofs_find_entry()
441 unsigned long npages = dir_pages(dir); exofs_add_link() local
447 for (n = 0; n <= npages; n++) { exofs_add_link()
618 unsigned long i, npages = dir_pages(inode); exofs_empty_dir() local
620 for (i = 0; i < npages; i++) { exofs_empty_dir()
/linux-4.1.27/arch/arm/kernel/
H A Dprocess.c340 unsigned int npages) sigpage_addr()
350 last = TASK_SIZE - (npages << PAGE_SHIFT); sigpage_addr()
381 unsigned long npages; arch_setup_additional_pages() local
391 npages = 1; /* for sigpage */ arch_setup_additional_pages()
392 npages += vdso_total_pages; arch_setup_additional_pages()
395 hint = sigpage_addr(mm, npages); arch_setup_additional_pages()
396 addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0); arch_setup_additional_pages()
339 sigpage_addr(const struct mm_struct *mm, unsigned int npages) sigpage_addr() argument
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
H A Dmem.c396 struct c4iw_mr *mhp, int shift, int npages) reregister_mem()
401 if (npages > mhp->attr.pbl_size) reregister_mem()
421 static int alloc_pbl(struct c4iw_mr *mhp, int npages) alloc_pbl() argument
424 npages << 3); alloc_pbl()
429 mhp->attr.pbl_size = npages; alloc_pbl()
436 u64 *total_size, int *npages, build_phys_page_list()
473 *npages = 0; build_phys_page_list()
475 *npages += (buffer_list[i].size + build_phys_page_list()
478 if (!*npages) build_phys_page_list()
481 *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); build_phys_page_list()
496 *npages); build_phys_page_list()
513 int npages; c4iw_reregister_phys_mem() local
542 &total_size, &npages, c4iw_reregister_phys_mem()
553 ret = reregister_mem(rhp, php, &mh, shift, npages); c4iw_reregister_phys_mem()
566 mhp->attr.pbl_size = npages; c4iw_reregister_phys_mem()
579 int npages; c4iw_register_phys_mem() local
608 &total_size, &npages, &shift, c4iw_register_phys_mem()
619 ret = alloc_pbl(mhp, npages); c4iw_register_phys_mem()
626 npages); c4iw_register_phys_mem()
639 mhp->attr.pbl_size = npages; c4iw_register_phys_mem()
395 reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift, int npages) reregister_mem() argument
434 build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) build_phys_page_list() argument
/linux-4.1.27/arch/alpha/kernel/
H A Dpci_iommu.c258 long npages, dma_ofs, i; pci_map_single_1()
301 npages = iommu_num_pages(paddr, size, PAGE_SIZE); pci_map_single_1()
306 dma_ofs = iommu_arena_alloc(dev, arena, npages, align); pci_map_single_1()
314 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) pci_map_single_1()
321 cpu_addr, size, npages, ret, __builtin_return_address(0)); pci_map_single_1()
379 long dma_ofs, npages; alpha_pci_unmap_page()
412 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); alpha_pci_unmap_page()
416 iommu_arena_free(arena, dma_ofs, npages); alpha_pci_unmap_page()
427 dma_addr, size, npages, __builtin_return_address(0));
563 long npages, dma_ofs, i; sg_fill()
596 npages = iommu_num_pages(paddr, size, PAGE_SIZE); sg_fill()
597 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0); sg_fill()
613 __va(paddr), size, out->dma_address, npages); sg_fill()
632 npages = iommu_num_pages(paddr, size, PAGE_SIZE); sg_fill()
635 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) sg_fill()
641 last_sg->length, npages); sg_fill()
761 long npages, ofs; alpha_pci_unmap_sg()
787 npages = iommu_num_pages(addr, size, PAGE_SIZE); alpha_pci_unmap_sg()
789 iommu_arena_free(arena, ofs, npages); alpha_pci_unmap_sg()
257 long npages, dma_ofs, i; pci_map_single_1() local
378 long dma_ofs, npages; alpha_pci_unmap_page() local
562 long npages, dma_ofs, i; sg_fill() local
760 long npages, ofs; alpha_pci_unmap_sg() local
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_user_sdma.c277 unsigned long addr, int tlen, int npages) ipath_user_sdma_pin_pages()
283 ret = get_user_pages_fast(addr, npages, 0, pages); ipath_user_sdma_pin_pages()
284 if (ret != npages) { ipath_user_sdma_pin_pages()
294 for (j = 0; j < npages; j++) { ipath_user_sdma_pin_pages()
331 const int npages = ipath_user_sdma_num_pages(iov + idx); ipath_user_sdma_pin_pkt() local
336 npages); ipath_user_sdma_pin_pkt()
355 unsigned long niov, int npages) ipath_user_sdma_init_payload()
359 if (npages >= ARRAY_SIZE(pkt->addr)) ipath_user_sdma_init_payload()
416 int npages = 0; ipath_user_sdma_queue_pkts() local
492 npages++; ipath_user_sdma_queue_pkts()
495 npages++; ipath_user_sdma_queue_pkts()
524 nfrags, npages); ipath_user_sdma_queue_pkts()
275 ipath_user_sdma_pin_pages(const struct ipath_devdata *dd, struct ipath_user_sdma_pkt *pkt, unsigned long addr, int tlen, int npages) ipath_user_sdma_pin_pages() argument
351 ipath_user_sdma_init_payload(const struct ipath_devdata *dd, struct ipath_user_sdma_queue *pq, struct ipath_user_sdma_pkt *pkt, const struct iovec *iov, unsigned long niov, int npages) ipath_user_sdma_init_payload() argument
/linux-4.1.27/arch/powerpc/platforms/powernv/
H A Dpci.c576 static int pnv_tce_build(struct iommu_table *tbl, long index, long npages, pnv_tce_build() argument
592 while (npages--) pnv_tce_build()
606 static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages, pnv_tce_build_vm() argument
611 return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, pnv_tce_build_vm()
615 static void pnv_tce_free(struct iommu_table *tbl, long index, long npages, pnv_tce_free() argument
622 while (npages--) pnv_tce_free()
629 static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages) pnv_tce_free_vm() argument
631 pnv_tce_free(tbl, index, npages, false); pnv_tce_free_vm()
639 static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages, pnv_tce_build_rm() argument
644 return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true); pnv_tce_build_rm()
647 static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages) pnv_tce_free_rm() argument
649 pnv_tce_free(tbl, index, npages, true); pnv_tce_free_rm()
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_prime.c34 int npages = nvbo->bo.num_pages; nouveau_gem_prime_get_sg_table() local
36 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); nouveau_gem_prime_get_sg_table()
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_prime.c35 int npages = bo->tbo.num_pages; radeon_gem_prime_get_sg_table() local
37 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); radeon_gem_prime_get_sg_table()
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_gem.c43 int npages) get_pages_vram()
51 p = drm_malloc_ab(npages, sizeof(struct page *)); get_pages_vram()
56 npages, 0, DRM_MM_SEARCH_DEFAULT); get_pages_vram()
63 for (i = 0; i < npages; i++) { get_pages_vram()
79 int npages = obj->size >> PAGE_SHIFT; get_pages() local
84 p = get_pages_vram(obj, npages); get_pages()
92 msm_obj->sgt = drm_prime_pages_to_sg(p, npages); get_pages()
670 int ret, npages; msm_gem_import() local
686 npages = size / PAGE_SIZE; msm_gem_import()
690 msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *)); msm_gem_import()
696 ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); msm_gem_import()
42 get_pages_vram(struct drm_gem_object *obj, int npages) get_pages_vram() argument
/linux-4.1.27/drivers/staging/android/ion/
H A Dion_heap.c36 int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; ion_heap_map_kernel() local
37 struct page **pages = vmalloc(sizeof(struct page *) * npages); ion_heap_map_kernel()
52 BUG_ON(i >= npages); ion_heap_map_kernel()
56 vaddr = vmap(pages, npages, VM_MAP, pgprot); ion_heap_map_kernel()
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/
H A Decho_client.c170 struct page **pages, int npages, int async);
1172 struct page **pages, int npages, int async) cl_echo_object_brw()
1207 offset + npages * PAGE_CACHE_SIZE - 1, cl_echo_object_brw()
1213 for (i = 0; i < npages; i++) { cl_echo_object_brw()
1551 u32 npages; echo_client_kbrw() local
1577 npages = count >> PAGE_CACHE_SHIFT; echo_client_kbrw()
1582 OBD_ALLOC(pga, npages * sizeof(*pga)); echo_client_kbrw()
1586 OBD_ALLOC(pages, npages * sizeof(*pages)); echo_client_kbrw()
1588 OBD_FREE(pga, npages * sizeof(*pga)); echo_client_kbrw()
1593 i < npages; echo_client_kbrw()
1616 rc = cl_echo_object_brw(eco, rw, offset, pages, npages, async); echo_client_kbrw()
1622 for (i = 0, pgp = pga; i < npages; i++, pgp++) { echo_client_kbrw()
1637 OBD_FREE(pga, npages * sizeof(*pga)); echo_client_kbrw()
1638 OBD_FREE(pages, npages * sizeof(*pages)); echo_client_kbrw()
1654 u64 npages, tot_pages; echo_client_prep_commit() local
1661 npages = batch >> PAGE_CACHE_SHIFT; echo_client_prep_commit()
1664 OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local)); echo_client_prep_commit()
1665 OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote)); echo_client_prep_commit()
1679 for (; tot_pages; tot_pages -= npages) { echo_client_prep_commit()
1682 if (tot_pages < npages) echo_client_prep_commit()
1683 npages = tot_pages; echo_client_prep_commit()
1685 for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { echo_client_prep_commit()
1691 ioo.ioo_bufcnt = npages; echo_client_prep_commit()
1694 lpages = npages; echo_client_prep_commit()
1699 LASSERT(lpages == npages); echo_client_prep_commit()
1729 rnb, npages, lnb, oti, ret); echo_client_prep_commit()
1743 OBD_FREE(lnb, npages * sizeof(struct niobuf_local)); echo_client_prep_commit()
1745 OBD_FREE(rnb, npages * sizeof(struct niobuf_remote)); echo_client_prep_commit()
1171 cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, struct page **pages, int npages, int async) cl_echo_object_brw() argument
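echo_client_prep_commit() above consumes tot_pages in batches of npages, shrinking the final batch to whatever remains. The control flow, reduced to a standalone sketch with made-up sizes:

#include <stdio.h>

int main(void)
{
        unsigned long long tot_pages = 10; /* total work, in pages     */
        unsigned long long batch = 4;      /* preferred per-pass batch */
        unsigned long long npages = batch;

        for (; tot_pages; tot_pages -= npages) {
                if (tot_pages < npages)
                        npages = tot_pages; /* shrink the last batch */
                printf("processing %llu pages, %llu left after this pass\n",
                       npages, tot_pages - npages);
        }
        return 0;
}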
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos_drm_gem.c419 unsigned int npages, exynos_gem_get_pages_from_userptr()
429 for (i = 0; i < npages; ++i, start += PAGE_SIZE) { exynos_gem_get_pages_from_userptr()
438 if (i != npages) { exynos_gem_get_pages_from_userptr()
447 npages, 1, 1, pages, NULL); exynos_gem_get_pages_from_userptr()
449 if (get_npages != npages) { exynos_gem_get_pages_from_userptr()
460 unsigned int npages, exynos_gem_put_pages_to_userptr()
466 for (i = 0; i < npages; i++) { exynos_gem_put_pages_to_userptr()
418 exynos_gem_get_pages_from_userptr(unsigned long start, unsigned int npages, struct page **pages, struct vm_area_struct *vma) exynos_gem_get_pages_from_userptr() argument
459 exynos_gem_put_pages_to_userptr(struct page **pages, unsigned int npages, struct vm_area_struct *vma) exynos_gem_put_pages_to_userptr() argument
H A Dexynos_drm_g2d.c194 unsigned int npages; member in struct:g2d_cmdlist_userptr
386 g2d_userptr->npages, g2d_userptr_put_dma_addr()
415 unsigned int npages, offset; g2d_userptr_get_dma_addr() local
463 npages = (end - start) >> PAGE_SHIFT; g2d_userptr_get_dma_addr()
464 g2d_userptr->npages = npages; g2d_userptr_get_dma_addr()
466 pages = drm_calloc_large(npages, sizeof(struct page *)); g2d_userptr_get_dma_addr()
500 npages, pages, vma); g2d_userptr_get_dma_addr()
516 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, g2d_userptr_get_dma_addr()
537 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) { g2d_userptr_get_dma_addr()
538 g2d->current_pool += npages << PAGE_SHIFT; g2d_userptr_get_dma_addr()
554 g2d_userptr->npages, g2d_userptr_get_dma_addr()
H A Dexynos_drm_gem.h163 unsigned int npages,
169 unsigned int npages,
/linux-4.1.27/arch/powerpc/sysdev/
H A Ddart_iommu.c164 long npages, unsigned long uaddr, dart_build()
172 DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); dart_build()
179 l = npages; dart_build()
195 while (npages--) dart_build()
204 static void dart_free(struct iommu_table *tbl, long index, long npages) dart_free() argument
213 DBG("dart: free at: %lx, %lx\n", index, npages); dart_free()
217 while (npages--) dart_free()
163 dart_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, struct dma_attrs *attrs) dart_build() argument
/linux-4.1.27/fs/ufs/
H A Ddir.c259 unsigned long npages = ufs_dir_pages(dir); ufs_find_entry() local
266 if (npages == 0 || namelen > UFS_MAXNAMLEN) ufs_find_entry()
274 if (start >= npages) ufs_find_entry()
297 if (++n >= npages) ufs_find_entry()
323 unsigned long npages = ufs_dir_pages(dir); ufs_add_link() local
336 for (n = 0; n <= npages; n++) { ufs_add_link()
440 unsigned long npages = ufs_dir_pages(inode); ufs_readdir() local
450 for ( ; n < npages; n++, offset = 0) { ufs_readdir()
611 unsigned long i, npages = ufs_dir_pages(inode); ufs_empty_dir() local
613 for (i = 0; i < npages; i++) { ufs_empty_dir()
/linux-4.1.27/fs/nilfs2/
H A Ddir.c266 unsigned long npages = dir_pages(inode); nilfs_readdir() local
272 for ( ; n < npages; n++, offset = 0) { nilfs_readdir()
331 unsigned long npages = dir_pages(dir); nilfs_find_entry() local
336 if (npages == 0) nilfs_find_entry()
343 if (start >= npages) nilfs_find_entry()
366 if (++n >= npages) nilfs_find_entry()
446 unsigned long npages = dir_pages(dir); nilfs_add_link() local
457 for (n = 0; n <= npages; n++) { nilfs_add_link()
621 unsigned long i, npages = dir_pages(inode); nilfs_empty_dir() local
623 for (i = 0; i < npages; i++) { nilfs_empty_dir()
/linux-4.1.27/fs/ext2/
H A Ddir.c297 unsigned long npages = dir_pages(inode); ext2_readdir() local
308 for ( ; n < npages; n++, offset = 0) { ext2_readdir()
373 unsigned long npages = dir_pages(dir); ext2_find_entry() local
379 if (npages == 0) ext2_find_entry()
386 if (start >= npages) ext2_find_entry()
411 if (++n >= npages) ext2_find_entry()
497 unsigned long npages = dir_pages(dir); ext2_add_link() local
508 for (n = 0; n <= npages; n++) { ext2_add_link()
673 unsigned long i, npages = dir_pages(inode); ext2_empty_dir() local
676 for (i = 0; i < npages; i++) { ext2_empty_dir()
/linux-4.1.27/virt/kvm/
H A Dkvm_main.c564 free->npages = 0; kvm_free_physmem_slot()
672 if (!new->npages) { update_memslots()
673 WARN_ON(!mslots[i].npages); update_memslots()
676 if (mslots[i].npages) update_memslots()
679 if (!mslots[i].npages) update_memslots()
685 if (!mslots[i + 1].npages) update_memslots()
694 * so that it moves before all those with base_gfn == npages == 0. update_memslots()
696 * On the other hand, if new->npages is zero, the above loop has update_memslots()
701 if (new->npages) { update_memslots()
769 unsigned long npages; __kvm_set_memory_region() local
799 npages = mem->memory_size >> PAGE_SHIFT; __kvm_set_memory_region()
801 if (npages > KVM_MEM_MAX_NR_PAGES) __kvm_set_memory_region()
804 if (!npages) __kvm_set_memory_region()
811 new.npages = npages; __kvm_set_memory_region()
814 if (npages) { __kvm_set_memory_region()
815 if (!old.npages) __kvm_set_memory_region()
819 (npages != old.npages) || __kvm_set_memory_region()
832 } else if (old.npages) { __kvm_set_memory_region()
844 if (!((base_gfn + npages <= slot->base_gfn) || __kvm_set_memory_region()
845 (base_gfn >= slot->base_gfn + slot->npages))) __kvm_set_memory_region()
858 if (kvm_arch_create_memslot(kvm, &new, npages)) __kvm_set_memory_region()
1146 *nr_pages = slot->npages - (gfn - slot->base_gfn); __gfn_to_hva_many()
1220 int npages; hva_to_pfn_fast() local
1233 npages = __get_user_pages_fast(addr, 1, 1, page); hva_to_pfn_fast()
1234 if (npages == 1) { hva_to_pfn_fast()
1253 int npages = 0; hva_to_pfn_slow() local
1262 npages = get_user_page_nowait(current, current->mm, hva_to_pfn_slow()
1266 npages = __get_user_pages_unlocked(current, current->mm, addr, 1, hva_to_pfn_slow()
1269 if (npages != 1) hva_to_pfn_slow()
1270 return npages; hva_to_pfn_slow()
1276 npages = __get_user_pages_fast(addr, 1, 1, wpage); hva_to_pfn_slow()
1277 if (npages == 1) { hva_to_pfn_slow()
1283 npages = 1; hva_to_pfn_slow()
1286 return npages; hva_to_pfn_slow()
1319 int npages; hva_to_pfn() local
1330 npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); hva_to_pfn()
1331 if (npages == 1) hva_to_pfn()
1335 if (npages == -EHWPOISON || hva_to_pfn()
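__kvm_set_memory_region() above rejects a slot whose guest-frame range collides with an existing one; the check is the usual negated interval-overlap test on half-open ranges [base_gfn, base_gfn + npages). A small sketch of just that predicate with invented slot values:

#include <stdio.h>
#include <stdbool.h>

struct slot {
        unsigned long base_gfn;
        unsigned long npages;
};

/* Two half-open ranges [base, base + npages) overlap unless one ends
 * at or before the other begins. */
static bool ranges_overlap(const struct slot *a, const struct slot *b)
{
        return !((a->base_gfn + a->npages <= b->base_gfn) ||
                 (a->base_gfn >= b->base_gfn + b->npages));
}

int main(void)
{
        struct slot existing = { .base_gfn = 0x100, .npages = 0x40 };
        struct slot adjacent = { .base_gfn = 0x140, .npages = 0x10 };
        struct slot clashing = { .base_gfn = 0x13f, .npages = 0x10 };

        printf("adjacent overlaps: %d\n", ranges_overlap(&adjacent, &existing)); /* 0 */
        printf("clashing overlaps: %d\n", ranges_overlap(&clashing, &existing)); /* 1 */
        return 0;
}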
/linux-4.1.27/arch/s390/pci/
H A Dpci_dma.c321 int npages; s390_dma_unmap_pages() local
323 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); s390_dma_unmap_pages()
325 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, s390_dma_unmap_pages()
331 atomic64_add(npages, &zdev->unmapped_pages); s390_dma_unmap_pages()
333 dma_free_iommu(zdev, iommu_page_index, npages); s390_dma_unmap_pages()
/linux-4.1.27/fs/f2fs/
H A Ddebug.c129 unsigned npages; update_mem_info() local
195 npages = NODE_MAPPING(sbi)->nrpages; update_mem_info()
196 si->page_mem += npages << PAGE_CACHE_SHIFT; update_mem_info()
197 npages = META_MAPPING(sbi)->nrpages; update_mem_info()
198 si->page_mem += npages << PAGE_CACHE_SHIFT; update_mem_info()
H A Ddir.c212 unsigned long npages = dir_blocks(dir); f2fs_find_entry() local
223 if (npages == 0) f2fs_find_entry()
764 unsigned long npages = dir_blocks(inode); f2fs_readdir() local
775 if (npages - n > 1 && !ra_has_index(ra, n)) f2fs_readdir()
777 min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); f2fs_readdir()
779 for (; n < npages; n++) { f2fs_readdir()
H A Dsegment.c1473 int npages = npages_for_summary_flush(sbi, true); restore_curseg_summaries() local
1475 if (npages >= 2) restore_curseg_summaries()
1476 ra_meta_pages(sbi, start_sum_block(sbi), npages, restore_curseg_summaries()
H A Ddata.c79 int npages, bool is_read) __bio_alloc()
84 bio = bio_alloc(GFP_NOIO, npages); __bio_alloc()
78 __bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, int npages, bool is_read) __bio_alloc() argument
/linux-4.1.27/drivers/staging/lustre/lnet/lnet/
H A Drouter.c1235 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) lnet_destroy_rtrbuf() argument
1237 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); lnet_destroy_rtrbuf()
1239 while (--npages >= 0) lnet_destroy_rtrbuf()
1240 __free_page(rb->rb_kiov[npages].kiov_page); lnet_destroy_rtrbuf()
1248 int npages = rbp->rbp_npages; lnet_new_rtrbuf() local
1249 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); lnet_new_rtrbuf()
1260 for (i = 0; i < npages; i++) { lnet_new_rtrbuf()
1283 int npages = rbp->rbp_npages; lnet_rtrpool_free_bufs() local
1299 lnet_destroy_rtrbuf(rb, npages); lnet_rtrpool_free_bufs()
1344 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages) lnet_rtrpool_init() argument
1349 rbp->rbp_npages = npages; lnet_rtrpool_init()
1374 lnet_nrb_tiny_calculate(int npages) lnet_nrb_tiny_calculate() argument
1393 lnet_nrb_small_calculate(int npages) lnet_nrb_small_calculate() argument
1412 lnet_nrb_large_calculate(int npages) lnet_nrb_large_calculate() argument
/linux-4.1.27/drivers/vfio/
H A Dvfio_iommu_spapr_tce.c53 unsigned long locked, lock_limit, npages; tce_iommu_enable() local
84 npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT; tce_iommu_enable()
85 locked = current->mm->locked_vm + npages; tce_iommu_enable()
93 current->mm->locked_vm += npages; tce_iommu_enable()
/linux-4.1.27/arch/powerpc/include/asm/
H A Dmachdep.h70 long npages,
76 long npages);
84 long npages,
90 long npages);
H A Diommu.h197 unsigned long npages);
H A Dkvm_ppc.h182 unsigned long npages);
260 unsigned long npages);
H A Dkvm_book3s_64.h388 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); slot_is_aligned()
/linux-4.1.27/include/crypto/
H A Dif_alg.h68 unsigned int npages; member in struct:af_alg_sgl
/linux-4.1.27/drivers/gpu/drm/omapdrm/
H A Domap_fbdev.c51 int npages; pan_worker() local
54 npages = fbi->fix.line_length >> PAGE_SHIFT; pan_worker()
55 omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages); pan_worker()
H A Domap_gem.c230 int npages = obj->size >> PAGE_SHIFT; omap_gem_attach_pages() local
246 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL); omap_gem_attach_pages()
252 for (i = 0; i < npages; i++) { omap_gem_attach_pages()
257 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL); omap_gem_attach_pages()
284 int i, npages = obj->size >> PAGE_SHIFT; omap_gem_detach_pages() local
285 for (i = 0; i < npages; i++) { omap_gem_detach_pages()
664 uint32_t npages = obj->size >> PAGE_SHIFT; omap_gem_roll() local
667 if (roll > npages) { omap_gem_roll()
682 ret = tiler_pin(omap_obj->block, pages, npages, roll, true); omap_gem_roll()
716 int i, npages = obj->size >> PAGE_SHIFT; omap_gem_dma_sync() local
720 for (i = 0; i < npages; i++) { omap_gem_dma_sync()
751 uint32_t npages = obj->size >> PAGE_SHIFT; omap_gem_get_paddr() local
777 ret = tiler_pin(block, pages, npages, omap_gem_get_paddr()
960 uint32_t npages = obj->size >> PAGE_SHIFT; omap_gem_resume() local
963 omap_obj->pages, npages, omap_gem_resume()
H A Domap_dmm_tiler.h92 uint32_t npages, uint32_t roll, bool wait);
H A Domap_dmm_tiler.c205 struct page **pages, uint32_t npages, uint32_t roll) dmm_txn_append()
237 if (n >= npages) dmm_txn_append()
238 n -= npages; dmm_txn_append()
305 uint32_t npages, uint32_t roll, bool wait) fill()
321 dmm_txn_append(txn, &p_area, pages, npages, roll); fill()
338 uint32_t npages, uint32_t roll, bool wait) tiler_pin()
342 ret = fill(&block->area, pages, npages, roll, wait); tiler_pin()
204 dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, struct page **pages, uint32_t npages, uint32_t roll) dmm_txn_append() argument
304 fill(struct tcm_area *area, struct page **pages, uint32_t npages, uint32_t roll, bool wait) fill() argument
337 tiler_pin(struct tiler_block *block, struct page **pages, uint32_t npages, uint32_t roll, bool wait) tiler_pin() argument
/linux-4.1.27/arch/powerpc/mm/
H A Dsubpage-prot.c61 int npages) hpte_flush_range()
80 for (; npages > 0; --npages) { hpte_flush_range()
60 hpte_flush_range(struct mm_struct *mm, unsigned long addr, int npages) hpte_flush_range() argument
H A Dhugetlbpage.c342 unsigned long npages; do_gpage_early_setup() local
356 if (sscanf(val, "%lu", &npages) <= 0) do_gpage_early_setup()
357 npages = 0; do_gpage_early_setup()
358 if (npages > MAX_NUMBER_GPAGES) { do_gpage_early_setup()
362 npages, size / 1024); do_gpage_early_setup()
363 npages = MAX_NUMBER_GPAGES; do_gpage_early_setup()
365 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; do_gpage_early_setup()
/linux-4.1.27/net/sunrpc/auth_gss/
H A Dgss_rpc_upcall.c218 for (i = 0; i < arg->npages && arg->pages[i]; i++) gssp_free_receive_pages()
226 arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE); gssp_alloc_receive_pages()
227 arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL); gssp_alloc_receive_pages()
H A Dgss_rpc_xdr.h151 unsigned int npages; member in struct:gssx_arg_accept_sec_context
H A Dgss_rpc_xdr.c784 arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); gssx_enc_accept_sec_context()
/linux-4.1.27/drivers/xen/
H A Dprivcmd.c210 if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || mmap_mfn_range()
211 ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) mmap_mfn_range()
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) mmap_mfn_range()
221 msg->mfn, msg->npages, mmap_mfn_range()
227 st->va += msg->npages << PAGE_SHIFT; mmap_mfn_range()
/linux-4.1.27/fs/fuse/
H A Ddev.c39 unsigned npages) fuse_request_init()
42 memset(pages, 0, sizeof(*pages) * npages); fuse_request_init()
43 memset(page_descs, 0, sizeof(*page_descs) * npages); fuse_request_init()
50 req->max_pages = npages; fuse_request_init()
53 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) __fuse_request_alloc() argument
60 if (npages <= FUSE_REQ_INLINE_PAGES) { __fuse_request_alloc()
64 pages = kmalloc(sizeof(struct page *) * npages, flags); __fuse_request_alloc()
66 npages, flags); __fuse_request_alloc()
76 fuse_request_init(req, pages, page_descs, npages); __fuse_request_alloc()
81 struct fuse_req *fuse_request_alloc(unsigned npages) fuse_request_alloc() argument
83 return __fuse_request_alloc(npages, GFP_KERNEL); fuse_request_alloc()
87 struct fuse_req *fuse_request_alloc_nofs(unsigned npages) fuse_request_alloc_nofs() argument
89 return __fuse_request_alloc(npages, GFP_NOFS); fuse_request_alloc_nofs()
145 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, __fuse_get_req() argument
171 req = fuse_request_alloc(npages); __fuse_get_req()
189 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages) fuse_get_req() argument
191 return __fuse_get_req(fc, npages, false); fuse_get_req()
196 unsigned npages) fuse_get_req_for_background()
198 return __fuse_get_req(fc, npages, true); fuse_get_req_for_background()
37 fuse_request_init(struct fuse_req *req, struct page **pages, struct fuse_page_desc *page_descs, unsigned npages) fuse_request_init() argument
195 fuse_get_req_for_background(struct fuse_conn *fc, unsigned npages) fuse_get_req_for_background() argument
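fuse_request_init() and __fuse_request_alloc() above keep the page array embedded in the request when npages is small and fall back to a separate allocation otherwise. A userspace sketch of that small-request optimisation, with an assumed inline threshold of one page:

#include <stdio.h>
#include <stdlib.h>

#define INLINE_PAGES 1 /* assumption: mirrors a FUSE_REQ_INLINE_PAGES-style threshold */

struct request {
        void **pages;  /* points at 'inline_pages' or at a heap array */
        unsigned max_pages;
        void *inline_pages[INLINE_PAGES];
};

/* Small requests reuse the embedded array; large ones allocate their own. */
static struct request *request_alloc(unsigned npages)
{
        struct request *req = calloc(1, sizeof(*req));

        if (!req)
                return NULL;

        if (npages <= INLINE_PAGES) {
                req->pages = req->inline_pages;
        } else {
                req->pages = calloc(npages, sizeof(*req->pages));
                if (!req->pages) {
                        free(req);
                        return NULL;
                }
        }
        req->max_pages = npages;
        return req;
}

static void request_free(struct request *req)
{
        if (req->pages != req->inline_pages)
                free(req->pages);
        free(req);
}

int main(void)
{
        struct request *small = request_alloc(1);
        struct request *big = request_alloc(32);

        if (!small || !big)
                return 1;
        printf("small uses inline array: %d\n", small->pages == small->inline_pages);
        printf("big uses heap array:     %d\n", big->pages != big->inline_pages);
        request_free(small);
        request_free(big);
        return 0;
}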
H A Dfuse_i.h759 struct fuse_req *fuse_request_alloc(unsigned npages);
761 struct fuse_req *fuse_request_alloc_nofs(unsigned npages);
772 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
774 unsigned npages);
H A Dfile.c1267 unsigned npages; fuse_get_user_pages() local
1281 npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE; fuse_get_user_pages()
1284 fuse_page_descs_length_init(req, req->num_pages, npages); fuse_get_user_pages()
1286 req->num_pages += npages; fuse_get_user_pages()
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_user_sdma.c665 unsigned long addr, int tlen, int npages) qib_user_sdma_pin_pages()
671 while (npages) { qib_user_sdma_pin_pages()
672 if (npages > 8) qib_user_sdma_pin_pages()
675 j = npages; qib_user_sdma_pin_pages()
705 npages -= j; qib_user_sdma_pin_pages()
729 const int npages = qib_user_sdma_num_pages(iov + idx); qib_user_sdma_pin_pkt() local
733 iov[idx].iov_len, npages); qib_user_sdma_pin_pkt()
765 unsigned long niov, int npages) qib_user_sdma_init_payload()
770 npages >= ARRAY_SIZE(pkt->addr)) qib_user_sdma_init_payload()
831 int npages = 0; qib_user_sdma_queue_pkts() local
890 npages += qib_user_sdma_num_pages(&iov[idx]); qib_user_sdma_queue_pkts()
913 n = npages*((2*PAGE_SIZE/frag_size)+1); qib_user_sdma_queue_pkts()
989 nfrags, npages); qib_user_sdma_queue_pkts()
662 qib_user_sdma_pin_pages(const struct qib_devdata *dd, struct qib_user_sdma_queue *pq, struct qib_user_sdma_pkt *pkt, unsigned long addr, int tlen, int npages) qib_user_sdma_pin_pages() argument
761 qib_user_sdma_init_payload(const struct qib_devdata *dd, struct qib_user_sdma_queue *pq, struct qib_user_sdma_pkt *pkt, const struct iovec *iov, unsigned long niov, int npages) qib_user_sdma_init_payload() argument
/linux-4.1.27/arch/parisc/mm/
H A Dinit.c294 unsigned long npages; setup_bootmem() local
297 npages = pmem_ranges[i].pages; setup_bootmem()
302 (start_pfn + npages) ); setup_bootmem()
305 (npages << PAGE_SHIFT) ); setup_bootmem()
307 if ((start_pfn + npages) > max_pfn) setup_bootmem()
308 max_pfn = start_pfn + npages; setup_bootmem()
/linux-4.1.27/arch/x86/platform/efi/
H A Defi.c513 u64 addr, npages; efi_set_executable() local
516 npages = md->num_pages; efi_set_executable()
518 memrange_efi_to_native(&addr, &npages); efi_set_executable()
521 set_memory_x(addr, npages); efi_set_executable()
523 set_memory_nx(addr, npages); efi_set_executable()
545 u64 npages; efi_memory_uc() local
547 npages = round_up(size, page_shift) / page_shift; efi_memory_uc()
548 memrange_efi_to_native(&addr, &npages); efi_memory_uc()
549 set_memory_uc(addr, npages); efi_memory_uc()
H A Defi_64.c148 unsigned npages; efi_setup_page_tables() local
186 npages = (_end - _text) >> PAGE_SHIFT; efi_setup_page_tables()
189 if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) { efi_setup_page_tables()
/linux-4.1.27/include/rdma/
H A Dib_umem.h57 int npages; member in struct:ib_umem
/linux-4.1.27/include/uapi/xen/
H A Dprivcmd.h48 __u64 npages; member in struct:privcmd_mmap_entry
/linux-4.1.27/arch/mips/kvm/
H A Dmips.c194 unsigned long npages) kvm_arch_create_memslot()
212 unsigned long npages = 0; kvm_arch_commit_memory_region() local
222 npages = mem->memory_size >> PAGE_SHIFT; kvm_arch_commit_memory_region()
224 if (npages) { kvm_arch_commit_memory_region()
225 kvm->arch.guest_pmap_npages = npages; kvm_arch_commit_memory_region()
227 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); kvm_arch_commit_memory_region()
235 npages, kvm->arch.guest_pmap); kvm_arch_commit_memory_region()
238 for (i = 0; i < npages; i++) kvm_arch_commit_memory_region()
991 ga_end = ga + (memslot->npages << PAGE_SHIFT); kvm_vm_ioctl_get_dirty_log()
193 kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvm_arch_create_memslot() argument
/linux-4.1.27/drivers/infiniband/ulp/srp/
H A Dib_srp.h277 * @npages: Number of page addresses in the pages[] array.
294 unsigned int npages; member in struct:srp_map_state
H A Dib_srp.c1280 state->npages, io_addr); srp_map_finish_fmr()
1310 sizeof(state->pages[0]) * state->npages); srp_map_finish_fr()
1317 wr.wr.fast_reg.page_list_len = state->npages; srp_map_finish_fr()
1340 if (state->npages == 0) srp_finish_mapping()
1343 if (state->npages == 1 && !register_always) srp_finish_mapping()
1352 state->npages = 0; srp_finish_mapping()
1422 if (state->npages == dev->max_pages_per_mr || offset != 0) { srp_map_sg_entry()
1432 if (!state->npages) srp_map_sg_entry()
1434 state->pages[state->npages++] = dma_addr & dev->mr_page_mask; srp_map_sg_entry()
/linux-4.1.27/arch/x86/vdso/
H A Dvma.c30 int npages = (image->size) / PAGE_SIZE; init_vdso_image() local
33 for (i = 0; i < npages; i++) init_vdso_image()
/linux-4.1.27/arch/powerpc/platforms/cell/
H A Diommu.c167 static int tce_build_cell(struct iommu_table *tbl, long index, long npages, tce_build_cell() argument
201 for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) tce_build_cell()
206 invalidate_tce_cache(window->iommu, io_pte, npages); tce_build_cell()
209 index, npages, direction, base_pte); tce_build_cell()
213 static void tce_free_cell(struct iommu_table *tbl, long index, long npages) tce_free_cell() argument
221 pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); tce_free_cell()
235 for (i = 0; i < npages; i++) tce_free_cell()
240 invalidate_tce_cache(window->iommu, io_pte, npages); tce_free_cell()
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/
H A Dmodule.c155 static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages, kportal_memhog_alloc() argument
167 if (npages < 0) kportal_memhog_alloc()
170 if (npages == 0) kportal_memhog_alloc()
183 while (ldu->ldu_memhog_pages < npages && kportal_memhog_alloc()
198 while (ldu->ldu_memhog_pages < npages && kportal_memhog_alloc()
/linux-4.1.27/fs/nfs/blocklayout/
H A Dblocklayout.c241 header->page_array.npages, f_offset, bl_read_pagelist()
253 for (i = pg_index; i < header->page_array.npages; i++) { bl_read_pagelist()
290 header->page_array.npages - i, bl_read_pagelist()
403 for (i = pg_index; i < header->page_array.npages; i++) { bl_write_pagelist()
417 bio = do_add_page_to_bio(bio, header->page_array.npages - i, bl_write_pagelist()
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_gem.c466 int i, npages; drm_gem_get_pages() local
477 npages = obj->size >> PAGE_SHIFT; drm_gem_get_pages()
479 pages = drm_malloc_ab(npages, sizeof(struct page *)); drm_gem_get_pages()
483 for (i = 0; i < npages; i++) { drm_gem_get_pages()
519 int i, npages; drm_gem_put_pages() local
527 npages = obj->size >> PAGE_SHIFT; drm_gem_put_pages()
529 for (i = 0; i < npages; i++) { drm_gem_put_pages()
/linux-4.1.27/net/sunrpc/
H A Dsvcsock.c914 unsigned int i, len, npages; svc_tcp_restore_pages() local
919 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; svc_tcp_restore_pages()
920 for (i = 0; i < npages; i++) { svc_tcp_restore_pages()
933 unsigned int i, len, npages; svc_tcp_save_pages() local
938 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; svc_tcp_save_pages()
939 for (i = 0; i < npages; i++) { svc_tcp_save_pages()
947 unsigned int i, len, npages; svc_tcp_clear_pages() local
952 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; svc_tcp_clear_pages()
953 for (i = 0; i < npages; i++) { svc_tcp_clear_pages()
/linux-4.1.27/arch/parisc/kernel/
H A Dinventory.c135 unsigned long npages; pagezero_memconfig() local
150 npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT); pagezero_memconfig()
151 set_pmem_entry(pmem_ranges,0UL,npages); pagezero_memconfig()
/linux-4.1.27/sound/pci/emu10k1/
H A Dmemory.c103 static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) search_empty_map_area() argument
106 int max_size = npages; search_empty_map_area()
116 if (size == npages) { search_empty_map_area()
/linux-4.1.27/drivers/usb/mon/
H A Dmon_bin.c220 static int mon_alloc_buff(struct mon_pgmap *map, int npages);
221 static void mon_free_buff(struct mon_pgmap *map, int npages);
1304 static int mon_alloc_buff(struct mon_pgmap *map, int npages) mon_alloc_buff() argument
1309 for (n = 0; n < npages; n++) { mon_alloc_buff()
1322 static void mon_free_buff(struct mon_pgmap *map, int npages) mon_free_buff() argument
1326 for (n = 0; n < npages; n++) mon_free_buff()
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Drpc_rdma.c306 int i, npages, curlen; rpcrdma_inline_pullup() local
345 npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT; rpcrdma_inline_pullup()
346 for (i = 0; copy_len && i < npages; i++) { rpcrdma_inline_pullup()
613 int i, npages, curlen, olen; rpcrdma_inline_fixup() local
640 npages = PAGE_ALIGN(page_base + rpcrdma_inline_fixup()
642 for (; i < npages; i++) { rpcrdma_inline_fixup()
/linux-4.1.27/include/linux/mlx4/
H A Ddevice.h600 int npages; member in struct:mlx4_buf
1034 int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
1040 int npages, int page_shift, struct mlx4_mr *mr);
1048 int start_index, int npages, u64 *page_list);
1326 int npages, u64 iova, u32 *lkey, u32 *rkey);
1439 u64 iova, u64 size, int npages,
/linux-4.1.27/arch/x86/kernel/cpu/
H A Dperf_event_intel_pt.c619 unsigned long idx, npages, wakeup; pt_buffer_reset_markers() local
640 npages = handle->size >> PAGE_SHIFT; pt_buffer_reset_markers()
644 npages++; pt_buffer_reset_markers()
646 idx = (head >> PAGE_SHIFT) + npages; pt_buffer_reset_markers()
653 idx = (head >> PAGE_SHIFT) + npages - 1; pt_buffer_reset_markers()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
H A Do2iblnd.c1089 int npages = p->ibp_npages; kiblnd_free_pages() local
1092 for (i = 0; i < npages; i++) { kiblnd_free_pages()
1097 LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); kiblnd_free_pages()
1100 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) kiblnd_alloc_pages() argument
1106 offsetof(kib_pages_t, ibp_pages[npages])); kiblnd_alloc_pages()
1108 CERROR("Can't allocate descriptor for %d pages\n", npages); kiblnd_alloc_pages()
1112 memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages])); kiblnd_alloc_pages()
1113 p->ibp_npages = npages; kiblnd_alloc_pages()
1115 for (i = 0; i < npages; i++) { kiblnd_alloc_pages()
1120 CERROR("Can't allocate page %d of %d\n", i, npages); kiblnd_alloc_pages()
1524 int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, kiblnd_fmr_pool_map() argument
1541 pages, npages, iov); kiblnd_fmr_pool_map()
H A Do2iblnd.h961 int npages, __u64 iov, kib_fmr_t *fmr);
981 int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
H A Do2iblnd_cb.c556 int npages; kiblnd_fmr_map_tx() local
567 for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { kiblnd_fmr_map_tx()
570 pages[npages++] = (rd->rd_frags[i].rf_addr & kiblnd_fmr_map_tx()
578 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); kiblnd_fmr_map_tx()
580 CERROR("Can't map %d pages: %d\n", npages, rc); kiblnd_fmr_map_tx()
/linux-4.1.27/include/linux/mlx5/
H A Ddriver.h339 int npages; member in struct:mlx5_buf
676 gfp_t flags, int npages);
706 s32 npages);
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Ddir.c157 int npages; ll_dir_filler() local
173 for (npages = 1; npages < max_pages; npages++) { ll_dir_filler()
177 page_pool[npages] = page; ll_dir_filler()
182 op_data->op_npages = npages; ll_dir_filler()
204 CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages); ll_dir_filler()
207 for (i = 1; i < npages; i++) { ll_dir_filler()
H A Drw26.c216 static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) ll_free_user_pages() argument
220 for (i = 0; i < npages; i++) { ll_free_user_pages()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Dmr.c469 int npages, u64 iova) mlx4_ib_map_phys_fmr()
474 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, mlx4_ib_map_phys_fmr()
468 mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages, u64 iova) mlx4_ib_map_phys_fmr() argument
H A Dsrq.c164 err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, mlx4_ib_create_srq()
H A Dmlx4_ib.h719 int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
H A Dcq.c111 err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift, mlx4_ib_alloc_cq_buf()
H A Dqp.c780 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, create_qp_common()
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_bitmap.c640 struct page **npages, **opages = NULL; drbd_bm_resize() local
697 npages = b->bm_pages; drbd_bm_resize()
700 npages = NULL; drbd_bm_resize()
702 npages = bm_realloc_pages(b, want); drbd_bm_resize()
705 if (!npages) { drbd_bm_resize()
719 b->bm_pages = npages; drbd_bm_resize()
735 /* implicit: (opages != NULL) && (opages != npages) */ drbd_bm_resize()
742 if (opages != npages) drbd_bm_resize()
/linux-4.1.27/drivers/edac/
H A Di5100_edac.c856 const unsigned long npages = i5100_npages(mci, i); i5100_init_csrows() local
860 if (!npages) i5100_init_csrows()
866 dimm->nr_pages = npages; i5100_init_csrows()
876 chan, rank, (long)PAGES_TO_MiB(npages)); i5100_init_csrows()
H A Dsb_edac.c853 unsigned i, j, banks, ranks, rows, cols, npages; get_dimm_config() local
929 npages = MiB_TO_PAGES(size); get_dimm_config()
933 size, npages, get_dimm_config()
936 dimm->nr_pages = npages; get_dimm_config()
H A Di7core_edac.c595 u32 size, npages; get_dimm_config() local
615 npages = MiB_TO_PAGES(size); get_dimm_config()
617 dimm->nr_pages = npages; get_dimm_config()
/linux-4.1.27/fs/jfs/
H A Djfs_logmgr.c2392 int npages = 0; lmLogFormat() local
2403 npages = logSize >> sbi->l2nbperpage; lmLogFormat()
2423 logsuper->size = cpu_to_le32(npages); lmLogFormat()
2435 * init pages 2 to npages-1 as log data pages: lmLogFormat()
2444 * the N (= npages-2) data pages of the log is maintained as lmLogFormat()
2461 lp->h.page = lp->t.page = cpu_to_le32(npages - 3); lmLogFormat()
2480 for (lspn = 0; lspn < npages - 3; lspn++) { lmLogFormat()
H A Djfs_dmap.c4048 #define BMAPPGTOLEV(npages) \
4049 (((npages) <= 3 + MAXL0PAGES) ? 0 : \
4050 ((npages) <= 2 + MAXL1PAGES) ? 1 : 2)
4056 s64 npages, ndmaps; dbMapFileSizeToMapSize() local
4061 npages = nblocks >> JFS_SBI(sb)->l2nbperpage; dbMapFileSizeToMapSize()
4062 level = BMAPPGTOLEV(npages); dbMapFileSizeToMapSize()
4069 npages--; /* skip the first global control page */ dbMapFileSizeToMapSize()
4071 npages -= (2 - level); dbMapFileSizeToMapSize()
4072 npages--; /* skip top level's control page */ dbMapFileSizeToMapSize()
4076 complete = (u32) npages / factor; dbMapFileSizeToMapSize()
4081 npages = (u32) npages % factor; dbMapFileSizeToMapSize()
4083 npages--; dbMapFileSizeToMapSize()
H A Djfs_xtree.c2576 int nb, npages, nblks; xtRelocate() local
2676 npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE; xtRelocate()
2678 npages = ((offset + nbytes - 1) >> CM_L2BSIZE) - xtRelocate()
2686 offset += nb, pno++, npages--) { xtRelocate()
2691 if (rc = cmRead(ip, offset, npages, &cp)) xtRelocate()
/linux-4.1.27/arch/arm/kvm/
H A Dmmu.c358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; stage2_flush_memslot()
749 phys_addr_t size = PAGE_SIZE * memslot->npages; stage2_unmap_memslot()
1164 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; kvm_mmu_wp_memory_region()
1485 (memslot->npages << PAGE_SHIFT)); kvm_for_each_memslot()
1756 if (memslot->base_gfn + memslot->npages >= kvm_arch_prepare_memory_region()
1833 unsigned long npages) kvm_arch_create_memslot()
1859 phys_addr_t size = slot->npages << PAGE_SHIFT; kvm_arch_flush_shadow_memslot()
1832 kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvm_arch_create_memslot() argument
/linux-4.1.27/fs/
H A Dsplice.c1445 unsigned long off, npages; get_iovec_page_array() local
1482 npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; get_iovec_page_array()
1483 if (npages > pipe_buffers - buffers) get_iovec_page_array()
1484 npages = pipe_buffers - buffers; get_iovec_page_array()
1486 error = get_user_pages_fast((unsigned long)base, npages, get_iovec_page_array()
1519 if (error < npages || buffers == pipe_buffers) get_iovec_page_array()
/linux-4.1.27/drivers/block/
H A Dnvme-core.c408 iod->npages = -1; iod_init()
455 if (iod->npages == 0) nvme_free_iod()
457 for (i = 0; i < iod->npages; i++) { nvme_free_iod()
672 iod->npages = 0; nvme_setup_prps()
675 iod->npages = 1; nvme_setup_prps()
681 iod->npages = -1; nvme_setup_prps()
693 list[iod->npages++] = prp_list; nvme_setup_prps()
853 iod->npages = 0; nvme_queue_rq()
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
H A Dframework.c1105 sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, sfw_alloc_pages() argument
1109 LASSERT(npages > 0 && npages <= LNET_MAX_IOV); sfw_alloc_pages()
1111 rpc->srpc_bulk = srpc_alloc_bulk(cpt, npages, len, sink); sfw_alloc_pages()
H A Dselftest.h424 int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len,
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_cmd_parser.c829 int npages = last_page - first_page; vmap_batch() local
832 pages = drm_malloc_ab(npages, sizeof(*pages)); vmap_batch()
841 if (i == npages) vmap_batch()
/linux-4.1.27/fs/afs/
H A Ddir.c93 __be16 npages; member in struct:afs_dir_pagehdr
143 if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) { afs_dir_check_page()
146 ntohs(dbuf->blocks[0].pagehdr.npages)); afs_dir_check_page()
/linux-4.1.27/drivers/scsi/cxgbi/
H A Dlibcxgbi.c1385 unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >> ddp_make_gl() local
1397 npages * (sizeof(dma_addr_t) + ddp_make_gl()
1401 "xfer %u, %u pages, OOM.\n", xferlen, npages); ddp_make_gl()
1406 "xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages); ddp_make_gl()
1408 gl->pages = (struct page **)&gl->phys_addr[npages]; ddp_make_gl()
1409 gl->nelem = npages; ddp_make_gl()
/linux-4.1.27/mm/
H A Dnommu.c1564 unsigned long npages; split_vma() local
1591 npages = (addr - vma->vm_start) >> PAGE_SHIFT; split_vma()
1597 region->vm_pgoff = new->vm_pgoff += npages; split_vma()
1608 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; split_vma()
H A Dmmap.c2991 int may_expand_vm(struct mm_struct *mm, unsigned long npages) may_expand_vm() argument
2998 if (cur + npages > lim) may_expand_vm()
/linux-4.1.27/drivers/vhost/
H A Dscsi.c800 unsigned int npages = 0, offset, nbytes; vhost_scsi_map_to_sgl() local
828 sg_set_page(sg, pages[npages], nbytes, offset); vhost_scsi_map_to_sgl()
832 npages++; vhost_scsi_map_to_sgl()
/linux-4.1.27/arch/s390/kvm/
H A Dkvm-s390.c220 last_gfn = memslot->base_gfn + memslot->npages; kvm_s390_sync_dirty_log()
2564 unsigned long npages) kvm_arch_create_memslot()
2604 old->npages * PAGE_SIZE == mem->memory_size) kvm_arch_commit_memory_region()
2563 kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) kvm_arch_create_memslot() argument
/linux-4.1.27/net/core/
H A Dskbuff.c4376 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; alloc_skb_with_frags() local
4387 if (npages > MAX_SKB_FRAGS) alloc_skb_with_frags()
4399 skb->truesize += npages << PAGE_SHIFT; alloc_skb_with_frags()
4401 for (i = 0; npages > 0; i++) { alloc_skb_with_frags()
4405 if (npages >= 1 << order) { alloc_skb_with_frags()
4427 npages -= 1 << order; alloc_skb_with_frags()
/linux-4.1.27/fs/cifs/
H A Dfile.c2906 unsigned int npages, rsize, credits; cifs_send_async_read() local
2926 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); cifs_send_async_read()
2929 rdata = cifs_readdata_alloc(npages, cifs_send_async_read()
2937 rc = cifs_read_allocate_pages(rdata, npages); cifs_send_async_read()
2942 rdata->nr_pages = npages; cifs_send_async_read()
/linux-4.1.27/drivers/iommu/
H A Dintel-iommu.c4518 unsigned int npages; intel_iommu_unmap() local
4534 npages = last_pfn - start_pfn + 1; intel_iommu_unmap()
4546 npages, !freelist, 0); intel_iommu_unmap()
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_net.h2412 unsigned npages, unsigned max_brw,
/linux-4.1.27/net/ipv4/
H A Dtcp_input.c4449 int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); tcp_send_rcvq() local
4451 data_len = npages << PAGE_SHIFT; tcp_send_rcvq()
