num_pages          30 arch/arm/kernel/efi.c 	size = md->num_pages << EFI_PAGE_SHIFT;
num_pages          50 arch/arm/kernel/efi.c 		.length		= md->num_pages * EFI_PAGE_SIZE,
num_pages          66 arch/arm64/kernel/efi.c 	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
num_pages          80 arch/arm64/kernel/efi.c 			   md->num_pages << EFI_PAGE_SHIFT,
num_pages         112 arch/arm64/kernel/efi.c 				   md->num_pages << EFI_PAGE_SHIFT,
num_pages         277 arch/ia64/kernel/efi.c 	u64 num_pages;
num_pages         282 arch/ia64/kernel/efi.c #define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
num_pages         287 arch/ia64/kernel/efi.c 	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
num_pages         319 arch/ia64/kernel/efi.c 		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
num_pages         575 arch/ia64/kernel/efi.c 			size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         716 arch/ia64/kernel/efi.c 		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
num_pages        1073 arch/ia64/kernel/efi.c 				k->num_pages = md->num_pages;
num_pages        1106 arch/ia64/kernel/efi.c 					(k-1)->num_pages +=
num_pages        1112 arch/ia64/kernel/efi.c 					k->num_pages = (lim - md->phys_addr)
num_pages        1127 arch/ia64/kernel/efi.c 					(k-1)->num_pages += md->num_pages;
num_pages        1131 arch/ia64/kernel/efi.c 					k->num_pages = (efi_md_end(md) - lim)
num_pages        1153 arch/ia64/kernel/efi.c 			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
num_pages        1159 arch/ia64/kernel/efi.c 		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
num_pages        1193 arch/ia64/kernel/efi.c 		if (md->num_pages == 0) /* should not happen */
num_pages          31 arch/m68k/mm/mcfmmu.c unsigned long num_pages;
num_pages          54 arch/m68k/mm/mcfmmu.c 	size = num_pages * sizeof(pte_t);
num_pages          86 arch/m68k/mm/mcfmmu.c 	zones_size[ZONE_DMA] = num_pages;
num_pages         170 arch/m68k/mm/mcfmmu.c 	num_pages = PFN_DOWN(_ramend - _rambase);
num_pages          32 arch/m68k/mm/sun3mmu.c extern unsigned long num_pages;
num_pages          58 arch/m68k/mm/sun3mmu.c 	size = num_pages * sizeof(pte_t);
num_pages          46 arch/m68k/sun3/config.c unsigned long num_pages;
num_pages         119 arch/m68k/sun3/config.c 	max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT;
num_pages          43 arch/mips/kernel/vdso.c 	unsigned long num_pages, i;
num_pages          49 arch/mips/kernel/vdso.c 	num_pages = image->size / PAGE_SIZE;
num_pages          52 arch/mips/kernel/vdso.c 	for (i = 0; i < num_pages; i++)
num_pages         746 arch/powerpc/kvm/e500_mmu.c 	int num_pages, ret, i;
num_pages         777 arch/powerpc/kvm/e500_mmu.c 	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
num_pages         779 arch/powerpc/kvm/e500_mmu.c 	pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
num_pages         783 arch/powerpc/kvm/e500_mmu.c 	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
num_pages         787 arch/powerpc/kvm/e500_mmu.c 	if (ret != num_pages) {
num_pages         788 arch/powerpc/kvm/e500_mmu.c 		num_pages = ret;
num_pages         793 arch/powerpc/kvm/e500_mmu.c 	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
num_pages         838 arch/powerpc/kvm/e500_mmu.c 	vcpu_e500->num_shared_tlb_pages = num_pages;
num_pages         853 arch/powerpc/kvm/e500_mmu.c 	for (i = 0; i < num_pages; i++)
num_pages         166 arch/s390/include/asm/pgalloc.h unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
num_pages         579 arch/s390/mm/pgalloc.c unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
num_pages         586 arch/s390/mm/pgalloc.c 	end = addr + num_pages * PAGE_SIZE;
num_pages         575 arch/x86/boot/compressed/eboot.c 			prev->size += d->num_pages << 12;
num_pages         591 arch/x86/boot/compressed/eboot.c 		entry->size = d->num_pages << PAGE_SHIFT;
num_pages         768 arch/x86/boot/compressed/kaslr.c 		region.size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         132 arch/x86/include/asm/efi.h extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
num_pages        1223 arch/x86/mm/pageattr.c 			 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
num_pages        1231 arch/x86/mm/pageattr.c 	while (num_pages-- && start < end) {
num_pages        1242 arch/x86/mm/pageattr.c 			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
num_pages        1252 arch/x86/mm/pageattr.c 		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
num_pages        1257 arch/x86/mm/pageattr.c 		cur_pages = min_t(unsigned int, num_pages, cur_pages);
num_pages        1275 arch/x86/mm/pageattr.c 	if (num_pages == cur_pages)
num_pages        1308 arch/x86/mm/pageattr.c 		populate_pte(cpa, start, end, num_pages - cur_pages,
num_pages        1311 arch/x86/mm/pageattr.c 	return num_pages;
num_pages         135 arch/x86/platform/efi/efi.c 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         160 arch/x86/platform/efi/efi.c 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         247 arch/x86/platform/efi/efi.c 	u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
num_pages         251 arch/x86/platform/efi/efi.c 	if (md->num_pages == 0) {
num_pages         253 arch/x86/platform/efi/efi.c 	} else if (md->num_pages > EFI_PAGES_MAX ||
num_pages         254 arch/x86/platform/efi/efi.c 		   EFI_PAGES_MAX - md->num_pages <
num_pages         256 arch/x86/platform/efi/efi.c 		end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
num_pages         316 arch/x86/platform/efi/efi.c 			md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
num_pages         317 arch/x86/platform/efi/efi.c 			(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
num_pages         579 arch/x86/platform/efi/efi.c 	npages = md->num_pages;
num_pages         619 arch/x86/platform/efi/efi.c 	size	  = md->num_pages << PAGE_SHIFT;
num_pages         657 arch/x86/platform/efi/efi.c 		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
num_pages         660 arch/x86/platform/efi/efi.c 			prev_md->num_pages += md->num_pages;
num_pages         674 arch/x86/platform/efi/efi.c 	size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         853 arch/x86/platform/efi/efi.c 	unsigned int num_pages;
num_pages         899 arch/x86/platform/efi/efi.c 	num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
num_pages         900 arch/x86/platform/efi/efi.c 	num_pages >>= PAGE_SHIFT;
num_pages         902 arch/x86/platform/efi/efi.c 	if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
num_pages          56 arch/x86/platform/efi/efi_32.c int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
num_pages         339 arch/x86/platform/efi/efi_64.c int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
num_pages         357 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
num_pages         425 arch/x86/platform/efi/efi_64.c 	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
num_pages         432 arch/x86/platform/efi/efi_64.c 	unsigned long size = md->num_pages << PAGE_SHIFT;
num_pages         525 arch/x86/platform/efi/efi_64.c 	err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
num_pages         531 arch/x86/platform/efi/efi_64.c 	err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
num_pages         258 arch/x86/platform/efi/quirks.c 	if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) {
num_pages         323 arch/x86/platform/efi/quirks.c 		u64 size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         398 arch/x86/platform/efi/quirks.c 	if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
num_pages         401 arch/x86/platform/efi/quirks.c 	if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
num_pages         414 arch/x86/platform/efi/quirks.c 		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
num_pages        2232 drivers/ata/libata-scsi.c 	int num_pages;
num_pages        2244 drivers/ata/libata-scsi.c 	num_pages = sizeof(pages);
num_pages        2246 drivers/ata/libata-scsi.c 		num_pages--;
num_pages        2247 drivers/ata/libata-scsi.c 	rbuf[3] = num_pages;	/* number of supported VPD pages */
num_pages        2248 drivers/ata/libata-scsi.c 	memcpy(rbuf + 4, pages, num_pages);
num_pages        1037 drivers/block/drbd/drbd_bitmap.c 	unsigned int num_pages, i, count = 0;
num_pages        1080 drivers/block/drbd/drbd_bitmap.c 	num_pages = b->bm_number_of_pages;
num_pages        1087 drivers/block/drbd/drbd_bitmap.c 		for (i = 0; i < num_pages; i++) {
num_pages        1098 drivers/block/drbd/drbd_bitmap.c 			if (i >= num_pages) /* == -1U: no hint here. */
num_pages        1112 drivers/block/drbd/drbd_bitmap.c 		for (i = 0; i < num_pages; i++) {
num_pages        1969 drivers/block/rbd.c 	int num_pages;
num_pages        1978 drivers/block/rbd.c 	num_pages = calc_pages_for(0, object_map_bytes) + 1;
num_pages        1979 drivers/block/rbd.c 	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
num_pages        1983 drivers/block/rbd.c 	reply_len = num_pages * PAGE_SIZE;
num_pages        2020 drivers/block/rbd.c 	ceph_release_page_vector(pages, num_pages);
num_pages        4937 drivers/block/rbd.c 	int num_pages = calc_pages_for(0, buf_len);
num_pages        4948 drivers/block/rbd.c 	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
num_pages         181 drivers/block/xen-blkback/blkback.c 	unsigned int num_pages = 0;
num_pages         187 drivers/block/xen-blkback/blkback.c 		page[num_pages] = list_first_entry(&ring->free_pages,
num_pages         189 drivers/block/xen-blkback/blkback.c 		list_del(&page[num_pages]->lru);
num_pages         191 drivers/block/xen-blkback/blkback.c 		if (++num_pages == NUM_BATCH_FREE_PAGES) {
num_pages         193 drivers/block/xen-blkback/blkback.c 			gnttab_free_pages(num_pages, page);
num_pages         195 drivers/block/xen-blkback/blkback.c 			num_pages = 0;
num_pages         199 drivers/block/xen-blkback/blkback.c 	if (num_pages != 0)
num_pages         200 drivers/block/xen-blkback/blkback.c 		gnttab_free_pages(num_pages, page);
num_pages        1129 drivers/block/zram/zram_drv.c 	size_t num_pages = disksize >> PAGE_SHIFT;
num_pages        1133 drivers/block/zram/zram_drv.c 	for (index = 0; index < num_pages; index++)
num_pages        1142 drivers/block/zram/zram_drv.c 	size_t num_pages;
num_pages        1144 drivers/block/zram/zram_drv.c 	num_pages = disksize >> PAGE_SHIFT;
num_pages        1145 drivers/block/zram/zram_drv.c 	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
num_pages        1191 drivers/char/agp/generic.c int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
num_pages        1196 drivers/char/agp/generic.c 	for (i = 0; i < num_pages; i++) {
num_pages        1213 drivers/char/agp/generic.c 	set_pages_array_uc(mem->pages, num_pages);
num_pages          48 drivers/firmware/efi/arm-init.c 		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
num_pages         196 drivers/firmware/efi/arm-init.c 		npages = md->num_pages;
num_pages          84 drivers/firmware/efi/arm-runtime.c 		    efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
num_pages         413 drivers/firmware/efi/efi.c 		size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         428 drivers/firmware/efi/efi.c 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         890 drivers/firmware/efi/efi.c 		    (md->num_pages << EFI_PAGE_SHIFT))))
num_pages         914 drivers/firmware/efi/efi.c 				  (md->num_pages << EFI_PAGE_SHIFT))))
num_pages         310 drivers/firmware/efi/libstub/arm-stub.c 	left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
num_pages         365 drivers/firmware/efi/libstub/arm-stub.c 		size = in->num_pages * EFI_PAGE_SIZE;
num_pages         135 drivers/firmware/efi/libstub/arm32-stub.c 		end = start + desc->num_pages * EFI_PAGE_SIZE;
num_pages         214 drivers/firmware/efi/libstub/efi-stub-helper.c 		if (desc->num_pages < nr_pages)
num_pages         218 drivers/firmware/efi/libstub/efi-stub-helper.c 		end = start + desc->num_pages * EFI_PAGE_SIZE;
num_pages         308 drivers/firmware/efi/libstub/efi-stub-helper.c 		if (desc->num_pages < nr_pages)
num_pages         312 drivers/firmware/efi/libstub/efi-stub-helper.c 		end = start + desc->num_pages * EFI_PAGE_SIZE;
num_pages          49 drivers/firmware/efi/libstub/random.c 	region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
num_pages          58 drivers/firmware/efi/memattr.c 	u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
num_pages          76 drivers/firmware/efi/memattr.c 	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
num_pages          90 drivers/firmware/efi/memattr.c 		u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
num_pages         168 drivers/firmware/efi/memattr.c 		size = md.num_pages << EFI_PAGE_SHIFT;
num_pages         224 drivers/firmware/efi/memmap.c 	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
num_pages         289 drivers/firmware/efi/memmap.c 		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
num_pages         298 drivers/firmware/efi/memmap.c 			md->num_pages = (m_end - md->phys_addr + 1) >>
num_pages         305 drivers/firmware/efi/memmap.c 			md->num_pages = (end - md->phys_addr + 1) >>
num_pages         311 drivers/firmware/efi/memmap.c 			md->num_pages = (m_start - md->phys_addr) >>
num_pages         319 drivers/firmware/efi/memmap.c 			md->num_pages = (m_end - m_start + 1) >>
num_pages         326 drivers/firmware/efi/memmap.c 			md->num_pages = (end - m_end) >>
num_pages         333 drivers/firmware/efi/memmap.c 			md->num_pages = (m_start - md->phys_addr) >>
num_pages         340 drivers/firmware/efi/memmap.c 			md->num_pages = (end - md->phys_addr + 1) >>
num_pages          48 drivers/firmware/efi/runtime-map.c EFI_RUNTIME_U64_ATTR_SHOW(num_pages);
num_pages          68 drivers/firmware/efi/runtime-map.c static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
num_pages         624 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
num_pages         639 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c 		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
num_pages          52 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	int npages = bo->tbo.num_pages;
num_pages          71 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c 	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
num_pages         124 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c 	if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
num_pages         198 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
num_pages         230 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	    atomic64_read(&mgr->available) < mem->num_pages) {
num_pages         234 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	atomic64_sub(mem->num_pages, &mgr->available);
num_pages         244 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	node->node.size = mem->num_pages;
num_pages         262 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	atomic64_add(mem->num_pages, &mgr->available);
num_pages         290 drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c 	atomic64_add(mem->num_pages, &mgr->available);
num_pages         786 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
num_pages        1349 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c 	size = bo->mem.num_pages << PAGE_SHIFT;
num_pages         173 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 	return bo->tbo.num_pages << PAGE_SHIFT;
num_pages         178 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
num_pages         210 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h 	for (pages_left = bo->tbo.mem.num_pages; pages_left;
num_pages         130 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h 			   __entry->pages = bo->tbo.num_pages;
num_pages          61 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			     struct ttm_mem_reg *mem, unsigned num_pages,
num_pages         443 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 				       new_mem->num_pages << PAGE_SHIFT,
num_pages         605 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (nodes->size != mem->num_pages)
num_pages         696 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
num_pages         713 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
num_pages         734 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		    (mm_node->size == mem->num_pages))
num_pages         815 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
num_pages         828 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	range->end = start + ttm->num_pages * PAGE_SIZE;
num_pages         857 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	for (i = 0; i < ttm->num_pages; i++) {
num_pages         900 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		gtt->userptr, ttm->num_pages);
num_pages         929 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	for (i = 0; i < ttm->num_pages; ++i)
num_pages         950 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
num_pages         951 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 				      ttm->num_pages << PAGE_SHIFT,
num_pages         964 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 					 gtt->ttm.dma_address, ttm->num_pages);
num_pages        1025 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 				ttm->num_pages - page_idx,
num_pages        1029 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
num_pages        1036 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			  ttm->num_pages, gtt->offset);
num_pages        1062 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	if (!ttm->num_pages) {
num_pages        1064 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		     ttm->num_pages, bo_mem, ttm);
num_pages        1082 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
num_pages        1087 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			  ttm->num_pages, gtt->offset);
num_pages        1191 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
num_pages        1194 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			  gtt->ttm.ttm.num_pages, gtt->offset);
num_pages        1271 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 						 ttm->num_pages);
num_pages        1385 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
num_pages        1477 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	unsigned long num_pages = bo->mem.num_pages;
num_pages        1510 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		while (num_pages) {
num_pages        1515 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			num_pages -= node->size;
num_pages        1898 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 			     struct ttm_mem_reg *mem, unsigned num_pages,
num_pages        1925 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	num_bytes = num_pages * 8;
num_pages        1944 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
num_pages        2047 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	unsigned long num_pages;
num_pages        2064 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	num_pages = bo->tbo.num_pages;
num_pages        2067 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	while (num_pages) {
num_pages        2071 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		num_pages -= mm_node->size;
num_pages        2092 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	num_pages = bo->tbo.num_pages;
num_pages        2095 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 	while (num_pages) {
num_pages        2111 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 		num_pages -= mm_node->size;
num_pages         220 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	unsigned pages = mem->num_pages;
num_pages         250 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	if (start > mem->num_pages)
num_pages         251 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 		start -= mem->num_pages;
num_pages         287 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
num_pages         305 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
num_pages         320 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	pages_left = mem->num_pages;
num_pages         368 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
num_pages         391 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c 	unsigned pages = mem->num_pages;
num_pages          61 drivers/gpu/drm/drm_cache.c 				    unsigned long num_pages)
num_pages          66 drivers/gpu/drm/drm_cache.c 	for (i = 0; i < num_pages; i++)
num_pages          81 drivers/gpu/drm/drm_cache.c drm_clflush_pages(struct page *pages[], unsigned long num_pages)
num_pages          86 drivers/gpu/drm/drm_cache.c 		drm_cache_flush_clflush(pages, num_pages);
num_pages          95 drivers/gpu/drm/drm_cache.c 	for (i = 0; i < num_pages; i++) {
num_pages         476 drivers/gpu/drm/drm_gem_shmem_helper.c 	loff_t num_pages = obj->size >> PAGE_SHIFT;
num_pages         479 drivers/gpu/drm/drm_gem_shmem_helper.c 	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
num_pages         312 drivers/gpu/drm/drm_gem_vram_helper.c 	ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap);
num_pages          62 drivers/gpu/drm/drm_memory.c 	unsigned long i, num_pages =
num_pages          89 drivers/gpu/drm/drm_memory.c 	page_map = vmalloc(array_size(num_pages, sizeof(struct page *)));
num_pages          94 drivers/gpu/drm/drm_memory.c 	for (i = 0; i < num_pages; ++i)
num_pages          96 drivers/gpu/drm/drm_memory.c 	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
num_pages         111 drivers/gpu/drm/drm_vram_mm_helper.c 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
num_pages         674 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		unsigned num_pages = npages - pinned;
num_pages         678 drivers/gpu/drm/etnaviv/etnaviv_gem.c 		ret = get_user_pages_fast(ptr, num_pages,
num_pages         411 drivers/gpu/drm/gma500/gtt.c 	unsigned i, num_pages;
num_pages         529 drivers/gpu/drm/gma500/gtt.c 	num_pages = vram_stolen_size >> PAGE_SHIFT;
num_pages         531 drivers/gpu/drm/gma500/gtt.c 		num_pages, pfn_base << PAGE_SHIFT, 0);
num_pages         532 drivers/gpu/drm/gma500/gtt.c 	for (i = 0; i < num_pages; ++i) {
num_pages         500 drivers/gpu/drm/gma500/mmu.c 			       uint32_t num_pages, uint32_t desired_tile_stride,
num_pages         518 drivers/gpu/drm/gma500/mmu.c 		rows = num_pages / desired_tile_stride;
num_pages         520 drivers/gpu/drm/gma500/mmu.c 		desired_tile_stride = num_pages;
num_pages         548 drivers/gpu/drm/gma500/mmu.c 			       uint32_t num_pages, uint32_t desired_tile_stride,
num_pages         556 drivers/gpu/drm/gma500/mmu.c 				 unsigned long address, uint32_t num_pages)
num_pages         567 drivers/gpu/drm/gma500/mmu.c 	end = addr + (num_pages << PAGE_SHIFT);
num_pages         584 drivers/gpu/drm/gma500/mmu.c 		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
num_pages         595 drivers/gpu/drm/gma500/mmu.c 			  uint32_t num_pages, uint32_t desired_tile_stride,
num_pages         609 drivers/gpu/drm/gma500/mmu.c 		rows = num_pages / desired_tile_stride;
num_pages         611 drivers/gpu/drm/gma500/mmu.c 		desired_tile_stride = num_pages;
num_pages         641 drivers/gpu/drm/gma500/mmu.c 		psb_mmu_flush_ptes(pd, f_address, num_pages,
num_pages         651 drivers/gpu/drm/gma500/mmu.c 				unsigned long address, uint32_t num_pages,
num_pages         665 drivers/gpu/drm/gma500/mmu.c 	end = addr + (num_pages << PAGE_SHIFT);
num_pages         686 drivers/gpu/drm/gma500/mmu.c 		psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);
num_pages         697 drivers/gpu/drm/gma500/mmu.c 			 unsigned long address, uint32_t num_pages,
num_pages         714 drivers/gpu/drm/gma500/mmu.c 		if (num_pages % desired_tile_stride != 0)
num_pages         716 drivers/gpu/drm/gma500/mmu.c 		rows = num_pages / desired_tile_stride;
num_pages         718 drivers/gpu/drm/gma500/mmu.c 		desired_tile_stride = num_pages;
num_pages         752 drivers/gpu/drm/gma500/mmu.c 		psb_mmu_flush_ptes(pd, f_address, num_pages,
num_pages          69 drivers/gpu/drm/gma500/mmu.h 					uint32_t num_pages);
num_pages          73 drivers/gpu/drm/gma500/mmu.h 				       uint32_t num_pages, int type);
num_pages          78 drivers/gpu/drm/gma500/mmu.h 				unsigned long address, uint32_t num_pages,
num_pages          82 drivers/gpu/drm/gma500/mmu.h 				 unsigned long address, uint32_t num_pages,
num_pages         430 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 			       struct page **pvec, unsigned long num_pages)
num_pages         442 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
num_pages         443 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 					  0, num_pages << PAGE_SHIFT,
num_pages         582 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
num_pages         618 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
num_pages         632 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 						       num_pages,
num_pages         641 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 	} else if (pinned < num_pages) {
num_pages         645 drivers/gpu/drm/i915/gem/i915_gem_userptr.c 		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
num_pages         278 drivers/gpu/drm/i915/i915_gpu_error.c 	if (dst->page_count >= dst->num_pages)
num_pages         964 drivers/gpu/drm/i915/i915_gpu_error.c 	unsigned long num_pages;
num_pages         974 drivers/gpu/drm/i915/i915_gpu_error.c 	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages         975 drivers/gpu/drm/i915/i915_gpu_error.c 	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
num_pages         976 drivers/gpu/drm/i915/i915_gpu_error.c 	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
num_pages         987 drivers/gpu/drm/i915/i915_gpu_error.c 	dst->num_pages = num_pages;
num_pages         130 drivers/gpu/drm/i915/i915_gpu_error.h 			int num_pages;
num_pages         305 drivers/gpu/drm/nouveau/nouveau_bo.c 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
num_pages         363 drivers/gpu/drm/nouveau/nouveau_bo.c 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
num_pages         523 drivers/gpu/drm/nouveau/nouveau_bo.c 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
num_pages         552 drivers/gpu/drm/nouveau/nouveau_bo.c 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
num_pages         572 drivers/gpu/drm/nouveau/nouveau_bo.c 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
num_pages         765 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, new_reg->num_pages);
num_pages         789 drivers/gpu/drm/nouveau/nouveau_bo.c 	u32 page_count = new_reg->num_pages;
num_pages         792 drivers/gpu/drm/nouveau/nouveau_bo.c 	page_count = new_reg->num_pages;
num_pages         827 drivers/gpu/drm/nouveau/nouveau_bo.c 	u32 page_count = new_reg->num_pages;
num_pages         830 drivers/gpu/drm/nouveau/nouveau_bo.c 	page_count = new_reg->num_pages;
num_pages         866 drivers/gpu/drm/nouveau/nouveau_bo.c 	u32 page_count = new_reg->num_pages;
num_pages         869 drivers/gpu/drm/nouveau/nouveau_bo.c 	page_count = new_reg->num_pages;
num_pages         910 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
num_pages         923 drivers/gpu/drm/nouveau/nouveau_bo.c 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
num_pages         954 drivers/gpu/drm/nouveau/nouveau_bo.c 	u64 length = (new_reg->num_pages << PAGE_SHIFT);
num_pages        1051 drivers/gpu/drm/nouveau/nouveau_bo.c 	u32 page_count = new_reg->num_pages;
num_pages        1062 drivers/gpu/drm/nouveau/nouveau_bo.c 	page_count = new_reg->num_pages;
num_pages        1447 drivers/gpu/drm/nouveau/nouveau_bo.c 	reg->bus.size = reg->num_pages << PAGE_SHIFT;
num_pages        1566 drivers/gpu/drm/nouveau/nouveau_bo.c 	    bo->mem.start + bo->mem.num_pages < mappable)
num_pages        1599 drivers/gpu/drm/nouveau/nouveau_bo.c 						 ttm_dma->dma_address, ttm->num_pages);
num_pages        1624 drivers/gpu/drm/nouveau/nouveau_bo.c 	for (i = 0; i < ttm->num_pages; i++) {
num_pages        1674 drivers/gpu/drm/nouveau/nouveau_bo.c 	for (i = 0; i < ttm->num_pages; i++) {
num_pages         381 drivers/gpu/drm/nouveau/nouveau_fbcon.c 	info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
num_pages         384 drivers/gpu/drm/nouveau/nouveau_fbcon.c 	info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
num_pages         244 drivers/gpu/drm/nouveau/nouveau_gem.c 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
num_pages         642 drivers/gpu/drm/nouveau/nouveau_gem.c 			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
num_pages         649 drivers/gpu/drm/nouveau/nouveau_gem.c 			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
num_pages         822 drivers/gpu/drm/nouveau/nouveau_gem.c 							  num_pages,
num_pages         125 drivers/gpu/drm/nouveau/nouveau_mem.c 				 reg->num_pages << PAGE_SHIFT,
num_pages         140 drivers/gpu/drm/nouveau/nouveau_mem.c 	u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
num_pages          33 drivers/gpu/drm/nouveau/nouveau_prime.c 	int npages = nvbo->bo.num_pages;
num_pages          43 drivers/gpu/drm/nouveau/nouveau_prime.c 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
num_pages         139 drivers/gpu/drm/nouveau/nouveau_ttm.c 			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
num_pages         582 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages         590 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
num_pages         138 drivers/gpu/drm/qxl/qxl_object.c 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
num_pages          58 drivers/gpu/drm/qxl/qxl_object.h 	return bo->tbo.num_pages << PAGE_SHIFT;
num_pages         170 drivers/gpu/drm/qxl/qxl_ttm.c 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
num_pages         215 drivers/gpu/drm/qxl/qxl_ttm.c 	if (!ttm->num_pages) {
num_pages         217 drivers/gpu/drm/qxl/qxl_ttm.c 		     ttm->num_pages, bo_mem, ttm);
num_pages         404 drivers/gpu/drm/radeon/radeon_cs.c 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
num_pages          57 drivers/gpu/drm/radeon/radeon_object.c 	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
num_pages         287 drivers/gpu/drm/radeon/radeon_object.c 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
num_pages         656 drivers/gpu/drm/radeon/radeon_object.c 			       bo->tbo.num_pages << PAGE_SHIFT);
num_pages         813 drivers/gpu/drm/radeon/radeon_object.c 	size = bo->mem.num_pages << PAGE_SHIFT;
num_pages          98 drivers/gpu/drm/radeon/radeon_object.h 	return bo->tbo.num_pages << PAGE_SHIFT;
num_pages         103 drivers/gpu/drm/radeon/radeon_object.h 	return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
num_pages          37 drivers/gpu/drm/radeon/radeon_prime.c 	int npages = bo->tbo.num_pages;
num_pages          47 drivers/gpu/drm/radeon/radeon_prime.c 	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
num_pages          25 drivers/gpu/drm/radeon/radeon_trace.h 			   __entry->pages = bo->tbo.num_pages;
num_pages         209 drivers/gpu/drm/radeon/radeon_ttm.c 	unsigned num_pages;
num_pages         246 drivers/gpu/drm/radeon/radeon_ttm.c 	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
num_pages         247 drivers/gpu/drm/radeon/radeon_ttm.c 	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
num_pages         398 drivers/gpu/drm/radeon/radeon_ttm.c 	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
num_pages         409 drivers/gpu/drm/radeon/radeon_ttm.c 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
num_pages         502 drivers/gpu/drm/radeon/radeon_ttm.c 		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
num_pages         510 drivers/gpu/drm/radeon/radeon_ttm.c 		unsigned num_pages = ttm->num_pages - pinned;
num_pages         514 drivers/gpu/drm/radeon/radeon_ttm.c 		r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
num_pages         521 drivers/gpu/drm/radeon/radeon_ttm.c 	} while (pinned < ttm->num_pages);
num_pages         523 drivers/gpu/drm/radeon/radeon_ttm.c 	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
num_pages         524 drivers/gpu/drm/radeon/radeon_ttm.c 				      ttm->num_pages << PAGE_SHIFT,
num_pages         535 drivers/gpu/drm/radeon/radeon_ttm.c 					 gtt->ttm.dma_address, ttm->num_pages);
num_pages         590 drivers/gpu/drm/radeon/radeon_ttm.c 	if (!ttm->num_pages) {
num_pages         592 drivers/gpu/drm/radeon/radeon_ttm.c 		     ttm->num_pages, bo_mem, ttm);
num_pages         596 drivers/gpu/drm/radeon/radeon_ttm.c 	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
num_pages         600 drivers/gpu/drm/radeon/radeon_ttm.c 			  ttm->num_pages, (unsigned)gtt->offset);
num_pages         610 drivers/gpu/drm/radeon/radeon_ttm.c 	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
num_pages         685 drivers/gpu/drm/radeon/radeon_ttm.c 						 gtt->ttm.dma_address, ttm->num_pages);
num_pages          85 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;
num_pages          87 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
num_pages         134 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
num_pages         444 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
num_pages         547 drivers/gpu/drm/rockchip/rockchip_drm_gem.c 		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
num_pages          23 drivers/gpu/drm/rockchip/rockchip_drm_gem.h 	unsigned long num_pages;
num_pages         263 drivers/gpu/drm/tegra/fb.c 		bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
num_pages          52 drivers/gpu/drm/tegra/gem.c 		return vmap(obj->pages, obj->num_pages, VM_MAP,
num_pages         224 drivers/gpu/drm/tegra/gem.c 	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
num_pages         226 drivers/gpu/drm/tegra/gem.c 	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
num_pages         514 drivers/gpu/drm/tegra/gem.c 		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
num_pages         517 drivers/gpu/drm/tegra/gem.c 		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
num_pages          38 drivers/gpu/drm/tegra/gem.h 	unsigned long num_pages;
num_pages          60 drivers/gpu/drm/ttm/ttm_agp_backend.c 	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
num_pages          65 drivers/gpu/drm/ttm/ttm_agp_backend.c 	for (i = 0; i < ttm->num_pages; i++) {
num_pages         102 drivers/gpu/drm/ttm/ttm_bo.c 		   bo, bo->mem.num_pages, bo->mem.size >> 10,
num_pages         408 drivers/gpu/drm/ttm/ttm_bo.c 	ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
num_pages        1191 drivers/gpu/drm/ttm/ttm_bo.c 	mem.num_pages = bo->num_pages;
num_pages        1192 drivers/gpu/drm/ttm/ttm_bo.c 	mem.size = mem.num_pages << PAGE_SHIFT;
num_pages        1220 drivers/gpu/drm/ttm/ttm_bo.c 		     (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
num_pages        1300 drivers/gpu/drm/ttm/ttm_bo.c 	unsigned long num_pages;
num_pages        1314 drivers/gpu/drm/ttm/ttm_bo.c 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages        1315 drivers/gpu/drm/ttm/ttm_bo.c 	if (num_pages == 0) {
num_pages        1336 drivers/gpu/drm/ttm/ttm_bo.c 	bo->num_pages = num_pages;
num_pages        1337 drivers/gpu/drm/ttm/ttm_bo.c 	bo->mem.size = num_pages << PAGE_SHIFT;
num_pages        1339 drivers/gpu/drm/ttm/ttm_bo.c 	bo->mem.num_pages = bo->num_pages;
num_pages        1371 drivers/gpu/drm/ttm/ttm_bo.c 					 bo->mem.num_pages);
num_pages          77 drivers/gpu/drm/ttm/ttm_bo_manager.c 					  mem->num_pages,
num_pages         396 drivers/gpu/drm/ttm/ttm_bo_util.c 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
num_pages         415 drivers/gpu/drm/ttm/ttm_bo_util.c 		add = new_mem->num_pages - 1;
num_pages         418 drivers/gpu/drm/ttm/ttm_bo_util.c 	for (i = 0; i < new_mem->num_pages; ++i) {
num_pages         579 drivers/gpu/drm/ttm/ttm_bo_util.c 			   unsigned long num_pages,
num_pages         597 drivers/gpu/drm/ttm/ttm_bo_util.c 	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
num_pages         613 drivers/gpu/drm/ttm/ttm_bo_util.c 		map->virtual = vmap(ttm->pages + start_page, num_pages,
num_pages         620 drivers/gpu/drm/ttm/ttm_bo_util.c 		unsigned long start_page, unsigned long num_pages,
num_pages         630 drivers/gpu/drm/ttm/ttm_bo_util.c 	if (num_pages > bo->num_pages)
num_pages         632 drivers/gpu/drm/ttm/ttm_bo_util.c 	if (start_page > bo->num_pages)
num_pages         641 drivers/gpu/drm/ttm/ttm_bo_util.c 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
num_pages         644 drivers/gpu/drm/ttm/ttm_bo_util.c 		size = num_pages << PAGE_SHIFT;
num_pages         218 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (unlikely(page_offset >= bo->num_pages)) {
num_pages         365 drivers/gpu/drm/ttm/ttm_bo_vm.c 	if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
num_pages         542 drivers/gpu/drm/ttm/ttm_memory.c 			uint64_t num_pages,
num_pages         551 drivers/gpu/drm/ttm/ttm_memory.c 	available -= num_pages;
num_pages        1045 drivers/gpu/drm/ttm/ttm_page_alloc.c 	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
num_pages        1059 drivers/gpu/drm/ttm/ttm_page_alloc.c 	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
num_pages        1062 drivers/gpu/drm/ttm/ttm_page_alloc.c 	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
num_pages        1069 drivers/gpu/drm/ttm/ttm_page_alloc.c 	for (i = 0; i < ttm->num_pages; ++i) {
num_pages        1093 drivers/gpu/drm/ttm/ttm_page_alloc.c 	ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
num_pages        1107 drivers/gpu/drm/ttm/ttm_page_alloc.c 	for (i = 0; i < tt->ttm.num_pages; ++i) {
num_pages        1109 drivers/gpu/drm/ttm/ttm_page_alloc.c 		size_t num_pages = 1;
num_pages        1111 drivers/gpu/drm/ttm/ttm_page_alloc.c 		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
num_pages        1115 drivers/gpu/drm/ttm/ttm_page_alloc.c 			++num_pages;
num_pages        1119 drivers/gpu/drm/ttm/ttm_page_alloc.c 						  0, num_pages * PAGE_SIZE,
num_pages        1131 drivers/gpu/drm/ttm/ttm_page_alloc.c 		for (j = 1; j < num_pages; ++j) {
num_pages        1144 drivers/gpu/drm/ttm/ttm_page_alloc.c 	for (i = 0; i < tt->ttm.num_pages;) {
num_pages        1146 drivers/gpu/drm/ttm/ttm_page_alloc.c 		size_t num_pages = 1;
num_pages        1153 drivers/gpu/drm/ttm/ttm_page_alloc.c 		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
num_pages        1157 drivers/gpu/drm/ttm/ttm_page_alloc.c 			++num_pages;
num_pages        1160 drivers/gpu/drm/ttm/ttm_page_alloc.c 		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
num_pages        1163 drivers/gpu/drm/ttm/ttm_page_alloc.c 		i += num_pages;
num_pages         356 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	unsigned num_pages;
num_pages         360 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		num_pages = pool->size / PAGE_SIZE;
num_pages         361 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		if (ttm_set_pages_wb(page, num_pages))
num_pages         363 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 			       pool->dev_name, num_pages);
num_pages         891 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	unsigned long num_pages = ttm->num_pages;
num_pages         901 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
num_pages         922 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	while (num_pages >= HPAGE_PMD_NR) {
num_pages         944 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		num_pages -= HPAGE_PMD_NR;
num_pages         959 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	while (num_pages) {
num_pages         975 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 		--num_pages;
num_pages        1072 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c 	for (i = 0; i < ttm->num_pages; i++) {
num_pages          87 drivers/gpu/drm/ttm/ttm_tt.c 	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
num_pages          96 drivers/gpu/drm/ttm/ttm_tt.c 	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
num_pages         102 drivers/gpu/drm/ttm/ttm_tt.c 	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
num_pages         108 drivers/gpu/drm/ttm/ttm_tt.c 	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
num_pages         164 drivers/gpu/drm/ttm/ttm_tt.c 		drm_clflush_pages(ttm->pages, ttm->num_pages);
num_pages         166 drivers/gpu/drm/ttm/ttm_tt.c 	for (i = 0; i < ttm->num_pages; ++i) {
num_pages         230 drivers/gpu/drm/ttm/ttm_tt.c 	ttm->num_pages = bo->num_pages;
num_pages         361 drivers/gpu/drm/ttm/ttm_tt.c 	for (i = 0; i < ttm->num_pages; ++i) {
num_pages         403 drivers/gpu/drm/ttm/ttm_tt.c 						ttm->num_pages << PAGE_SHIFT,
num_pages         415 drivers/gpu/drm/ttm/ttm_tt.c 	for (i = 0; i < ttm->num_pages; ++i) {
num_pages         456 drivers/gpu/drm/ttm/ttm_tt.c 	for (i = 0; i < ttm->num_pages; ++i)
num_pages         484 drivers/gpu/drm/ttm/ttm_tt.c 	for (i = 0; i < ttm->num_pages; ++i) {
num_pages          78 drivers/gpu/drm/vgem/vgem_drv.c 	loff_t num_pages;
num_pages          82 drivers/gpu/drm/vgem/vgem_drv.c 	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
num_pages          84 drivers/gpu/drm/vgem/vgem_drv.c 	if (page_offset >= num_pages)
num_pages         191 drivers/gpu/drm/via/via_dmablit.c 		put_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
num_pages         236 drivers/gpu/drm/via/via_dmablit.c 	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
num_pages         239 drivers/gpu/drm/via/via_dmablit.c 	vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
num_pages         243 drivers/gpu/drm/via/via_dmablit.c 			vsg->num_pages,
num_pages         246 drivers/gpu/drm/via/via_dmablit.c 	if (ret != vsg->num_pages) {
num_pages          42 drivers/gpu/drm/via/via_dmablit.h 	unsigned long num_pages;
num_pages         196 drivers/gpu/drm/virtio/virtgpu_object.c 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
num_pages         208 drivers/gpu/drm/virtio/virtgpu_object.c 	int nr_pages = bo->tbo.num_pages;
num_pages          37 drivers/gpu/drm/virtio/virtgpu_prime.c 	if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
num_pages          42 drivers/gpu/drm/virtio/virtgpu_prime.c 				     bo->tbo.ttm->num_pages);
num_pages         166 drivers/gpu/drm/virtio/virtgpu_ttm.c 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
num_pages          49 drivers/gpu/drm/vkms/vkms_gem.c 	loff_t num_pages;
num_pages          53 drivers/gpu/drm/vkms/vkms_gem.c 	num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
num_pages          55 drivers/gpu/drm/vkms/vkms_gem.c 	if (page_offset > num_pages)
num_pages         484 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 	d.dst_num_pages = dst->num_pages;
num_pages         485 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c 	d.src_num_pages = src->num_pages;
num_pages         226 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	place.lpfn = bo->num_pages;
num_pages         247 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	    bo->mem.start < bo->num_pages &&
num_pages         398 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
num_pages         434 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
num_pages         435 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
num_pages         449 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c 			ttm_round_pot(num_pages * sizeof(dma_addr_t));
num_pages         431 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c 	for (i = 0; i < old_bo->num_pages; ++i) {
num_pages         301 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	unsigned long num_pages;
num_pages         323 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 	unsigned long num_pages;
num_pages         664 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 			unsigned long num_pages,
num_pages        1041 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 		if (unlikely(new_query_bo->base.num_pages > 4)) {
num_pages        1540 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
num_pages          40 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 			 unsigned long num_pages,
num_pages          48 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
num_pages          49 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
num_pages          59 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 	define_cmd.numPages = num_pages;
num_pages          74 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 	while (num_pages > 0) {
num_pages          75 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
num_pages          95 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 		num_pages -= nr;
num_pages         129 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 		 unsigned long num_pages,
num_pages         142 drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c 	return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
num_pages          65 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c 		gman->used_gmr_pages += bo->num_pages;
num_pages          72 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c 	mem->num_pages = bo->num_pages;
num_pages          78 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c 	gman->used_gmr_pages -= bo->num_pages;
num_pages          93 drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c 		gman->used_gmr_pages -= mem->num_pages;
num_pages        1229 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 	if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
num_pages          60 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	unsigned long num_pages;
num_pages         416 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
num_pages         443 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c 	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
num_pages          43 drivers/gpu/drm/vmwgfx/vmwgfx_reg.h 	u32 num_pages;
num_pages         343 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c 		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
num_pages         859 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c 		if ((u64)buffer->base.num_pages * PAGE_SIZE <
num_pages         515 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c 	suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE;
num_pages        1253 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c 	suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
num_pages        1628 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 			if (res->backup->base.num_pages * PAGE_SIZE <
num_pages        1671 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
num_pages        1747 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
num_pages         264 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	return ++(viter->i) < viter->num_pages;
num_pages         328 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	viter->num_pages = vsgt->num_pages;
num_pages         428 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
num_pages         439 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
num_pages         445 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 			(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
num_pages         446 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 			 (unsigned long) vsgt->num_pages << PAGE_SHIFT,
num_pages         452 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 		if (vsgt->num_pages > vmw_tt->sgt.nents) {
num_pages         454 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 				sgl_size * (vsgt->num_pages -
num_pages         595 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 				    ttm->num_pages, vmw_be->gmr_id);
num_pages         599 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 				vmw_mob_create(ttm->num_pages);
num_pages         605 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 				    &vmw_be->vsgt, ttm->num_pages,
num_pages         667 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
num_pages         698 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
num_pages         805 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 	mem->bus.size = mem->num_pages << PAGE_SHIFT;
num_pages         181 drivers/gpu/drm/xen/xen_drm_front.c 	buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
num_pages          28 drivers/gpu/drm/xen/xen_drm_front_gem.c 	size_t num_pages;
num_pages          47 drivers/gpu/drm/xen/xen_drm_front_gem.c 	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
num_pages          48 drivers/gpu/drm/xen/xen_drm_front_gem.c 	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
num_pages         102 drivers/gpu/drm/xen/xen_drm_front_gem.c 		ret = alloc_xenballooned_pages(xen_obj->num_pages,
num_pages         106 drivers/gpu/drm/xen/xen_drm_front_gem.c 				  xen_obj->num_pages, ret);
num_pages         118 drivers/gpu/drm/xen/xen_drm_front_gem.c 	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
num_pages         155 drivers/gpu/drm/xen/xen_drm_front_gem.c 				free_xenballooned_pages(xen_obj->num_pages,
num_pages         182 drivers/gpu/drm/xen/xen_drm_front_gem.c 	return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
num_pages         207 drivers/gpu/drm/xen/xen_drm_front_gem.c 					       NULL, xen_obj->num_pages);
num_pages         253 drivers/gpu/drm/xen/xen_drm_front_gem.c 	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
num_pages         283 drivers/gpu/drm/xen/xen_drm_front_gem.c 	return vmap(xen_obj->pages, xen_obj->num_pages,
num_pages         278 drivers/hv/hv_balloon.c 	__u32 num_pages;
num_pages         450 drivers/hv/hv_balloon.c 	__u32 num_pages;
num_pages        1193 drivers/hv/hv_balloon.c 	int num_pages = range_array->finfo.page_cnt;
num_pages        1198 drivers/hv/hv_balloon.c 	for (i = 0; i < num_pages; i++) {
num_pages        1209 drivers/hv/hv_balloon.c 					unsigned int num_pages,
num_pages        1216 drivers/hv/hv_balloon.c 	for (i = 0; i < num_pages / alloc_unit; i++) {
num_pages        1259 drivers/hv/hv_balloon.c 	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
num_pages        1279 drivers/hv/hv_balloon.c 	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
num_pages        1281 drivers/hv/hv_balloon.c 			avail_pages < num_pages ? "Not enough memory." :
num_pages        1284 drivers/hv/hv_balloon.c 		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
num_pages        1294 drivers/hv/hv_balloon.c 		num_pages -= num_ballooned;
num_pages        1295 drivers/hv/hv_balloon.c 		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
num_pages        1303 drivers/hv/hv_balloon.c 		if (num_ballooned == 0 || num_ballooned == num_pages) {
num_pages        1305 drivers/hv/hv_balloon.c 				num_pages, dm_device.balloon_wrk.num_pages);
num_pages        1509 drivers/hv/hv_balloon.c 			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
num_pages          93 drivers/infiniband/hw/mlx5/cmd.c 	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
num_pages         112 drivers/infiniband/hw/mlx5/cmd.c 	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
num_pages         122 drivers/infiniband/hw/mlx5/cmd.c 						      num_pages, 0);
num_pages         126 drivers/infiniband/hw/mlx5/cmd.c 				   page_idx, num_pages);
num_pages         140 drivers/infiniband/hw/mlx5/cmd.c 				     page_idx, num_pages);
num_pages         164 drivers/infiniband/hw/mlx5/cmd.c 	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
num_pages         182 drivers/infiniband/hw/mlx5/cmd.c 			     start_page_idx, num_pages);
num_pages         129 drivers/infiniband/hw/mlx5/mem.c 			    int page_shift, size_t offset, size_t num_pages,
num_pages         145 drivers/infiniband/hw/mlx5/mem.c 		for (i = 0; i < num_pages; ++i) {
num_pages         186 drivers/infiniband/hw/mlx5/mem.c 			if (i >> shift >= offset + num_pages)
num_pages        1216 drivers/infiniband/hw/mlx5/mlx5_ib.h 			    int page_shift, size_t offset, size_t num_pages,
num_pages         444 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->num_pages = 4;
num_pages         448 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
num_pages         568 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	int num_pages, status;
num_pages         574 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
num_pages         579 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	cmd->cqid_pages = num_pages;
num_pages         594 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
num_pages        1621 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 			       int *num_pages, int *page_size)
num_pages        1637 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	*num_pages =
num_pages         320 drivers/infiniband/hw/ocrdma/ocrdma_sli.h 	u32 num_pages;
num_pages         954 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	uresp.num_pages = 1;
num_pages        1419 drivers/infiniband/hw/qedr/verbs.c 	in_params.num_pages = page_cnt;
num_pages          40 drivers/infiniband/hw/qib/qib_user_pages.c static void __qib_release_user_pages(struct page **p, size_t num_pages,
num_pages          43 drivers/infiniband/hw/qib/qib_user_pages.c 	put_user_pages_dirty_lock(p, num_pages, dirty);
num_pages          94 drivers/infiniband/hw/qib/qib_user_pages.c int qib_get_user_pages(unsigned long start_page, size_t num_pages,
num_pages         102 drivers/infiniband/hw/qib/qib_user_pages.c 	locked = atomic64_add_return(num_pages, &current->mm->pinned_vm);
num_pages         110 drivers/infiniband/hw/qib/qib_user_pages.c 	for (got = 0; got < num_pages; got += ret) {
num_pages         112 drivers/infiniband/hw/qib/qib_user_pages.c 				     num_pages - got,
num_pages         126 drivers/infiniband/hw/qib/qib_user_pages.c 	atomic64_sub(num_pages, &current->mm->pinned_vm);
num_pages         130 drivers/infiniband/hw/qib/qib_user_pages.c void qib_release_user_pages(struct page **p, size_t num_pages)
num_pages         132 drivers/infiniband/hw/qib/qib_user_pages.c 	__qib_release_user_pages(p, num_pages, 1);
num_pages         136 drivers/infiniband/hw/qib/qib_user_pages.c 		atomic64_sub(num_pages, &current->mm->pinned_vm);
num_pages         546 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 				     u64 *page_list, int num_pages);
num_pages         266 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 	u32 num_pages;				/* Num pages incl. header. */
num_pages         426 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
num_pages         502 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
num_pages         929 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
num_pages         931 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 				   dev->dsr->async_ring_pages.num_pages, true);
num_pages         938 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
num_pages         940 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 				   dev->dsr->cq_ring_pages.num_pages, true);
num_pages         208 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c 				     int num_pages)
num_pages         213 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c 	if (num_pages > pdir->npages)
num_pages         216 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c 	for (i = 0; i < num_pages; i++) {
num_pages         136 drivers/infiniband/sw/rxe/rxe_loc.h 		      u64 *page, int num_pages, u64 iova);
num_pages         592 drivers/infiniband/sw/rxe/rxe_mr.c 		      u64 *page, int num_pages, u64 iova)
num_pages         601 drivers/infiniband/sw/rxe/rxe_mr.c 	if (num_pages > mem->max_buf) {
num_pages         611 drivers/infiniband/sw/rxe/rxe_mr.c 	for (i = 0; i < num_pages; i++) {
num_pages         626 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->length	= num_pages << mem->page_shift;
num_pages         134 drivers/infiniband/sw/siw/siw.h 	int num_pages;
num_pages          63 drivers/infiniband/sw/siw/siw_mem.c static void siw_free_plist(struct siw_page_chunk *chunk, int num_pages,
num_pages          66 drivers/infiniband/sw/siw/siw_mem.c 	put_user_pages_dirty_lock(chunk->plist, num_pages, dirty);
num_pages          72 drivers/infiniband/sw/siw/siw_mem.c 	int i, num_pages = umem->num_pages;
num_pages          74 drivers/infiniband/sw/siw/siw_mem.c 	for (i = 0; num_pages; i++) {
num_pages          75 drivers/infiniband/sw/siw/siw_mem.c 		int to_free = min_t(int, PAGES_PER_CHUNK, num_pages);
num_pages          80 drivers/infiniband/sw/siw/siw_mem.c 		num_pages -= to_free;
num_pages          82 drivers/infiniband/sw/siw/siw_mem.c 	atomic64_sub(umem->num_pages, &mm_s->pinned_vm);
num_pages         375 drivers/infiniband/sw/siw/siw_mem.c 	int num_pages, num_chunks, i, rv = 0;
num_pages         384 drivers/infiniband/sw/siw/siw_mem.c 	num_pages = PAGE_ALIGN(start + len - first_page_va) >> PAGE_SHIFT;
num_pages         385 drivers/infiniband/sw/siw/siw_mem.c 	num_chunks = (num_pages >> CHUNK_SHIFT) + 1;
num_pages         404 drivers/infiniband/sw/siw/siw_mem.c 	if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
num_pages         416 drivers/infiniband/sw/siw/siw_mem.c 	for (i = 0; num_pages; i++) {
num_pages         417 drivers/infiniband/sw/siw/siw_mem.c 		int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
num_pages         435 drivers/infiniband/sw/siw/siw_mem.c 			umem->num_pages += rv;
num_pages         441 drivers/infiniband/sw/siw/siw_mem.c 		num_pages -= got;
num_pages          69 drivers/infiniband/sw/siw/siw_mem.h 	if (likely(page_idx < umem->num_pages))
num_pages        1320 drivers/infiniband/sw/siw/siw_verbs.c 		unsigned long num_pages =
num_pages        1324 drivers/infiniband/sw/siw/siw_verbs.c 		if (num_pages > mem_limit - current->mm->locked_vm) {
num_pages        1326 drivers/infiniband/sw/siw/siw_verbs.c 				   num_pages, mem_limit,
num_pages         174 drivers/iommu/dma-iommu.c 	int i, num_pages;
num_pages         177 drivers/iommu/dma-iommu.c 	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
num_pages         179 drivers/iommu/dma-iommu.c 	for (i = 0; i < num_pages; i++) {
num_pages         267 drivers/md/dm-log-writes.c 	int num_pages, bio_pages, pg_datalen, pg_sectorlen, i;
num_pages         274 drivers/md/dm-log-writes.c 		num_pages = ALIGN(datalen, PAGE_SIZE) >> PAGE_SHIFT;
num_pages         275 drivers/md/dm-log-writes.c 		bio_pages = min(num_pages, BIO_MAX_PAGES);
num_pages         783 drivers/md/md-bitmap.c 	unsigned long num_pages;
num_pages         790 drivers/md/md-bitmap.c 	num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
num_pages         791 drivers/md/md-bitmap.c 	offset = slot_number * num_pages;
num_pages         793 drivers/md/md-bitmap.c 	store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
num_pages         811 drivers/md/md-bitmap.c 	for ( ; pnum < num_pages; pnum++) {
num_pages         824 drivers/md/md-bitmap.c 		roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
num_pages          49 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	unsigned int			num_pages;
num_pages         106 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	int num_pages;
num_pages         120 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	buf->num_pages = size >> PAGE_SHIFT;
num_pages         123 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
num_pages         133 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			buf->num_pages, 0, size, GFP_KERNEL);
num_pages         157 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		__func__, buf->num_pages);
num_pages         164 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	num_pages = buf->num_pages;
num_pages         165 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	while (num_pages--)
num_pages         166 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		__free_page(buf->pages[num_pages]);
num_pages         178 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	int i = buf->num_pages;
num_pages         182 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			buf->num_pages);
num_pages         186 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			vm_unmap_ram(buf->vaddr, buf->num_pages);
num_pages         250 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	buf->num_pages = frame_vector_count(vec);
num_pages         253 drivers/media/common/videobuf2/videobuf2-dma-sg.c 			buf->num_pages, buf->offset, size, 0))
num_pages         285 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	int i = buf->num_pages;
num_pages         288 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	       __func__, buf->num_pages);
num_pages         292 drivers/media/common/videobuf2/videobuf2-dma-sg.c 		vm_unmap_ram(buf->vaddr, buf->num_pages);
num_pages         313 drivers/media/common/videobuf2/videobuf2-dma-sg.c 					buf->num_pages, -1, PAGE_KERNEL);
num_pages         337 drivers/media/common/videobuf2/videobuf2-dma-sg.c 	err = vm_map_pages(vma, buf->pages, buf->num_pages);
num_pages         216 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
num_pages         228 drivers/media/common/videobuf2/videobuf2-vmalloc.c 	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
num_pages         227 drivers/misc/genwqe/card_utils.c 			      int num_pages)
num_pages         232 drivers/misc/genwqe/card_utils.c 	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
num_pages         240 drivers/misc/genwqe/card_utils.c 			   struct page **page_list, int num_pages,
num_pages         247 drivers/misc/genwqe/card_utils.c 	for (i = 0; i < num_pages; i++) {
num_pages         268 drivers/misc/genwqe/card_utils.c 	genwqe_unmap_pages(cd, dma_list, num_pages);
num_pages         272 drivers/misc/genwqe/card_utils.c static int genwqe_sgl_size(int num_pages)
num_pages         274 drivers/misc/genwqe/card_utils.c 	int len, num_tlb = num_pages / 7;
num_pages         276 drivers/misc/genwqe/card_utils.c 	len = sizeof(struct sg_entry) * (num_pages+num_tlb + 1);
num_pages          57 drivers/misc/mic/scif/scif_debugfs.c 			   window->dma_addr[j], window->num_pages[j]);
num_pages         770 drivers/misc/mic/scif/scif_dma.c 		end = start + (window->num_pages[i] << PAGE_SHIFT);
num_pages         780 drivers/misc/mic/scif/scif_dma.c 		start += (window->num_pages[i] << PAGE_SHIFT);
num_pages         153 drivers/misc/mic/scif/scif_rma.c 	window->num_pages = scif_zalloc(nr_pages * sizeof(*window->num_pages));
num_pages         154 drivers/misc/mic/scif/scif_rma.c 	if (!window->num_pages)
num_pages         214 drivers/misc/mic/scif/scif_rma.c 	scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
num_pages         243 drivers/misc/mic/scif/scif_rma.c 						  window->num_pages[j] <<
num_pages         335 drivers/misc/mic/scif/scif_rma.c 	scif_free(window->num_pages, nr_pages * sizeof(*window->num_pages));
num_pages         389 drivers/misc/mic/scif/scif_rma.c 	vmalloc_num_pages = is_vmalloc_addr(&window->num_pages[0]);
num_pages         402 drivers/misc/mic/scif/scif_rma.c 				    vmalloc_to_page(&window->num_pages[i]) :
num_pages         403 drivers/misc/mic/scif/scif_rma.c 				    virt_to_page(&window->num_pages[i]),
num_pages         480 drivers/misc/mic/scif/scif_rma.c 	window->num_pages = scif_zalloc(nr_pages *
num_pages         481 drivers/misc/mic/scif/scif_rma.c 					sizeof(*window->num_pages));
num_pages         482 drivers/misc/mic/scif/scif_rma.c 	if (!window->num_pages)
num_pages         510 drivers/misc/mic/scif/scif_rma.c 	scif_free(window->num_pages, window->nr_pages *
num_pages         511 drivers/misc/mic/scif/scif_rma.c 		  sizeof(*window->num_pages));
num_pages         552 drivers/misc/mic/scif/scif_rma.c 		window->num_pages[i] = sg_dma_len(sg) >> PAGE_SHIFT;
num_pages         555 drivers/misc/mic/scif/scif_rma.c 			window->num_pages[i] +=
num_pages         599 drivers/misc/mic/scif/scif_rma.c 		window->num_pages[j] = nr_contig_pages;
num_pages         859 drivers/misc/mic/scif/scif_rma.c 				    sizeof(*window->num_pages),
num_pages         866 drivers/misc/mic/scif/scif_rma.c 				    &window->num_pages[i], loop_nr_contig_chunks
num_pages         867 drivers/misc/mic/scif/scif_rma.c 				    * sizeof(*window->num_pages));
num_pages         884 drivers/misc/mic/scif/scif_rma.c 					    &window->num_pages[i],
num_pages         886 drivers/misc/mic/scif/scif_rma.c 					    * sizeof(*window->num_pages));
num_pages         900 drivers/misc/mic/scif/scif_rma.c 					    &window->num_pages[i],
num_pages         902 drivers/misc/mic/scif/scif_rma.c 					    sizeof(*window->num_pages));
num_pages         911 drivers/misc/mic/scif/scif_rma.c 			     sizeof(*window->num_pages), ep->remote_dev);
num_pages         993 drivers/misc/mic/scif/scif_rma.c 			   int num_pages, s64 *out_offset)
num_pages        1002 drivers/misc/mic/scif/scif_rma.c 					page_index + num_pages - 1);
num_pages        1006 drivers/misc/mic/scif/scif_rma.c 		iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
num_pages        1125 drivers/misc/mic/scif/scif_rma.c 		if (window->num_pages[j])
num_pages         289 drivers/misc/mic/scif/scif_rma.h 	u64 *num_pages;
num_pages         787 drivers/misc/vmw_balloon.c 				       unsigned int num_pages,
num_pages         816 drivers/misc/vmw_balloon.c 	return vmballoon_cmd(b, cmd, pfn, num_pages);
num_pages         863 drivers/misc/vmw_balloon.c 	unsigned int i, num_pages;
num_pages         865 drivers/misc/vmw_balloon.c 	num_pages = ctl->n_pages;
num_pages         866 drivers/misc/vmw_balloon.c 	if (num_pages == 0)
num_pages         884 drivers/misc/vmw_balloon.c 	for (i = 0; i < num_pages; i++) {
num_pages         128 drivers/misc/vmw_vmci/vmci_queue_pair.c 	size_t num_pages;	/* Number of pages incl. header. */
num_pages         276 drivers/misc/vmw_vmci/vmci_queue_pair.c 	u64 num_pages;
num_pages         280 drivers/misc/vmw_vmci/vmci_queue_pair.c 	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
num_pages         281 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (num_pages >
num_pages         287 drivers/misc/vmw_vmci/vmci_queue_pair.c 	pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
num_pages         288 drivers/misc/vmw_vmci/vmci_queue_pair.c 	vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
num_pages         299 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue->kernel_if->num_pages = num_pages;
num_pages         305 drivers/misc/vmw_vmci/vmci_queue_pair.c 	for (i = 0; i < num_pages; i++) {
num_pages         528 drivers/misc/vmw_vmci/vmci_queue_pair.c 	u64 num_pages;
num_pages         533 drivers/misc/vmw_vmci/vmci_queue_pair.c 	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
num_pages         534 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (num_pages > (SIZE_MAX - queue_size) /
num_pages         538 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
num_pages         547 drivers/misc/vmw_vmci/vmci_queue_pair.c 		queue->kernel_if->num_pages = num_pages;
num_pages         627 drivers/misc/vmw_vmci/vmci_queue_pair.c 			     u64 num_pages, bool dirty)
num_pages         631 drivers/misc/vmw_vmci/vmci_queue_pair.c 	for (i = 0; i < num_pages; i++) {
num_pages         654 drivers/misc/vmw_vmci/vmci_queue_pair.c 				     produce_q->kernel_if->num_pages,
num_pages         657 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (retval < (int)produce_q->kernel_if->num_pages) {
num_pages         667 drivers/misc/vmw_vmci/vmci_queue_pair.c 				     consume_q->kernel_if->num_pages,
num_pages         670 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (retval < (int)consume_q->kernel_if->num_pages) {
num_pages         676 drivers/misc/vmw_vmci/vmci_queue_pair.c 				 produce_q->kernel_if->num_pages, false);
num_pages         703 drivers/misc/vmw_vmci/vmci_queue_pair.c 	    produce_q->kernel_if->num_pages * PAGE_SIZE;
num_pages         717 drivers/misc/vmw_vmci/vmci_queue_pair.c 			 produce_q->kernel_if->num_pages, true);
num_pages         720 drivers/misc/vmw_vmci/vmci_queue_pair.c 	       produce_q->kernel_if->num_pages);
num_pages         722 drivers/misc/vmw_vmci/vmci_queue_pair.c 			 consume_q->kernel_if->num_pages, true);
num_pages         725 drivers/misc/vmw_vmci/vmci_queue_pair.c 	       consume_q->kernel_if->num_pages);
num_pages         211 drivers/net/ethernet/8390/smc-ultra.c 	unsigned char num_pages, irqreg, addr, piomode;
num_pages         289 drivers/net/ethernet/8390/smc-ultra.c 		num_pages = num_pages_tbl[(addr >> 4) & 3];
num_pages         296 drivers/net/ethernet/8390/smc-ultra.c 	ei_status.stop_page = num_pages;
num_pages         857 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	int num_pages = __bnx2x_get_page_reg_num(bp);
num_pages         868 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	for (i = 0; i < num_pages; i++) {
num_pages         739 drivers/net/ethernet/broadcom/cnic.c 	for (i = 0; i < dma->num_pages; i++) {
num_pages         753 drivers/net/ethernet/broadcom/cnic.c 	dma->num_pages = 0;
num_pages         761 drivers/net/ethernet/broadcom/cnic.c 	for (i = 0; i < dma->num_pages; i++) {
num_pages         775 drivers/net/ethernet/broadcom/cnic.c 	for (i = 0; i < dma->num_pages; i++) {
num_pages         796 drivers/net/ethernet/broadcom/cnic.c 	dma->num_pages = pages;
num_pages        2323 drivers/net/ethernet/broadcom/cnic.c 	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
num_pages         132 drivers/net/ethernet/broadcom/cnic.h 	int		num_pages;
num_pages        1014 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
num_pages        1176 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
num_pages        1254 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
num_pages        1319 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
num_pages        1381 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
num_pages        1428 drivers/net/ethernet/emulex/benet/be_cmds.c 	req->num_pages = 2;
num_pages         390 drivers/net/ethernet/emulex/benet/be_cmds.h 	u16 num_pages;		/* sword */
num_pages         492 drivers/net/ethernet/emulex/benet/be_cmds.h 	u16 num_pages;
num_pages         555 drivers/net/ethernet/emulex/benet/be_cmds.h 	u16 num_pages;
num_pages         563 drivers/net/ethernet/emulex/benet/be_cmds.h 	u16 num_pages;
num_pages         582 drivers/net/ethernet/emulex/benet/be_cmds.h 	u8 num_pages;
num_pages         608 drivers/net/ethernet/emulex/benet/be_cmds.h 	u8 num_pages;
num_pages         353 drivers/net/ethernet/google/gve/gve_adminq.c 		.num_pages = cpu_to_be32(num_entries),
num_pages         100 drivers/net/ethernet/google/gve/gve_adminq.h 	__be32 num_pages;
num_pages         576 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	addr_size = eq->num_pages * sizeof(*eq->dma_addr);
num_pages         581 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	addr_size = eq->num_pages * sizeof(*eq->virt_addr);
num_pages         588 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	for (pg = 0; pg < eq->num_pages; pg++) {
num_pages         641 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	for (pg = 0; pg < eq->num_pages; pg++)
num_pages         691 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
num_pages         701 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c 	if (eq->num_pages > EQ_MAX_PAGES) {
num_pages         184 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h 	int                     num_pages;
num_pages         197 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	size = wqs->num_pages * sizeof(*wqs->page_paddr);
num_pages         202 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	size = wqs->num_pages * sizeof(*wqs->page_vaddr);
num_pages         207 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
num_pages         282 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
num_pages         318 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
num_pages         326 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
num_pages         365 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
num_pages          52 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h 	int                     num_pages;
num_pages         153 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	*npages = MLX5_GET(query_pages_out, out, num_pages);
num_pages         468 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c 	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
num_pages        1210 drivers/net/ethernet/mellanox/mlxsw/pci.c 				  u16 num_pages)
num_pages        1217 drivers/net/ethernet/mellanox/mlxsw/pci.c 	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
num_pages        1221 drivers/net/ethernet/mellanox/mlxsw/pci.c 	mlxsw_pci->fw_area.count = num_pages;
num_pages        1224 drivers/net/ethernet/mellanox/mlxsw/pci.c 	for (i = 0; i < num_pages; i++) {
num_pages        1401 drivers/net/ethernet/mellanox/mlxsw/pci.c 	u16 num_pages;
num_pages        1465 drivers/net/ethernet/mellanox/mlxsw/pci.c 	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
num_pages        1466 drivers/net/ethernet/mellanox/mlxsw/pci.c 	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
num_pages        1783 drivers/net/ethernet/qlogic/qed/qed_rdma.c 	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
num_pages        1021 drivers/net/ethernet/sfc/mcdi_port.c 	int num_pages;
num_pages        1026 drivers/net/ethernet/sfc/mcdi_port.c 		num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
num_pages        1032 drivers/net/ethernet/sfc/mcdi_port.c 		num_pages = SFF_8436_NUM_PAGES;
num_pages        1043 drivers/net/ethernet/sfc/mcdi_port.c 	while (space_remaining && (page < num_pages)) {
num_pages        1200 drivers/net/ethernet/smsc/smc91c92_cs.c     u_short num_pages;
num_pages        1217 drivers/net/ethernet/smsc/smc91c92_cs.c     num_pages = skb->len >> 8;
num_pages        1219 drivers/net/ethernet/smsc/smc91c92_cs.c     if (num_pages > 7) {
num_pages        1220 drivers/net/ethernet/smsc/smc91c92_cs.c 	netdev_err(dev, "Far too big packet error: %d pages\n", num_pages);
num_pages        1239 drivers/net/ethernet/smsc/smc91c92_cs.c     outw(MC_ALLOC | num_pages, ioaddr + MMU_CMD);
num_pages        1551 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		unsigned int num_pages;
num_pages        1558 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
num_pages        1559 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 		pp_params.order = ilog2(num_pages);
num_pages        1162 drivers/nvme/host/lightnvm.c static NVM_DEV_ATTR_12_RO(num_pages);
num_pages         612 drivers/nvme/target/rdma.c 	int sg_count = num_pages(len);
num_pages         934 drivers/nvme/target/rdma.c 	inline_page_count = num_pages(port->inline_data_size);
num_pages         688 drivers/s390/char/sclp_vt220.c static int __init __sclp_vt220_init(int num_pages)
num_pages         708 drivers/s390/char/sclp_vt220.c 	for (i = 0; i < num_pages; i++) {
num_pages         752 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
num_pages         795 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
num_pages         873 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
num_pages         995 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
num_pages        1094 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
num_pages        1137 drivers/scsi/be2iscsi/be_cmds.c 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
num_pages        1169 drivers/scsi/be2iscsi/be_cmds.c 				u32 page_offset, u32 num_pages)
num_pages        1177 drivers/scsi/be2iscsi/be_cmds.c 	u32 temp_num_pages = num_pages;
num_pages        1179 drivers/scsi/be2iscsi/be_cmds.c 	if (num_pages == 0xff)
num_pages        1180 drivers/scsi/be2iscsi/be_cmds.c 		num_pages = 1;
num_pages        1191 drivers/scsi/be2iscsi/be_cmds.c 		req->num_pages = min(num_pages, curr_pages);
num_pages        1193 drivers/scsi/be2iscsi/be_cmds.c 		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
num_pages        1194 drivers/scsi/be2iscsi/be_cmds.c 		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
num_pages        1195 drivers/scsi/be2iscsi/be_cmds.c 		internal_page_offset += req->num_pages;
num_pages        1196 drivers/scsi/be2iscsi/be_cmds.c 		page_offset += req->num_pages;
num_pages        1197 drivers/scsi/be2iscsi/be_cmds.c 		num_pages -= req->num_pages;
num_pages        1200 drivers/scsi/be2iscsi/be_cmds.c 			req->num_pages = temp_num_pages;
num_pages        1209 drivers/scsi/be2iscsi/be_cmds.c 	} while (num_pages > 0);
num_pages         315 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;		/* sword */
num_pages         614 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;
num_pages         649 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;
num_pages         850 drivers/scsi/be2iscsi/be_cmds.h 				u32 num_pages);
num_pages         906 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;
num_pages         927 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;
num_pages         944 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;
num_pages         953 drivers/scsi/be2iscsi/be_cmds.h 	u16 num_pages;
num_pages         647 drivers/scsi/bfa/bfa_fcbuild.c 	int             num_pages = 0;
num_pages         653 drivers/scsi/bfa/bfa_fcbuild.c 		num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
num_pages         656 drivers/scsi/bfa/bfa_fcbuild.c 		num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
num_pages         658 drivers/scsi/bfa/bfa_fcbuild.c 	return num_pages;
num_pages         663 drivers/scsi/bfa/bfa_fcbuild.c 		u32 d_id, u32 s_id, __be16 ox_id, int num_pages)
num_pages         669 drivers/scsi/bfa/bfa_fcbuild.c 	memset(tprlo_acc, 0, (num_pages * 16) + 4);
num_pages         673 drivers/scsi/bfa/bfa_fcbuild.c 	tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
num_pages         675 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
num_pages         687 drivers/scsi/bfa/bfa_fcbuild.c 		  u32 s_id, __be16 ox_id, int num_pages)
num_pages         693 drivers/scsi/bfa/bfa_fcbuild.c 	memset(prlo_acc, 0, (num_pages * 16) + 4);
num_pages         696 drivers/scsi/bfa/bfa_fcbuild.c 	prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
num_pages         698 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
num_pages         847 drivers/scsi/bfa/bfa_fcbuild.c 	      int num_pages)
num_pages         853 drivers/scsi/bfa/bfa_fcbuild.c 	memset(prlo, 0, (num_pages * 16) + 4);
num_pages         856 drivers/scsi/bfa/bfa_fcbuild.c 	prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
num_pages         858 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
num_pages         871 drivers/scsi/bfa/bfa_fcbuild.c 	       int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
num_pages         877 drivers/scsi/bfa/bfa_fcbuild.c 	memset(tprlo, 0, (num_pages * 16) + 4);
num_pages         880 drivers/scsi/bfa/bfa_fcbuild.c 	tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
num_pages         882 drivers/scsi/bfa/bfa_fcbuild.c 	for (page = 0; page < num_pages; page++) {
num_pages         284 drivers/scsi/bfa/bfa_fcbuild.h 		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
num_pages         287 drivers/scsi/bfa/bfa_fcbuild.h 		u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
num_pages         296 drivers/scsi/bfa/bfa_fcbuild.h 		u16 ox_id, int num_pages);
num_pages         299 drivers/scsi/bfa/bfa_fcbuild.h 		u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
num_pages         667 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	int num_pages;
num_pages         721 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
num_pages         725 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	while (num_pages--) {
num_pages         775 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
num_pages         779 drivers/scsi/bnx2fc/bnx2fc_tgt.c 	while (num_pages--) {
num_pages         936 drivers/scsi/bnx2i/bnx2i_hwi.c 	int num_pages;
num_pages         948 drivers/scsi/bnx2i/bnx2i_hwi.c 	num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
num_pages         955 drivers/scsi/bnx2i/bnx2i_hwi.c 	while (num_pages--) {
num_pages         976 drivers/scsi/bnx2i/bnx2i_hwi.c 	num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
num_pages         983 drivers/scsi/bnx2i/bnx2i_hwi.c 	while (num_pages--) {
num_pages        1004 drivers/scsi/bnx2i/bnx2i_hwi.c 	num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
num_pages        1011 drivers/scsi/bnx2i/bnx2i_hwi.c 	while (num_pages--) {
num_pages        1182 drivers/scsi/qedf/qedf_main.c 	int num_pages;
num_pages        1209 drivers/scsi/qedf/qedf_main.c 	num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
num_pages        1213 drivers/scsi/qedf/qedf_main.c 	while (num_pages--) {
num_pages        2902 drivers/scsi/qedf/qedf_main.c 	int num_pages;
num_pages        2988 drivers/scsi/qedf/qedf_main.c 		num_pages = qedf->global_queues[i]->cq_mem_size /
num_pages        2993 drivers/scsi/qedf/qedf_main.c 		while (num_pages--) {
num_pages        1559 drivers/scsi/qedi/qedi_main.c 	int num_pages;
num_pages        1648 drivers/scsi/qedi/qedi_main.c 		num_pages = qedi->global_queues[i]->cq_mem_size /
num_pages        1653 drivers/scsi/qedi/qedi_main.c 		while (num_pages--) {
num_pages        1694 drivers/scsi/qedi/qedi_main.c 	int num_pages;
num_pages        1724 drivers/scsi/qedi/qedi_main.c 	num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
num_pages        1728 drivers/scsi/qedi/qedi_main.c 	while (num_pages--) {
num_pages         468 drivers/staging/gasket/gasket_page_table.c 				  uint num_pages, int is_simple_mapping)
num_pages         477 drivers/staging/gasket/gasket_page_table.c 	for (i = 0; i < num_pages; i++) {
num_pages         576 drivers/staging/gasket/gasket_page_table.c 				       ulong dev_addr, uint num_pages)
num_pages         580 drivers/staging/gasket/gasket_page_table.c 				      num_pages))
num_pages         592 drivers/staging/gasket/gasket_page_table.c 				     u64 __iomem *slots, uint num_pages,
num_pages         600 drivers/staging/gasket/gasket_page_table.c 	for (i = 0; i < num_pages; i++) {
num_pages         630 drivers/staging/gasket/gasket_page_table.c 				      ulong dev_addr, uint num_pages)
num_pages         635 drivers/staging/gasket/gasket_page_table.c 				 pg_tbl->base_slot + slot, num_pages, 1);
num_pages         643 drivers/staging/gasket/gasket_page_table.c 					ulong dev_addr, uint num_pages)
num_pages         649 drivers/staging/gasket/gasket_page_table.c 	remain = num_pages;
num_pages         708 drivers/staging/gasket/gasket_page_table.c 					  ulong dev_addr, uint num_pages)
num_pages         728 drivers/staging/gasket/gasket_page_table.c 	if (page_index + num_pages > pg_tbl->num_simple_entries) {
num_pages         731 drivers/staging/gasket/gasket_page_table.c 			page_index + num_pages, pg_tbl->num_simple_entries);
num_pages         746 drivers/staging/gasket/gasket_page_table.c 					    ulong dev_addr, uint num_pages)
num_pages         770 drivers/staging/gasket/gasket_page_table.c 	num_lvl0_pages = DIV_ROUND_UP(num_pages, GASKET_PAGES_PER_SUBTABLE);
num_pages         802 drivers/staging/gasket/gasket_page_table.c 					   ulong dev_addr, uint num_pages)
num_pages         804 drivers/staging/gasket/gasket_page_table.c 	if (!num_pages)
num_pages         808 drivers/staging/gasket/gasket_page_table.c 		gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
num_pages         810 drivers/staging/gasket/gasket_page_table.c 		gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
num_pages         819 drivers/staging/gasket/gasket_page_table.c 				   uint num_pages)
num_pages         824 drivers/staging/gasket/gasket_page_table.c 	ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
num_pages         828 drivers/staging/gasket/gasket_page_table.c 			slot_idx, dev_addr, slot_idx + num_pages - 1);
num_pages         834 drivers/staging/gasket/gasket_page_table.c 				     num_pages, 1);
num_pages         837 drivers/staging/gasket/gasket_page_table.c 		gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
num_pages         955 drivers/staging/gasket/gasket_page_table.c 				     uint num_pages)
num_pages         963 drivers/staging/gasket/gasket_page_table.c 	ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
num_pages         965 drivers/staging/gasket/gasket_page_table.c 		dev_addr_end = dev_addr + (num_pages / PAGE_SIZE) - 1;
num_pages         976 drivers/staging/gasket/gasket_page_table.c 	remain = num_pages;
num_pages         991 drivers/staging/gasket/gasket_page_table.c 						       num_pages);
num_pages        1013 drivers/staging/gasket/gasket_page_table.c 			  ulong dev_addr, uint num_pages)
num_pages        1017 drivers/staging/gasket/gasket_page_table.c 	if (!num_pages)
num_pages        1024 drivers/staging/gasket/gasket_page_table.c 					      num_pages);
num_pages        1027 drivers/staging/gasket/gasket_page_table.c 						num_pages);
num_pages        1045 drivers/staging/gasket/gasket_page_table.c 			     uint num_pages)
num_pages        1047 drivers/staging/gasket/gasket_page_table.c 	if (!num_pages)
num_pages        1051 drivers/staging/gasket/gasket_page_table.c 	gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
num_pages        1153 drivers/staging/gasket/gasket_page_table.c 	uint num_pages = bytes / PAGE_SIZE;
num_pages        1161 drivers/staging/gasket/gasket_page_table.c 	if (num_pages == 0) {
num_pages        1170 drivers/staging/gasket/gasket_page_table.c 						     num_pages);
num_pages        1171 drivers/staging/gasket/gasket_page_table.c 	return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
num_pages        1232 drivers/staging/gasket/gasket_page_table.c 	unsigned int num_pages = size / PAGE_SIZE;
num_pages        1244 drivers/staging/gasket/gasket_page_table.c 	for (j = 0; j < num_pages; j++) {
num_pages        1258 drivers/staging/gasket/gasket_page_table.c 	unsigned int num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
num_pages        1265 drivers/staging/gasket/gasket_page_table.c 	if (num_pages == 0)
num_pages        1269 drivers/staging/gasket/gasket_page_table.c 				 num_pages * PAGE_SIZE, &handle, GFP_KERNEL);
num_pages        1273 drivers/staging/gasket/gasket_page_table.c 	gasket_dev->page_table[index]->num_coherent_pages = num_pages;
num_pages        1277 drivers/staging/gasket/gasket_page_table.c 		kcalloc(num_pages,
num_pages        1284 drivers/staging/gasket/gasket_page_table.c 		PAGE_SIZE * (num_pages);
num_pages        1289 drivers/staging/gasket/gasket_page_table.c 	for (j = 0; j < num_pages; j++) {
num_pages        1301 drivers/staging/gasket/gasket_page_table.c 				  num_pages * PAGE_SIZE, mem, handle);
num_pages          98 drivers/staging/gasket/gasket_page_table.h 			  ulong dev_addr, uint num_pages);
num_pages         109 drivers/staging/gasket/gasket_page_table.h 			     ulong dev_addr, uint num_pages);
num_pages          41 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	unsigned int num_pages;
num_pages         333 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
num_pages         339 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 		for (i = 0; i < pagelistinfo->num_pages; i++)
num_pages         362 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	unsigned int num_pages, offset, i, k;
num_pages         373 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
num_pages         375 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
num_pages         382 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			(num_pages * sizeof(u32)) +
num_pages         383 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			(num_pages * sizeof(pages[0]) +
num_pages         384 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			(num_pages * sizeof(struct scatterlist))) +
num_pages         399 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	pages		= (struct page **)(addrs + num_pages);
num_pages         400 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	scatterlist	= (struct scatterlist *)(pages + num_pages);
num_pages         402 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			  (scatterlist + num_pages);
num_pages         414 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	pagelistinfo->num_pages = num_pages;
num_pages         424 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 		for (actual_pages = 0; actual_pages < num_pages;
num_pages         445 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 					  num_pages,
num_pages         449 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 		if (actual_pages != num_pages) {
num_pages         452 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 				       __func__, actual_pages, num_pages);
num_pages         470 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	sg_init_table(scatterlist, num_pages);
num_pages         472 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	for (i = 0; i < num_pages; i++)	{
num_pages         484 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 				 num_pages,
num_pages         549 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 	unsigned int num_pages = pagelistinfo->num_pages;
num_pages         559 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
num_pages         586 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			memcpy((char *)kmap(pages[num_pages - 1]) +
num_pages         591 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 			kunmap(pages[num_pages - 1]);
num_pages         606 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c 		for (i = 0; i < num_pages; i++)
num_pages         456 drivers/tee/optee/call.c void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
num_pages         501 drivers/tee/optee/call.c 			if (!--num_pages)
num_pages         552 drivers/tee/optee/call.c static int check_mem_type(unsigned long start, size_t num_pages)
num_pages         566 drivers/tee/optee/call.c 			      start + num_pages * PAGE_SIZE);
num_pages         573 drivers/tee/optee/call.c 		       struct page **pages, size_t num_pages,
num_pages         582 drivers/tee/optee/call.c 	if (!num_pages)
num_pages         585 drivers/tee/optee/call.c 	rc = check_mem_type(start, num_pages);
num_pages         589 drivers/tee/optee/call.c 	pages_list = optee_allocate_pages_list(num_pages);
num_pages         599 drivers/tee/optee/call.c 	optee_fill_pages_list(pages_list, pages, num_pages,
num_pages         620 drivers/tee/optee/call.c 	optee_free_pages_list(pages_list, num_pages);
num_pages         648 drivers/tee/optee/call.c 			    struct page **pages, size_t num_pages,
num_pages         655 drivers/tee/optee/call.c 	return check_mem_type(start, num_pages);
num_pages         157 drivers/tee/optee/optee_private.h 		       struct page **pages, size_t num_pages,
num_pages         162 drivers/tee/optee/optee_private.h 			    struct page **pages, size_t num_pages,
num_pages         173 drivers/tee/optee/optee_private.h void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
num_pages          41 drivers/tee/tee_shm.c 		for (n = 0; n < shm->num_pages; n++)
num_pages         230 drivers/tee/tee_shm.c 	int num_pages;
num_pages         261 drivers/tee/tee_shm.c 	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
num_pages         262 drivers/tee/tee_shm.c 	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
num_pages         268 drivers/tee/tee_shm.c 	rc = get_user_pages_fast(start, num_pages, FOLL_WRITE, shm->pages);
num_pages         270 drivers/tee/tee_shm.c 		shm->num_pages = rc;
num_pages         271 drivers/tee/tee_shm.c 	if (rc != num_pages) {
num_pages         288 drivers/tee/tee_shm.c 					     shm->num_pages, start);
num_pages         325 drivers/tee/tee_shm.c 			for (n = 0; n < shm->num_pages; n++)
num_pages         459 drivers/video/fbdev/efifb.c 		    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
num_pages         155 drivers/virt/fsl_hypervisor.c 	unsigned int num_pages;
num_pages         221 drivers/virt/fsl_hypervisor.c 	num_pages = (param.count + lb_offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages         229 drivers/virt/fsl_hypervisor.c 	pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
num_pages         239 drivers/virt/fsl_hypervisor.c 	sg_list_unaligned = kmalloc(num_pages * sizeof(struct fh_sg_list) +
num_pages         250 drivers/virt/fsl_hypervisor.c 		num_pages, param.source != -1 ? FOLL_WRITE : 0, pages);
num_pages         252 drivers/virt/fsl_hypervisor.c 	if (num_pinned != num_pages) {
num_pages         275 drivers/virt/fsl_hypervisor.c 	for (i = 1; i < num_pages; i++) {
num_pages         292 drivers/virt/fsl_hypervisor.c 		virt_to_phys(sg_list), num_pages);
num_pages         296 drivers/virt/fsl_hypervisor.c 		for (i = 0; i < num_pages; i++)
num_pages          95 drivers/virtio/virtio_balloon.c 	unsigned int num_pages;
num_pages         205 drivers/virtio/virtio_balloon.c 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
num_pages         247 drivers/virtio/virtio_balloon.c 	num = min(num, (size_t)vb->num_pages);
num_pages         255 drivers/virtio/virtio_balloon.c 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
num_pages         359 drivers/virtio/virtio_balloon.c 	u32 num_pages;
num_pages         361 drivers/virtio/virtio_balloon.c 	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
num_pages         362 drivers/virtio/virtio_balloon.c 		     &num_pages);
num_pages         366 drivers/virtio/virtio_balloon.c 		num_pages = le32_to_cpu((__force __le32)num_pages);
num_pages         368 drivers/virtio/virtio_balloon.c 	target = num_pages;
num_pages         369 drivers/virtio/virtio_balloon.c 	return target - vb->num_pages;
num_pages         422 drivers/virtio/virtio_balloon.c 	u32 actual = vb->num_pages;
num_pages         807 drivers/virtio/virtio_balloon.c 	while (vb->num_pages && pages_freed < pages_to_free)
num_pages         843 drivers/virtio/virtio_balloon.c 	count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
num_pages         973 drivers/virtio/virtio_balloon.c 	while (vb->num_pages)
num_pages         974 drivers/virtio/virtio_balloon.c 		leak_balloon(vb, vb->num_pages);
num_pages         169 drivers/xen/xen-front-pgdir-shbuf.c 	return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
num_pages         196 drivers/xen/xen-front-pgdir-shbuf.c 	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
num_pages         217 drivers/xen/xen-front-pgdir-shbuf.c 	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
num_pages         222 drivers/xen/xen-front-pgdir-shbuf.c 	for (i = 0; i < buf->num_pages; i++) {
num_pages         231 drivers/xen/xen-front-pgdir-shbuf.c 				buf->num_pages);
num_pages         233 drivers/xen/xen-front-pgdir-shbuf.c 	for (i = 0; i < buf->num_pages; i++) {
num_pages         262 drivers/xen/xen-front-pgdir-shbuf.c 	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
num_pages         266 drivers/xen/xen-front-pgdir-shbuf.c 	buf->backend_map_handles = kcalloc(buf->num_pages,
num_pages         280 drivers/xen/xen-front-pgdir-shbuf.c 	grefs_left = buf->num_pages;
num_pages         304 drivers/xen/xen-front-pgdir-shbuf.c 	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
num_pages         307 drivers/xen/xen-front-pgdir-shbuf.c 	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
num_pages         374 drivers/xen/xen-front-pgdir-shbuf.c 	grefs_left = buf->num_pages;
num_pages         410 drivers/xen/xen-front-pgdir-shbuf.c 	for (i = 0; i < buf->num_pages; i++) {
num_pages         527 drivers/xen/xen-front-pgdir-shbuf.c 	buf->num_pages = cfg->num_pages;
num_pages        2157 fs/btrfs/block-group.c 	u64 num_pages = 0;
num_pages        2266 fs/btrfs/block-group.c 	num_pages = div_u64(block_group->key.offset, SZ_256M);
num_pages        2267 fs/btrfs/block-group.c 	if (!num_pages)
num_pages        2268 fs/btrfs/block-group.c 		num_pages = 1;
num_pages        2270 fs/btrfs/block-group.c 	num_pages *= 16;
num_pages        2271 fs/btrfs/block-group.c 	num_pages *= PAGE_SIZE;
num_pages        2273 fs/btrfs/block-group.c 	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
num_pages        2277 fs/btrfs/block-group.c 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
num_pages        2278 fs/btrfs/block-group.c 					      num_pages, num_pages,
num_pages         323 fs/btrfs/check-integrity.c 				     char **datav, unsigned int num_pages);
num_pages         326 fs/btrfs/check-integrity.c 					  unsigned int num_pages,
num_pages        1566 fs/btrfs/check-integrity.c 		unsigned int num_pages;
num_pages        1570 fs/btrfs/check-integrity.c 		num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
num_pages        1572 fs/btrfs/check-integrity.c 		while (num_pages > 0) {
num_pages        1573 fs/btrfs/check-integrity.c 			num_pages--;
num_pages        1574 fs/btrfs/check-integrity.c 			if (block_ctx->datav[num_pages]) {
num_pages        1575 fs/btrfs/check-integrity.c 				kunmap(block_ctx->pagev[num_pages]);
num_pages        1576 fs/btrfs/check-integrity.c 				block_ctx->datav[num_pages] = NULL;
num_pages        1578 fs/btrfs/check-integrity.c 			if (block_ctx->pagev[num_pages]) {
num_pages        1579 fs/btrfs/check-integrity.c 				__free_page(block_ctx->pagev[num_pages]);
num_pages        1580 fs/btrfs/check-integrity.c 				block_ctx->pagev[num_pages] = NULL;
num_pages        1594 fs/btrfs/check-integrity.c 	unsigned int num_pages;
num_pages        1609 fs/btrfs/check-integrity.c 	num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >>
num_pages        1612 fs/btrfs/check-integrity.c 	block_ctx->mem_to_free = kcalloc(num_pages, size, GFP_NOFS);
num_pages        1616 fs/btrfs/check-integrity.c 	block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
num_pages        1617 fs/btrfs/check-integrity.c 	for (i = 0; i < num_pages; i++) {
num_pages        1624 fs/btrfs/check-integrity.c 	for (i = 0; i < num_pages;) {
num_pages        1628 fs/btrfs/check-integrity.c 		bio = btrfs_io_bio_alloc(num_pages - i);
num_pages        1633 fs/btrfs/check-integrity.c 		for (j = i; j < num_pages; j++) {
num_pages        1653 fs/btrfs/check-integrity.c 	for (i = 0; i < num_pages; i++)
num_pages        1710 fs/btrfs/check-integrity.c 		char **datav, unsigned int num_pages)
num_pages        1718 fs/btrfs/check-integrity.c 	if (num_pages * PAGE_SIZE < state->metablock_size)
num_pages        1720 fs/btrfs/check-integrity.c 	num_pages = state->metablock_size >> PAGE_SHIFT;
num_pages        1729 fs/btrfs/check-integrity.c 	for (i = 0; i < num_pages; i++) {
num_pages        1745 fs/btrfs/check-integrity.c 					  unsigned int num_pages,
num_pages        1762 fs/btrfs/check-integrity.c 	if (num_pages == 0)
num_pages        1767 fs/btrfs/check-integrity.c 						      num_pages));
num_pages        1778 fs/btrfs/check-integrity.c 			if (num_pages * PAGE_SIZE <
num_pages        1794 fs/btrfs/check-integrity.c 				if (num_pages * PAGE_SIZE <
num_pages        1827 fs/btrfs/check-integrity.c 			if (num_pages * PAGE_SIZE <
num_pages        2100 fs/btrfs/check-integrity.c 	num_pages -= processed_len >> PAGE_SHIFT;
num_pages        2948 fs/btrfs/ctree.h 		      size_t num_pages, loff_t pos, size_t write_bytes,
num_pages        2264 fs/btrfs/extent_io.c 	int i, num_pages = num_extent_pages(eb);
num_pages        2270 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        3653 fs/btrfs/extent_io.c 	int i, num_pages, failed_page_nr;
num_pages        3707 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        3708 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        3860 fs/btrfs/extent_io.c 	int i, num_pages;
num_pages        3866 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        3867 fs/btrfs/extent_io.c 	atomic_set(&eb->io_pages, num_pages);
num_pages        3885 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        3899 fs/btrfs/extent_io.c 			if (atomic_sub_and_test(num_pages - i, &eb->io_pages))
num_pages        3910 fs/btrfs/extent_io.c 		for (; i < num_pages; i++) {
num_pages        4867 fs/btrfs/extent_io.c 	int num_pages;
num_pages        4872 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        4873 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        4965 fs/btrfs/extent_io.c 	int num_pages = num_extent_pages(src);
num_pages        4971 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        4994 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5001 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5002 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5061 fs/btrfs/extent_io.c 	int num_pages, i;
num_pages        5065 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5066 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5157 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5180 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5181 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++, index++) {
num_pages        5260 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++)
num_pages        5266 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5367 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5370 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5372 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5395 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5402 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5407 fs/btrfs/extent_io.c 		for (i = 0; i < num_pages; i++)
num_pages        5411 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++)
num_pages        5422 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5425 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5426 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5437 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5440 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5441 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5455 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5464 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(eb);
num_pages        5465 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5480 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5496 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5537 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++) {
num_pages        5788 fs/btrfs/extent_io.c 	int num_pages;
num_pages        5792 fs/btrfs/extent_io.c 	num_pages = num_extent_pages(dst);
num_pages        5793 fs/btrfs/extent_io.c 	for (i = 0; i < num_pages; i++)
num_pages         450 fs/btrfs/file.c static void btrfs_drop_pages(struct page **pages, size_t num_pages)
num_pages         453 fs/btrfs/file.c 	for (i = 0; i < num_pages; i++) {
num_pages         516 fs/btrfs/file.c 		      size_t num_pages, loff_t pos, size_t write_bytes,
num_pages         566 fs/btrfs/file.c 	for (i = 0; i < num_pages; i++) {
num_pages        1422 fs/btrfs/file.c 				  size_t num_pages, loff_t pos,
num_pages        1431 fs/btrfs/file.c 	for (i = 0; i < num_pages; i++) {
num_pages        1444 fs/btrfs/file.c 		if (!err && i == num_pages - 1)
num_pages        1482 fs/btrfs/file.c 				size_t num_pages, loff_t pos,
num_pages        1510 fs/btrfs/file.c 			for (i = 0; i < num_pages; i++) {
num_pages        1541 fs/btrfs/file.c 	for (i = 0; i < num_pages; i++) {
num_pages        1619 fs/btrfs/file.c 		size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
num_pages        1628 fs/btrfs/file.c 		WARN_ON(num_pages > nrptrs);
num_pages        1661 fs/btrfs/file.c 				num_pages = DIV_ROUND_UP(write_bytes + offset,
num_pages        1691 fs/btrfs/file.c 		ret = prepare_pages(inode, pages, num_pages,
num_pages        1702 fs/btrfs/file.c 				num_pages, pos, write_bytes, &lockstart,
num_pages        1778 fs/btrfs/file.c 			btrfs_drop_pages(pages, num_pages);
num_pages        1797 fs/btrfs/file.c 		btrfs_drop_pages(pages, num_pages);
num_pages         303 fs/btrfs/free-space-cache.c 	int num_pages;
num_pages         306 fs/btrfs/free-space-cache.c 	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
num_pages         313 fs/btrfs/free-space-cache.c 	    (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
num_pages         318 fs/btrfs/free-space-cache.c 	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
num_pages         322 fs/btrfs/free-space-cache.c 	io_ctl->num_pages = num_pages;
num_pages         347 fs/btrfs/free-space-cache.c 	ASSERT(io_ctl->index < io_ctl->num_pages);
num_pages         362 fs/btrfs/free-space-cache.c 	for (i = 0; i < io_ctl->num_pages; i++) {
num_pages         378 fs/btrfs/free-space-cache.c 	for (i = 0; i < io_ctl->num_pages; i++) {
num_pages         403 fs/btrfs/free-space-cache.c 	for (i = 0; i < io_ctl->num_pages; i++) {
num_pages         422 fs/btrfs/free-space-cache.c 		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
num_pages         423 fs/btrfs/free-space-cache.c 		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
num_pages         443 fs/btrfs/free-space-cache.c 		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
num_pages         445 fs/btrfs/free-space-cache.c 			(sizeof(u32) * io_ctl->num_pages);
num_pages         475 fs/btrfs/free-space-cache.c 		offset = sizeof(u32) * io_ctl->num_pages;
num_pages         497 fs/btrfs/free-space-cache.c 		offset = sizeof(u32) * io_ctl->num_pages;
num_pages         538 fs/btrfs/free-space-cache.c 	if (io_ctl->index >= io_ctl->num_pages)
num_pages         557 fs/btrfs/free-space-cache.c 		if (io_ctl->index >= io_ctl->num_pages)
num_pages         564 fs/btrfs/free-space-cache.c 	if (io_ctl->index < io_ctl->num_pages)
num_pages         580 fs/btrfs/free-space-cache.c 	while (io_ctl->index < io_ctl->num_pages) {
num_pages        1320 fs/btrfs/free-space-cache.c 	ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
num_pages          47 fs/btrfs/free-space-cache.h 	int num_pages;
num_pages        1237 fs/btrfs/ioctl.c 				    unsigned long num_pages)
num_pages        1257 fs/btrfs/ioctl.c 	page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
num_pages         982 fs/btrfs/raid56.c 	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
num_pages         987 fs/btrfs/raid56.c 		       sizeof(*rbio->stripe_pages) * num_pages +
num_pages         988 fs/btrfs/raid56.c 		       sizeof(*rbio->bio_pages) * num_pages +
num_pages        1005 fs/btrfs/raid56.c 	rbio->nr_pages = num_pages;
num_pages        1023 fs/btrfs/raid56.c 	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
num_pages        1024 fs/btrfs/raid56.c 	CONSUME_ALLOC(rbio->bio_pages, num_pages);
num_pages         266 fs/ceph/addr.c 	int num_pages;
num_pages         276 fs/ceph/addr.c 	num_pages = calc_pages_for((u64)osd_data->alignment,
num_pages         278 fs/ceph/addr.c 	for (i = 0; i < num_pages; i++) {
num_pages         699 fs/ceph/addr.c 	int num_pages, total_pages = 0;
num_pages         733 fs/ceph/addr.c 		num_pages = calc_pages_for((u64)osd_data->alignment,
num_pages         735 fs/ceph/addr.c 		total_pages += num_pages;
num_pages         736 fs/ceph/addr.c 		for (j = 0; j < num_pages; j++) {
num_pages         760 fs/ceph/addr.c 		     inode, osd_data->length, rc >= 0 ? num_pages : 0);
num_pages         762 fs/ceph/addr.c 		release_pages(osd_data->pages, num_pages);
num_pages         614 fs/ceph/file.c 		int num_pages;
num_pages         639 fs/ceph/file.c 			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
num_pages         646 fs/ceph/file.c 			num_pages = calc_pages_for(off, len);
num_pages         648 fs/ceph/file.c 			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
num_pages         685 fs/ceph/file.c 			ceph_put_page_vector(pages, num_pages, false);
num_pages         702 fs/ceph/file.c 			ceph_release_page_vector(pages, num_pages);
num_pages         938 fs/ceph/file.c 	int num_pages = 0;
num_pages         990 fs/ceph/file.c 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
num_pages        1030 fs/ceph/file.c 		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
num_pages        1059 fs/ceph/file.c 				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
num_pages        1068 fs/ceph/file.c 		put_bvecs(bvecs, num_pages, should_dirty);
num_pages        1140 fs/ceph/file.c 	int num_pages;
num_pages        1187 fs/ceph/file.c 		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages        1189 fs/ceph/file.c 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
num_pages        1196 fs/ceph/file.c 		for (n = 0; n < num_pages; n++) {
num_pages        1207 fs/ceph/file.c 			ceph_release_page_vector(pages, num_pages);
num_pages        2638 fs/cifs/file.c cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
num_pages        2643 fs/cifs/file.c 	for (i = 0; i < num_pages; i++) {
num_pages        2650 fs/cifs/file.c 			num_pages = i;
num_pages        2657 fs/cifs/file.c 		for (i = 0; i < num_pages; i++)
num_pages        2666 fs/cifs/file.c 	size_t num_pages;
num_pages        2670 fs/cifs/file.c 	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
num_pages        2675 fs/cifs/file.c 	return num_pages;
num_pages        2715 fs/cifs/file.c 		      size_t *len, unsigned long *num_pages)
num_pages        2718 fs/cifs/file.c 	unsigned long i, nr_pages = *num_pages;
num_pages        2751 fs/cifs/file.c 	*num_pages = i + 1;
num_pages        2826 fs/cifs/file.c 	unsigned long nr_pages, num_pages, i;
num_pages        2921 fs/cifs/file.c 			num_pages = nr_pages;
num_pages        2923 fs/cifs/file.c 				wdata, from, &cur_len, &num_pages);
num_pages        2937 fs/cifs/file.c 			for ( ; nr_pages > num_pages; nr_pages--)
num_pages        4285 fs/cifs/file.c 	struct list_head *page_list, unsigned num_pages)
num_pages        4304 fs/cifs/file.c 					 &num_pages);
num_pages        4319 fs/cifs/file.c 		 __func__, file, mapping, num_pages);
num_pages        2447 fs/cifs/smbdirect.c 	struct smbd_connection *info, struct page *pages[], int num_pages,
num_pages        2455 fs/cifs/smbdirect.c 	if (num_pages > info->max_frmr_depth) {
num_pages        2457 fs/cifs/smbdirect.c 			num_pages, info->max_frmr_depth);
num_pages        2467 fs/cifs/smbdirect.c 	smbdirect_mr->sgl_count = num_pages;
num_pages        2468 fs/cifs/smbdirect.c 	sg_init_table(smbdirect_mr->sgl, num_pages);
num_pages        2471 fs/cifs/smbdirect.c 			num_pages, offset, tailsz);
num_pages        2473 fs/cifs/smbdirect.c 	if (num_pages == 1) {
num_pages        2482 fs/cifs/smbdirect.c 	while (i < num_pages - 1) {
num_pages        2492 fs/cifs/smbdirect.c 	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
num_pages        2495 fs/cifs/smbdirect.c 			num_pages, dir, rc);
num_pages        2499 fs/cifs/smbdirect.c 	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
num_pages        2501 fs/cifs/smbdirect.c 	if (rc != num_pages) {
num_pages        2504 fs/cifs/smbdirect.c 			rc, num_pages);
num_pages         307 fs/cifs/smbdirect.h 	struct smbd_connection *info, struct page *pages[], int num_pages,
num_pages         456 fs/fuse/cuse.c 	ap->num_pages = 1;
num_pages         956 fs/fuse/dev.c  	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
num_pages        1631 fs/fuse/dev.c  	release_pages(ra->ap.pages, ra->ap.num_pages);
num_pages        1645 fs/fuse/dev.c  	unsigned int num_pages;
num_pages        1660 fs/fuse/dev.c  	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
num_pages        1661 fs/fuse/dev.c  	num_pages = min(num_pages, fc->max_pages);
num_pages        1663 fs/fuse/dev.c  	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
num_pages        1671 fs/fuse/dev.c  	ap->descs = (void *) (ap->pages + num_pages);
num_pages        1682 fs/fuse/dev.c  	while (num && ap->num_pages < num_pages) {
num_pages        1691 fs/fuse/dev.c  		ap->pages[ap->num_pages] = page;
num_pages        1692 fs/fuse/dev.c  		ap->descs[ap->num_pages].offset = offset;
num_pages        1693 fs/fuse/dev.c  		ap->descs[ap->num_pages].length = this_num;
num_pages        1694 fs/fuse/dev.c  		ap->num_pages++;
num_pages        1215 fs/fuse/dir.c  		.num_pages = 1,
num_pages         376 fs/fuse/file.c 		if (idx_from < curr_index + wpa->ia.ap.num_pages &&
num_pages         576 fs/fuse/file.c 	for (i = 0; i < ap->num_pages; i++) {
num_pages         771 fs/fuse/file.c 		for (i = start_idx; i < ap->num_pages; i++) {
num_pages         790 fs/fuse/file.c 		.ap.num_pages = 1,
num_pages         851 fs/fuse/file.c 	for (i = 0; mapping == NULL && i < ap->num_pages; i++)
num_pages         866 fs/fuse/file.c 	for (i = 0; i < ap->num_pages; i++) {
num_pages         888 fs/fuse/file.c 	size_t count = ap->num_pages << PAGE_SHIFT;
num_pages         899 fs/fuse/file.c 		ap->descs[ap->num_pages - 1].length--;
num_pages         936 fs/fuse/file.c 	if (ap->num_pages &&
num_pages         937 fs/fuse/file.c 	    (ap->num_pages == fc->max_pages ||
num_pages         938 fs/fuse/file.c 	     (ap->num_pages + 1) * PAGE_SIZE > fc->max_read ||
num_pages         939 fs/fuse/file.c 	     ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
num_pages         951 fs/fuse/file.c 	if (WARN_ON(ap->num_pages >= data->max_pages)) {
num_pages         958 fs/fuse/file.c 	ap->pages[ap->num_pages] = page;
num_pages         959 fs/fuse/file.c 	ap->descs[ap->num_pages].length = PAGE_SIZE;
num_pages         960 fs/fuse/file.c 	ap->num_pages++;
num_pages         989 fs/fuse/file.c 		if (data.ia->ap.num_pages)
num_pages        1108 fs/fuse/file.c 	for (i = 0; i < ap->num_pages; i++)
num_pages        1120 fs/fuse/file.c 	for (i = 0; i < ap->num_pages; i++) {
num_pages        1186 fs/fuse/file.c 		ap->pages[ap->num_pages] = page;
num_pages        1187 fs/fuse/file.c 		ap->descs[ap->num_pages].length = tmp;
num_pages        1188 fs/fuse/file.c 		ap->num_pages++;
num_pages        1199 fs/fuse/file.c 		 ap->num_pages < max_pages && offset == 0);
num_pages        1387 fs/fuse/file.c 	while (nbytes < *nbytesp && ap->num_pages < max_pages) {
num_pages        1390 fs/fuse/file.c 		ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
num_pages        1392 fs/fuse/file.c 					max_pages - ap->num_pages,
num_pages        1403 fs/fuse/file.c 		ap->descs[ap->num_pages].offset = start;
num_pages        1404 fs/fuse/file.c 		fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
num_pages        1406 fs/fuse/file.c 		ap->num_pages += npages;
num_pages        1407 fs/fuse/file.c 		ap->descs[ap->num_pages - 1].length -=
num_pages        1598 fs/fuse/file.c 	for (i = 0; i < ap->num_pages; i++)
num_pages        1618 fs/fuse/file.c 	for (i = 0; i < ap->num_pages; i++) {
num_pages        1636 fs/fuse/file.c 	__u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
num_pages        1803 fs/fuse/file.c 		ap->num_pages = 0;
num_pages        1847 fs/fuse/file.c 	ap->num_pages = 1;
num_pages        1924 fs/fuse/file.c 	memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
num_pages        1925 fs/fuse/file.c 	memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
num_pages        1939 fs/fuse/file.c 	int num_pages = wpa->ia.ap.num_pages;
num_pages        1948 fs/fuse/file.c 	for (i = 0; i < num_pages; i++)
num_pages        1967 fs/fuse/file.c 	WARN_ON(new_ap->num_pages != 0);
num_pages        1978 fs/fuse/file.c 	new_ap->num_pages = 1;
num_pages        1985 fs/fuse/file.c 			WARN_ON(tmp->ia.ap.num_pages != 1);
num_pages        2038 fs/fuse/file.c 	if (wpa && ap->num_pages &&
num_pages        2039 fs/fuse/file.c 	    (is_writeback || ap->num_pages == fc->max_pages ||
num_pages        2040 fs/fuse/file.c 	     (ap->num_pages + 1) * PAGE_SIZE > fc->max_write ||
num_pages        2041 fs/fuse/file.c 	     data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)) {
num_pages        2044 fs/fuse/file.c 	} else if (wpa && ap->num_pages == data->max_pages) {
num_pages        2084 fs/fuse/file.c 		ap->num_pages = 0;
num_pages        2096 fs/fuse/file.c 	ap->pages[ap->num_pages] = tmp_page;
num_pages        2097 fs/fuse/file.c 	ap->descs[ap->num_pages].offset = 0;
num_pages        2098 fs/fuse/file.c 	ap->descs[ap->num_pages].length = PAGE_SIZE;
num_pages        2109 fs/fuse/file.c 	data->orig_pages[ap->num_pages] = page;
num_pages        2116 fs/fuse/file.c 	ap->num_pages++;
num_pages        2151 fs/fuse/file.c 		WARN_ON(!data.wpa->ia.ap.num_pages);
num_pages        2790 fs/fuse/file.c 	while (ap.num_pages < max_pages) {
num_pages        2791 fs/fuse/file.c 		ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
num_pages        2792 fs/fuse/file.c 		if (!ap.pages[ap.num_pages])
num_pages        2794 fs/fuse/file.c 		ap.num_pages++;
num_pages        2811 fs/fuse/file.c 		for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
num_pages        2880 fs/fuse/file.c 	for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
num_pages        2888 fs/fuse/file.c 	while (ap.num_pages)
num_pages        2889 fs/fuse/file.c 		__free_page(ap.pages[--ap.num_pages]);
num_pages         261 fs/fuse/fuse_i.h 	unsigned int num_pages;
num_pages         336 fs/fuse/readdir.c 	ap->num_pages = 1;
num_pages         486 fs/fuse/virtio_fs.c 			for (i = 0; i < ap->num_pages; i++) {
num_pages         809 fs/fuse/virtio_fs.c 		total_sgs += ap->num_pages;
num_pages         820 fs/fuse/virtio_fs.c 		total_sgs += ap->num_pages;
num_pages         829 fs/fuse/virtio_fs.c 				       unsigned int num_pages,
num_pages         835 fs/fuse/virtio_fs.c 	for (i = 0; i < num_pages && total_len; i++) {
num_pages         865 fs/fuse/virtio_fs.c 						ap->num_pages,
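
The fs/fuse/dev.c entries above show the recurring pattern of rounding a byte count plus its in-page offset up to whole pages and then clamping the result to the connection's max_pages limit. A hedged sketch of that arithmetic with a hypothetical name (pages_for_range is not a kernel function), assuming min(), PAGE_SIZE and PAGE_SHIFT from the usual kernel headers:

	/* Hypothetical helper: pages needed to cover 'num' bytes that begin at
	 * 'offset' within the first page, capped at 'max_pages'. */
	static unsigned int pages_for_range(size_t num, unsigned int offset,
					    unsigned int max_pages)
	{
		unsigned int num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		return min(num_pages, max_pages);
	}
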
num_pages        7051 fs/ocfs2/alloc.c 	int ret, i, has_data, num_pages = 0;
num_pages        7131 fs/ocfs2/alloc.c 		ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
num_pages        7153 fs/ocfs2/alloc.c 		for (i = 0; i < num_pages; i++)
num_pages        7187 fs/ocfs2/alloc.c 		ocfs2_unlock_and_free_pages(pages, num_pages);
num_pages         790 fs/ocfs2/aops.c void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
num_pages         794 fs/ocfs2/aops.c 	for(i = 0; i < num_pages; i++) {
num_pages          22 fs/ocfs2/aops.h void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
num_pages          38 include/drm/drm_cache.h void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
num_pages          99 include/drm/ttm/ttm_bo_api.h 	unsigned long num_pages;
num_pages         182 include/drm/ttm/ttm_bo_api.h 	unsigned long num_pages;
num_pages         701 include/drm/ttm/ttm_bo_api.h 		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
num_pages          97 include/drm/ttm/ttm_memory.h 			uint64_t num_pages, struct ttm_operation_ctx *ctx);
num_pages         110 include/drm/ttm/ttm_tt.h 	unsigned long num_pages;
num_pages         304 include/linux/ceph/libceph.h extern void ceph_release_page_vector(struct page **pages, int num_pages);
num_pages         305 include/linux/ceph/libceph.h extern void ceph_put_page_vector(struct page **pages, int num_pages,
num_pages         307 include/linux/ceph/libceph.h extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
num_pages         127 include/linux/efi.h 	u64 num_pages;
num_pages         624 include/linux/mlx5/device.h 	__be32		num_pages;
num_pages        4767 include/linux/mlx5/mlx5_ifc.h 	u8         num_pages[0x20];
num_pages        9033 include/linux/mlx5/mlx5_ifc.h 	u8         num_pages[0x20];
num_pages         291 include/linux/qed/qed_rdma_if.h 	u16 num_pages;
num_pages         111 include/linux/tee_drv.h 			    struct page **pages, size_t num_pages,
num_pages         196 include/linux/tee_drv.h 	size_t num_pages;
num_pages         421 include/linux/tee_drv.h 					      size_t *num_pages)
num_pages         423 include/linux/tee_drv.h 	*num_pages = shm->num_pages;
num_pages         162 include/trace/events/hswadsp.h 		__field(	int,	num_pages	)
num_pages         171 include/trace/events/hswadsp.h 		__entry->num_pages = stream->request.ringinfo.num_pages;
num_pages         179 include/trace/events/hswadsp.h 		(int)__entry->num_pages, (int)__entry->ring_size,
num_pages         500 include/uapi/linux/kvm.h 	__u32 num_pages;
num_pages          47 include/uapi/linux/virtio_balloon.h 	__u32 num_pages;
num_pages          63 include/uapi/linux/xdp_diag.h 	__u32	num_pages;
num_pages          88 include/uapi/rdma/ocrdma-abi.h 	__u32 num_pages;
num_pages          42 include/xen/xen-front-pgdir-shbuf.h 	int num_pages;
num_pages          62 include/xen/xen-front-pgdir-shbuf.h 	int num_pages;
num_pages        1961 kernel/module.c 		      int (*set_memory)(unsigned long start, int num_pages))
num_pages        1971 kernel/module.c 			int (*set_memory)(unsigned long start, int num_pages))
num_pages        1981 kernel/module.c 				int (*set_memory)(unsigned long start, int num_pages))
num_pages        1991 kernel/module.c 			       int (*set_memory)(unsigned long start, int num_pages))
num_pages        3254 net/ceph/messenger.c 		int num_pages = calc_pages_for(data->alignment, data->length);
num_pages        3255 net/ceph/messenger.c 		ceph_release_page_vector(data->pages, num_pages);
num_pages         358 net/ceph/osd_client.c 		int num_pages;
num_pages         360 net/ceph/osd_client.c 		num_pages = calc_pages_for((u64)osd_data->alignment,
num_pages         362 net/ceph/osd_client.c 		ceph_release_page_vector(osd_data->pages, num_pages);
num_pages        5241 net/ceph/osd_client.c 			struct page **pages, int num_pages, int page_align)
num_pages        5281 net/ceph/osd_client.c 			 struct page **pages, int num_pages)
num_pages          13 net/ceph/pagevec.c void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
num_pages          17 net/ceph/pagevec.c 	for (i = 0; i < num_pages; i++) {
num_pages          26 net/ceph/pagevec.c void ceph_release_page_vector(struct page **pages, int num_pages)
num_pages          30 net/ceph/pagevec.c 	for (i = 0; i < num_pages; i++)
num_pages          39 net/ceph/pagevec.c struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
num_pages          44 net/ceph/pagevec.c 	pages = kmalloc_array(num_pages, sizeof(*pages), flags);
num_pages          47 net/ceph/pagevec.c 	for (i = 0; i < num_pages; i++) {
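
net/ceph/pagevec.c above provides the allocate/put/release trio declared in include/linux/ceph/libceph.h. A hedged caller-side sketch (hypothetical function, error handling trimmed) of how such a vector is typically sized, allocated, and released:

	/* Hypothetical example, not copied from fs/ceph: size the vector with
	 * calc_pages_for(), allocate it, and free both the pages and the
	 * array when done. */
	static int page_vector_example(u64 off, u64 len)
	{
		int num_pages = calc_pages_for(off, len);
		struct page **pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		/* ... attach 'pages' to an OSD request or copy data here ... */

		ceph_release_page_vector(pages, num_pages);
		return 0;
	}
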
num_pages          58 net/xdp/xsk_diag.c 	du.num_pages = umem->npgs;
num_pages          31 sound/soc/amd/raven/acp3x-pcm-dma.c 	u16 num_pages;
num_pages         221 sound/soc/amd/raven/acp3x-pcm-dma.c 		val = rtd->num_pages * 8;
num_pages         229 sound/soc/amd/raven/acp3x-pcm-dma.c 	for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
num_pages         252 sound/soc/amd/raven/acp3x-pcm-dma.c 		acp_fifo_addr = ACP_SRAM_PTE_OFFSET + (rtd->num_pages * 8)
num_pages         267 sound/soc/amd/raven/acp3x-pcm-dma.c 				(rtd->num_pages * 8) + CAPTURE_FIFO_ADDR_OFFSET;
num_pages         356 sound/soc/amd/raven/acp3x-pcm-dma.c 		rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
num_pages        1126 sound/soc/intel/haswell/sst-haswell-ipc.c 	u32 ring_pt_address, u32 num_pages,
num_pages        1135 sound/soc/intel/haswell/sst-haswell-ipc.c 	stream->request.ringinfo.num_pages = num_pages;
num_pages         270 sound/soc/intel/haswell/sst-haswell-ipc.h 	u32 num_pages;
num_pages         429 sound/soc/intel/haswell/sst-haswell-ipc.h 	u32 ring_pt_address, u32 num_pages,
num_pages          33 sound/xen/xen_snd_front_alsa.c 	int num_pages;
num_pages         228 sound/xen/xen_snd_front_alsa.c 	stream->num_pages = 0;
num_pages         449 sound/xen/xen_snd_front_alsa.c 	stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
num_pages         450 sound/xen/xen_snd_front_alsa.c 	stream->pages = kcalloc(stream->num_pages, sizeof(struct page *),
num_pages         455 sound/xen/xen_snd_front_alsa.c 	for (i = 0; i < stream->num_pages; i++)
num_pages         481 sound/xen/xen_snd_front_alsa.c 	buf_cfg.num_pages = stream->num_pages;
num_pages         500 tools/include/uapi/linux/kvm.h 	__u32 num_pages;
num_pages          25 tools/testing/scatterlist/main.c 		unsigned num_pages;
num_pages          60 tools/testing/scatterlist/main.c 		set_pages(pages, test->pfn, test->num_pages);
num_pages          62 tools/testing/scatterlist/main.c 		ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages,
num_pages          78 tools/testing/selftests/kvm/include/kvm_util.h 			    uint64_t first_page, uint32_t num_pages);
num_pages         288 tools/testing/selftests/kvm/lib/kvm_util.c 			    uint64_t first_page, uint32_t num_pages)
num_pages         292 tools/testing/selftests/kvm/lib/kvm_util.c 	                                    .num_pages = num_pages };
num_pages        1327 virt/kvm/kvm_main.c 	n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
num_pages        1330 virt/kvm/kvm_main.c 	    log->num_pages > memslot->npages - log->first_page ||
num_pages        1331 virt/kvm/kvm_main.c 	    (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
num_pages        1341 virt/kvm/kvm_main.c 		 n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
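
The closing virt/kvm/kvm_main.c hits come from the KVM_CLEAR_DIRTY_LOG path, where num_pages both sizes the user-supplied dirty bitmap and is validated against the memslot. A hedged sketch of just the sizing arithmetic visible in the listing (one bit per page, padded to a BITS_PER_LONG boundary), assuming ALIGN() and BITS_PER_LONG from the usual kernel headers:

	/* Illustrative only: byte size of a dirty bitmap covering num_pages
	 * pages, rounded up to a whole number of longs. */
	static inline unsigned long dirty_bitmap_bytes(u32 num_pages)
	{
		return ALIGN(num_pages, BITS_PER_LONG) / 8;
	}
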