npages 270 arch/alpha/kernel/pci_iommu.c long npages, dma_ofs, i;
npages 313 arch/alpha/kernel/pci_iommu.c npages = iommu_num_pages(paddr, size, PAGE_SIZE);
npages 318 arch/alpha/kernel/pci_iommu.c dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
npages 326 arch/alpha/kernel/pci_iommu.c for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
npages 333 arch/alpha/kernel/pci_iommu.c cpu_addr, size, npages, ret, __builtin_return_address(0));
npages 391 arch/alpha/kernel/pci_iommu.c long dma_ofs, npages;
npages 424 arch/alpha/kernel/pci_iommu.c npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
npages 428 arch/alpha/kernel/pci_iommu.c iommu_arena_free(arena, dma_ofs, npages);
npages 439 arch/alpha/kernel/pci_iommu.c dma_addr, size, npages, __builtin_return_address(0));
npages 575 arch/alpha/kernel/pci_iommu.c long npages, dma_ofs, i;
npages 608 arch/alpha/kernel/pci_iommu.c npages = iommu_num_pages(paddr, size, PAGE_SIZE);
npages 609 arch/alpha/kernel/pci_iommu.c dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
npages 625 arch/alpha/kernel/pci_iommu.c __va(paddr), size, out->dma_address, npages);
npages 644 arch/alpha/kernel/pci_iommu.c npages = iommu_num_pages(paddr, size, PAGE_SIZE);
npages 647 arch/alpha/kernel/pci_iommu.c for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
npages 653 arch/alpha/kernel/pci_iommu.c last_sg->length, npages);
npages 773 arch/alpha/kernel/pci_iommu.c long npages, ofs;
npages 799 arch/alpha/kernel/pci_iommu.c npages = iommu_num_pages(addr, size, PAGE_SIZE);
npages 801 arch/alpha/kernel/pci_iommu.c iommu_arena_free(arena, ofs, npages);
npages 370 arch/arm/kernel/process.c unsigned int npages)
npages 380 arch/arm/kernel/process.c last = TASK_SIZE - (npages << PAGE_SHIFT);
npages 419 arch/arm/kernel/process.c unsigned long npages;
npages 429 arch/arm/kernel/process.c npages = 1; /* for sigpage */
npages 430 arch/arm/kernel/process.c npages += vdso_total_pages;
npages 434 arch/arm/kernel/process.c hint = sigpage_addr(mm, npages);
npages 435 arch/arm/kernel/process.c addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
npages 192 arch/mips/kvm/mips.c unsigned long npages)
npages 217 arch/mips/kvm/mips.c slot->base_gfn + slot->npages - 1);
npages 258 arch/mips/kvm/mips.c new->base_gfn + new->npages - 1);
npages 490 arch/mips/kvm/mmu.c (memslot->npages << PAGE_SHIFT));
npages 151 arch/parisc/kernel/inventory.c unsigned long npages;
npages 166 arch/parisc/kernel/inventory.c npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
npages 167 arch/parisc/kernel/inventory.c set_pmem_entry(pmem_ranges,0UL,npages);
npages 257 arch/parisc/mm/init.c unsigned long npages;
npages 262 arch/parisc/mm/init.c npages = pmem_ranges[i].pages;
npages 265 arch/parisc/mm/init.c size = npages << PAGE_SHIFT;
npages 270 arch/parisc/mm/init.c if ((start_pfn + npages) > max_pfn)
npages 271 arch/parisc/mm/init.c max_pfn = start_pfn + npages;
npages 41 arch/powerpc/include/asm/iommu.h long index, long npages,
npages 65 arch/powerpc/include/asm/iommu.h long index, long npages);
npages 298 arch/powerpc/include/asm/iommu.h unsigned long ioba, unsigned long npages);
npages 302 arch/powerpc/include/asm/iommu.h #define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
npages 305 arch/powerpc/include/asm/iommu.h (ioba), (npages)) || (tce_value))
npages 493 arch/powerpc/include/asm/kvm_book3s_64.h return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
npages 560 arch/powerpc/include/asm/kvm_book3s_64.h unsigned long npages)
npages 563 arch/powerpc/include/asm/kvm_book3s_64.h if (npages >= 8)
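The arch/alpha hits above derive npages from a (byte address, byte length) pair via iommu_num_pages(), and the same helper recurs throughout the rest of this listing. A minimal userspace sketch of that rounding, mirroring the helper in lib/iommu-helper.c with the DIV_ROUND_UP open-coded:

```c
#include <assert.h>

/* mirrors lib/iommu-helper.c: widen the span by the offset of addr
 * within its first IO page, then divide by the page size, rounding up */
static unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
				     unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return (size + io_page_size - 1) / io_page_size; /* DIV_ROUND_UP */
}

int main(void)
{
	/* a single byte needs one page... */
	assert(iommu_num_pages(0x1000, 1, 4096) == 1);
	/* ...but two bytes straddling a page boundary need two */
	assert(iommu_num_pages(0x1fff, 2, 4096) == 2);
	return 0;
}
```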
npages 564 arch/powerpc/include/asm/kvm_book3s_64.h memset((char *)map + i / 8, 0xff, npages / 8);
npages 566 arch/powerpc/include/asm/kvm_book3s_64.h for (; npages; ++i, --npages)
npages 571 arch/powerpc/include/asm/kvm_book3s_64.h unsigned long npages)
npages 573 arch/powerpc/include/asm/kvm_book3s_64.h if (npages >= 8)
npages 574 arch/powerpc/include/asm/kvm_book3s_64.h memset((char *)map + i / 8, 0xff, npages / 8);
npages 576 arch/powerpc/include/asm/kvm_book3s_64.h for (; npages; ++i, --npages)
npages 185 arch/powerpc/include/asm/kvm_ppc.h #define kvmppc_ioba_validate(stt, ioba, npages) \
npages 187 arch/powerpc/include/asm/kvm_ppc.h (stt)->size, (ioba), (npages)) ? \
npages 193 arch/powerpc/include/asm/kvm_ppc.h unsigned long tce_list, unsigned long npages);
npages 196 arch/powerpc/include/asm/kvm_ppc.h unsigned long tce_value, unsigned long npages);
npages 208 arch/powerpc/include/asm/kvm_ppc.h unsigned long npages);
npages 298 arch/powerpc/include/asm/kvm_ppc.h unsigned long npages);
npages 757 arch/powerpc/include/asm/kvm_ppc.h unsigned long tce_list, unsigned long npages);
npages 760 arch/powerpc/include/asm/kvm_ppc.h unsigned long tce_value, unsigned long npages);
npages 242 arch/powerpc/include/asm/opal.h uint64_t dma_addr, uint32_t npages);
npages 34 arch/powerpc/include/asm/ultravisor.h static inline int uv_share_page(u64 pfn, u64 npages)
npages 36 arch/powerpc/include/asm/ultravisor.h return ucall_norets(UV_SHARE_PAGE, pfn, npages);
npages 39 arch/powerpc/include/asm/ultravisor.h static inline int uv_unshare_page(u64 pfn, u64 npages)
npages 41 arch/powerpc/include/asm/ultravisor.h return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
npages 165 arch/powerpc/kernel/iommu.c unsigned long npages,
npages 172 arch/powerpc/kernel/iommu.c int largealloc = npages > 15;
npages 185 arch/powerpc/kernel/iommu.c if (unlikely(npages == 0)) {
npages 246 arch/powerpc/kernel/iommu.c n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
npages 272 arch/powerpc/kernel/iommu.c end = n + npages;
npages 294 arch/powerpc/kernel/iommu.c void *page, unsigned int npages,
npages 303 arch/powerpc/kernel/iommu.c entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
npages 312 arch/powerpc/kernel/iommu.c build_fail = tbl->it_ops->set(tbl, entry, npages,
npages 322 arch/powerpc/kernel/iommu.c __iommu_free(tbl, ret, npages);
npages 337 arch/powerpc/kernel/iommu.c unsigned int npages)
npages 344 arch/powerpc/kernel/iommu.c if (((free_entry + npages) > tbl->it_size) ||
npages 384 arch/powerpc/kernel/iommu.c unsigned int npages)
npages 395 arch/powerpc/kernel/iommu.c if (!iommu_free_check(tbl, dma_addr, npages))
npages 398 arch/powerpc/kernel/iommu.c tbl->it_ops->clear(tbl, entry, npages);
npages 401 arch/powerpc/kernel/iommu.c bitmap_clear(tbl->it_map, free_entry, npages);
npages 406 arch/powerpc/kernel/iommu.c unsigned int npages)
npages 408 arch/powerpc/kernel/iommu.c __iommu_free(tbl, dma_addr, npages);
npages 447 arch/powerpc/kernel/iommu.c unsigned long vaddr, npages, entry, slen;
npages 457 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
npages 462 arch/powerpc/kernel/iommu.c entry = iommu_range_alloc(dev, tbl, npages, &handle,
npages 473 arch/powerpc/kernel/iommu.c npages);
npages 483 arch/powerpc/kernel/iommu.c npages, entry, dma_addr);
npages 486 arch/powerpc/kernel/iommu.c build_fail = tbl->it_ops->set(tbl, entry, npages,
npages 547 arch/powerpc/kernel/iommu.c unsigned long vaddr, npages;
npages 550 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(s->dma_address, s->dma_length,
npages 552 arch/powerpc/kernel/iommu.c __iommu_free(tbl, vaddr, npages);
npages 576 arch/powerpc/kernel/iommu.c unsigned int npages;
npages 581 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(dma_handle, sg->dma_length,
npages 583 arch/powerpc/kernel/iommu.c __iommu_free(tbl, dma_handle, npages);
npages 802 arch/powerpc/kernel/iommu.c unsigned int npages, align;
npages 810 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
npages 816 arch/powerpc/kernel/iommu.c dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
npages 824 arch/powerpc/kernel/iommu.c npages);
npages 837 arch/powerpc/kernel/iommu.c unsigned int npages;
npages 842 arch/powerpc/kernel/iommu.c npages = iommu_num_pages(dma_handle, size,
npages 844 arch/powerpc/kernel/iommu.c iommu_free(tbl, dma_handle, npages);
npages 987 arch/powerpc/kernel/iommu.c unsigned long ioba, unsigned long npages)
npages 844 arch/powerpc/kvm/book3s.c unsigned long npages)
npages 846 arch/powerpc/kvm/book3s.c return kvm->arch.kvm_ops->create_memslot(slot, npages);
npages 208 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long npages;
npages 218 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = memslot->npages >> (porder - PAGE_SHIFT);
npages 221 arch/powerpc/kvm/book3s_64_mmu_hv.c if (npages > 1ul << (40 - porder))
npages 222 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = 1ul << (40 - porder);
npages 224 arch/powerpc/kvm/book3s_64_mmu_hv.c if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1)
npages 225 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1;
npages 232 arch/powerpc/kvm/book3s_64_mmu_hv.c for (i = 0; i < npages; ++i) {
npages 505 arch/powerpc/kvm/book3s_64_mmu_hv.c long index, ret, npages;
npages 592 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
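The arch/powerpc/kernel/iommu.c hits above show the allocator shape around npages: iommu_range_alloc() finds npages consecutive free slots in the table's bitmap (via iommu_area_alloc()) and __iommu_free() hands them back with bitmap_clear(). A toy stand-in with a plain linear scan instead of the kernel's bitmap helpers; the table size and names are invented for the sketch:

```c
#include <assert.h>
#include <stdbool.h>

#define TBL_SIZE 1024			/* slots in the toy table */

static bool map[TBL_SIZE];		/* one flag per TCE slot */

/* find npages consecutive free slots and mark them used */
static long range_alloc(unsigned long npages)
{
	for (unsigned long n = 0; n + npages <= TBL_SIZE; n++) {
		unsigned long i;

		for (i = 0; i < npages && !map[n + i]; i++)
			;
		if (i == npages) {	/* free run found */
			for (i = 0; i < npages; i++)
				map[n + i] = true;
			return (long)n;
		}
	}
	return -1;			/* table exhausted */
}

/* the bitmap_clear() counterpart */
static void range_free(long entry, unsigned long npages)
{
	while (npages--)
		map[entry++] = false;
}

int main(void)
{
	long a = range_alloc(4), b = range_alloc(2);

	assert(a == 0 && b == 4);	/* first-fit placement */
	range_free(a, 4);
	assert(range_alloc(3) == 0);	/* freed range is reusable */
	return 0;
}
```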
npages 593 arch/powerpc/kvm/book3s_64_mmu_hv.c if (npages < 1) {
npages 760 arch/powerpc/kvm/book3s_64_mmu_hv.c memslot->npages * sizeof(*memslot->arch.rmap));
npages 786 arch/powerpc/kvm/book3s_64_mmu_hv.c (memslot->npages << PAGE_SHIFT));
npages 914 arch/powerpc/kvm/book3s_64_mmu_hv.c for (n = memslot->npages; n; --n, ++gfn) {
npages 1139 arch/powerpc/kvm/book3s_64_mmu_hv.c gfn >= memslot->base_gfn + memslot->npages)
npages 1155 arch/powerpc/kvm/book3s_64_mmu_hv.c for (i = 0; i < memslot->npages; ++i) {
npages 1156 arch/powerpc/kvm/book3s_64_mmu_hv.c int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
npages 1162 arch/powerpc/kvm/book3s_64_mmu_hv.c if (npages)
npages 1163 arch/powerpc/kvm/book3s_64_mmu_hv.c set_dirty_bits(map, i, npages);
npages 1176 arch/powerpc/kvm/book3s_64_mmu_hv.c int npages;
npages 1185 arch/powerpc/kvm/book3s_64_mmu_hv.c npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
npages 1186 arch/powerpc/kvm/book3s_64_mmu_hv.c if (npages < 1)
npages 1056 arch/powerpc/kvm/book3s_64_mmu_radix.c int npages;
npages 1058 arch/powerpc/kvm/book3s_64_mmu_radix.c for (i = 0; i < memslot->npages; i = j) {
npages 1059 arch/powerpc/kvm/book3s_64_mmu_radix.c npages = kvm_radix_test_clear_dirty(kvm, memslot, i);
npages 1069 arch/powerpc/kvm/book3s_64_mmu_radix.c if (npages) {
npages 1070 arch/powerpc/kvm/book3s_64_mmu_radix.c set_dirty_bits(map, i, npages);
npages 1071 arch/powerpc/kvm/book3s_64_mmu_radix.c j = i + npages;
npages 1087 arch/powerpc/kvm/book3s_64_mmu_radix.c for (n = memslot->npages; n; --n) {
npages 181 arch/powerpc/kvm/book3s_64_vio.c unsigned long i, npages = kvmppc_tce_pages(stt->size);
npages 183 arch/powerpc/kvm/book3s_64_vio.c for (i = 0; i < npages; i++)
npages 275 arch/powerpc/kvm/book3s_64_vio.c unsigned long npages, size = args->size;
npages 282 arch/powerpc/kvm/book3s_64_vio.c npages = kvmppc_tce_pages(size);
npages 283 arch/powerpc/kvm/book3s_64_vio.c ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
npages 288 arch/powerpc/kvm/book3s_64_vio.c stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
npages 329 arch/powerpc/kvm/book3s_64_vio.c account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
npages 602 arch/powerpc/kvm/book3s_64_vio.c unsigned long tce_list, unsigned long npages)
npages 620 arch/powerpc/kvm/book3s_64_vio.c if (npages > 512)
npages 626 arch/powerpc/kvm/book3s_64_vio.c ret = kvmppc_ioba_validate(stt, ioba, npages);
npages 637 arch/powerpc/kvm/book3s_64_vio.c for (i = 0; i < npages; ++i) {
npages 649 arch/powerpc/kvm/book3s_64_vio.c for (i = 0; i < npages; ++i) {
npages 688 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_kill(stit->tbl, entry, npages);
npages 699 arch/powerpc/kvm/book3s_64_vio.c unsigned long tce_value, unsigned long npages)
npages 709 arch/powerpc/kvm/book3s_64_vio.c ret = kvmppc_ioba_validate(stt, ioba, npages);
npages 720 arch/powerpc/kvm/book3s_64_vio.c for (i = 0; i < npages; ++i) {
npages 735 arch/powerpc/kvm/book3s_64_vio.c for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
npages 740 arch/powerpc/kvm/book3s_64_vio.c iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
npages 196 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long ioba, unsigned long npages, bool clearing)
npages 199 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);
npages 212 arch/powerpc/kvm/book3s_64_vio_hv.c sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
npages 479 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long tce_list, unsigned long npages)
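The set_dirty_bits()/set_dirty_bits_atomic() helpers from the kvm_book3s_64.h hits earlier, which the book3s_64_mmu_hv.c and book3s_64_mmu_radix.c dirty-logging code above calls with an npages count, fill whole bytes of the little-endian dirty bitmap when npages >= 8. A hedged userspace rendering; it assumes, as the KVM callers arrange, that i and npages are multiples of 8 whenever the memset path is taken:

```c
#include <assert.h>
#include <string.h>

/* little-endian bit numbering on a byte array, like __set_bit_le() */
static void set_bit_le(unsigned long nr, unsigned char *map)
{
	map[nr / 8] |= (unsigned char)(1u << (nr % 8));
}

static void set_dirty_bits(unsigned char *map, unsigned long i,
			   unsigned long npages)
{
	if (npages >= 8)
		memset(map + i / 8, 0xff, npages / 8);	/* whole bytes */
	else
		for (; npages; ++i, --npages)		/* short runs */
			set_bit_le(i, map);
}

int main(void)
{
	unsigned char map[4] = { 0 };

	set_dirty_bits(map, 8, 16);	/* byte-aligned: memset path */
	set_dirty_bits(map, 0, 3);	/* short run: bit-by-bit path */
	assert(map[0] == 0x07 && map[1] == 0xff && map[2] == 0xff);
	return 0;
}
```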
npages 501 arch/powerpc/kvm/book3s_64_vio_hv.c if (npages > 512)
npages 507 arch/powerpc/kvm/book3s_64_vio_hv.c ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
npages 557 arch/powerpc/kvm/book3s_64_vio_hv.c for (i = 0; i < npages; ++i) {
npages 565 arch/powerpc/kvm/book3s_64_vio_hv.c for (i = 0; i < npages; ++i) {
npages 591 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_kill_rm(stit->tbl, entry, npages);
npages 602 arch/powerpc/kvm/book3s_64_vio_hv.c unsigned long tce_value, unsigned long npages)
npages 616 arch/powerpc/kvm/book3s_64_vio_hv.c ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
npages 627 arch/powerpc/kvm/book3s_64_vio_hv.c for (i = 0; i < npages; ++i) {
npages 642 arch/powerpc/kvm/book3s_64_vio_hv.c for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
npages 647 arch/powerpc/kvm/book3s_64_vio_hv.c iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
npages 813 arch/powerpc/kvm/book3s_hv.c if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
npages 825 arch/powerpc/kvm/book3s_hv.c if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
npages 4468 arch/powerpc/kvm/book3s_hv.c unsigned long npages)
npages 4470 arch/powerpc/kvm/book3s_hv.c slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
npages 4490 arch/powerpc/kvm/book3s_hv.c unsigned long npages = mem->memory_size >> PAGE_SHIFT;
npages 4498 arch/powerpc/kvm/book3s_hv.c if (npages)
npages 903 arch/powerpc/kvm/book3s_hv_nested.c for (page = 0; page < free->npages; page++) {
npages 978 arch/powerpc/kvm/book3s_hv_nested.c long npages;
npages 989 arch/powerpc/kvm/book3s_hv_nested.c npages = 1UL << (shift - PAGE_SHIFT);
npages 1000 arch/powerpc/kvm/book3s_hv_nested.c npages -= 1UL << (shadow_shift - PAGE_SHIFT);
npages 1002 arch/powerpc/kvm/book3s_hv_nested.c } while (npages > 0);
npages 112 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long npages;
npages 116 arch/powerpc/kvm/book3s_hv_rm_mmu.c npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
npages 118 arch/powerpc/kvm/book3s_hv_rm_mmu.c set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
npages 468 arch/powerpc/kvm/book3s_hv_rm_mmu.c long npages, int global, bool need_sync)
npages 480 arch/powerpc/kvm/book3s_hv_rm_mmu.c for (i = 0; i < npages; ++i) {
npages 490 arch/powerpc/kvm/book3s_hv_rm_mmu.c for (i = 0; i < npages; ++i) {
npages 407 arch/powerpc/kvm/book3s_pr.c (memslot->npages << PAGE_SHIFT));
npages 1885 arch/powerpc/kvm/book3s_pr.c ga_end = ga + (memslot->npages << PAGE_SHIFT);
npages 1929 arch/powerpc/kvm/book3s_pr.c unsigned long npages)
npages 304 arch/powerpc/kvm/book3s_pr_papr.c unsigned long npages = kvmppc_get_gpr(vcpu, 7);
npages 308 arch/powerpc/kvm/book3s_pr_papr.c tce, npages);
npages 320 arch/powerpc/kvm/book3s_pr_papr.c unsigned long npages = kvmppc_get_gpr(vcpu, 7);
npages 323 arch/powerpc/kvm/book3s_pr_papr.c rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
npages 1810 arch/powerpc/kvm/booke.c unsigned long npages)
npages 382 arch/powerpc/kvm/e500_mmu_host.c slot_end = slot_start + slot->npages;
npages 691 arch/powerpc/kvm/powerpc.c unsigned long npages)
npages 693 arch/powerpc/kvm/powerpc.c return kvmppc_core_create_memslot(kvm, slot, npages);
npages 54 arch/powerpc/mm/book3s64/subpage_prot.c int npages)
npages 73 arch/powerpc/mm/book3s64/subpage_prot.c for (; npages > 0; --npages) {
npages 154 arch/powerpc/platforms/cell/iommu.c static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
npages 188 arch/powerpc/platforms/cell/iommu.c for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
npages 193 arch/powerpc/platforms/cell/iommu.c invalidate_tce_cache(window->iommu, io_pte, npages);
npages 196 arch/powerpc/platforms/cell/iommu.c index, npages, direction, base_pte);
npages 200 arch/powerpc/platforms/cell/iommu.c static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
npages 208 arch/powerpc/platforms/cell/iommu.c pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);
npages 222 arch/powerpc/platforms/cell/iommu.c for (i = 0; i < npages; i++)
npages 227 arch/powerpc/platforms/cell/iommu.c invalidate_tce_cache(window->iommu, io_pte, npages);
npages 77 arch/powerpc/platforms/pasemi/iommu.c long npages, unsigned long uaddr,
npages 85 arch/powerpc/platforms/pasemi/iommu.c pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);
npages 91 arch/powerpc/platforms/pasemi/iommu.c while (npages--) {
npages 106 arch/powerpc/platforms/pasemi/iommu.c long npages)
npages 111 arch/powerpc/platforms/pasemi/iommu.c pr_debug("iobmap: free at: %lx, %lx\n", index, npages);
npages 117 arch/powerpc/platforms/pasemi/iommu.c while (npages--) {
npages 96 arch/powerpc/platforms/powernv/pci-ioda-tce.c int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
npages 107 arch/powerpc/platforms/powernv/pci-ioda-tce.c for (i = 0; i < npages; i++) {
npages 163 arch/powerpc/platforms/powernv/pci-ioda-tce.c void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
npages 167 arch/powerpc/platforms/powernv/pci-ioda-tce.c for (i = 0; i < npages; i++) {
npages 1902 arch/powerpc/platforms/powernv/pci-ioda.c unsigned long index, unsigned long npages, bool rm)
npages 1914 arch/powerpc/platforms/powernv/pci-ioda.c npages - 1);
npages 1939 arch/powerpc/platforms/powernv/pci-ioda.c long npages, unsigned long uaddr,
npages 1943 arch/powerpc/platforms/powernv/pci-ioda.c int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
npages 1947 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
npages 1963 arch/powerpc/platforms/powernv/pci-ioda.c long npages)
npages 1965 arch/powerpc/platforms/powernv/pci-ioda.c pnv_tce_free(tbl, index, npages);
npages 1967 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
npages 2009 arch/powerpc/platforms/powernv/pci-ioda.c unsigned long npages)
npages 2021 arch/powerpc/platforms/powernv/pci-ioda.c end |= ((index + npages - 1) << shift);
npages 2046 arch/powerpc/platforms/powernv/pci-ioda.c unsigned long index, unsigned long npages, bool rm)
npages 2072 arch/powerpc/platforms/powernv/pci-ioda.c index, npages);
npages 2077 arch/powerpc/platforms/powernv/pci-ioda.c index << shift, npages);
npages 2090 arch/powerpc/platforms/powernv/pci-ioda.c long npages, unsigned long uaddr,
npages 2094 arch/powerpc/platforms/powernv/pci-ioda.c int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
npages 2098 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
npages 2104 arch/powerpc/platforms/powernv/pci-ioda.c long npages)
npages 2106 arch/powerpc/platforms/powernv/pci-ioda.c pnv_tce_free(tbl, index, npages);
npages 2108 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
npages 225 arch/powerpc/platforms/powernv/pci.h extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
npages 228 arch/powerpc/platforms/powernv/pci.h extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
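The cell, pasemi and powernv hits above, and the pseries and x86 tce_64.c hits further down, share one loop shape: walk npages table slots starting at index, installing or clearing one translation entry per IO page. A toy model of that build/free pair; the table layout and the TCE_VALID flag bit are invented for illustration:

```c
#include <assert.h>
#include <stdint.h>

#define IO_PAGE_SHIFT	12
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define TCE_VALID	0x1ULL		/* invented flag bit */

/* install npages entries starting at index, one per IO page */
static void tce_build(uint64_t *tbl, long index, long npages,
		      unsigned long uaddr)
{
	while (npages--) {
		tbl[index++] = (uaddr & ~(IO_PAGE_SIZE - 1)) | TCE_VALID;
		uaddr += IO_PAGE_SIZE;
	}
}

/* clear them again */
static void tce_free(uint64_t *tbl, long index, long npages)
{
	while (npages--)
		tbl[index++] = 0;
}

int main(void)
{
	uint64_t tbl[8] = { 0 };

	tce_build(tbl, 2, 3, 0x5000);
	assert(tbl[2] == (0x5000 | TCE_VALID));
	assert(tbl[4] == (0x7000 | TCE_VALID));
	tce_free(tbl, 2, 3);
	assert(tbl[3] == 0);
	return 0;
}
```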
npages 89 arch/powerpc/platforms/pseries/iommu.c long npages, unsigned long uaddr,
npages 104 arch/powerpc/platforms/pseries/iommu.c while (npages--) {
npages 116 arch/powerpc/platforms/pseries/iommu.c static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
npages 122 arch/powerpc/platforms/pseries/iommu.c while (npages--)
npages 139 arch/powerpc/platforms/pseries/iommu.c long npages, unsigned long uaddr,
npages 147 arch/powerpc/platforms/pseries/iommu.c long tcenum_start = tcenum, npages_start = npages;
npages 154 arch/powerpc/platforms/pseries/iommu.c while (npages--) {
npages 161 arch/powerpc/platforms/pseries/iommu.c (npages_start - (npages + 1)));
npages 182 arch/powerpc/platforms/pseries/iommu.c long npages, unsigned long uaddr,
npages 191 arch/powerpc/platforms/pseries/iommu.c long tcenum_start = tcenum, npages_start = npages;
npages 195 arch/powerpc/platforms/pseries/iommu.c if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
npages 197 arch/powerpc/platforms/pseries/iommu.c tbl->it_page_shift, npages, uaddr,
npages 215 arch/powerpc/platforms/pseries/iommu.c npages, uaddr, direction, attrs);
npages 231 arch/powerpc/platforms/pseries/iommu.c limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
npages 243 arch/powerpc/platforms/pseries/iommu.c npages -= limit;
npages 245 arch/powerpc/platforms/pseries/iommu.c } while (npages > 0 && !rc);
npages 252 arch/powerpc/platforms/pseries/iommu.c (npages_start - (npages + limit)));
npages 259 arch/powerpc/platforms/pseries/iommu.c printk("\tnpages = 0x%llx\n", (u64)npages);
npages 266 arch/powerpc/platforms/pseries/iommu.c static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
npages 270 arch/powerpc/platforms/pseries/iommu.c while (npages--) {
npages 285 arch/powerpc/platforms/pseries/iommu.c static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
npages 290 arch/powerpc/platforms/pseries/iommu.c return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
npages 292 arch/powerpc/platforms/pseries/iommu.c rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
npages 298 arch/powerpc/platforms/pseries/iommu.c printk("\tnpages = 0x%llx\n", (u64)npages);
npages 410 arch/powerpc/platforms/pseries/iommu.c unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
npages 414 arch/powerpc/platforms/pseries/iommu.c tcenum, tceshift, npages, (unsigned long) uaddr,
npages 173 arch/powerpc/sysdev/dart_iommu.c long npages, unsigned long uaddr,
npages 181 arch/powerpc/sysdev/dart_iommu.c DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
npages 188 arch/powerpc/sysdev/dart_iommu.c l = npages;
npages 196 arch/powerpc/sysdev/dart_iommu.c dart_cache_sync(orig_dp, npages);
npages 200 arch/powerpc/sysdev/dart_iommu.c while (npages--)
npages 209 arch/powerpc/sysdev/dart_iommu.c static void dart_free(struct iommu_table *tbl, long index, long npages)
npages 212 arch/powerpc/sysdev/dart_iommu.c long orig_npages = npages;
npages 219 arch/powerpc/sysdev/dart_iommu.c DBG("dart: free at: %lx, %lx\n", index, npages);
npages 223 arch/powerpc/sysdev/dart_iommu.c while (npages--)
npages 586 arch/s390/kvm/kvm-s390.c last_gfn = memslot->base_gfn + memslot->npages;
npages 1030 arch/s390/kvm/kvm-s390.c ram_pages += ms->npages;
npages 1923 arch/s390/kvm/kvm-s390.c gfn < memslots[slot].base_gfn + memslots[slot].npages)
npages 1939 arch/s390/kvm/kvm-s390.c gfn < memslots[start].base_gfn + memslots[start].npages) {
npages 1976 arch/s390/kvm/kvm-s390.c if (ms->base_gfn + ms->npages <= cur_gfn) {
npages 1985 arch/s390/kvm/kvm-s390.c ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
npages 1986 arch/s390/kvm/kvm-s390.c while ((slotidx > 0) && (ofs >= ms->npages)) {
npages 1989 arch/s390/kvm/kvm-s390.c ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
npages 2008 arch/s390/kvm/kvm-s390.c mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
npages 2033 arch/s390/kvm/kvm-s390.c if (cur_gfn - ms->base_gfn >= ms->npages) {
npages 4499 arch/s390/kvm/kvm-s390.c unsigned long npages)
npages 4538 arch/s390/kvm/kvm-s390.c old->npages * PAGE_SIZE);
npages 4542 arch/s390/kvm/kvm-s390.c old->npages * PAGE_SIZE);
npages 381 arch/s390/pci/pci_dma.c int npages, ret;
npages 383 arch/s390/pci/pci_dma.c npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
npages 385 arch/s390/pci/pci_dma.c ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
npages 393 arch/s390/pci/pci_dma.c atomic64_add(npages, &zdev->unmapped_pages);
npages 394 arch/s390/pci/pci_dma.c dma_free_address(dev, dma_addr, npages);
npages 44 arch/sparc/include/asm/iommu-common.h unsigned long npages,
npages 50 arch/sparc/include/asm/iommu-common.h u64 dma_addr, unsigned long npages,
npages 99 arch/sparc/kernel/iommu-common.c unsigned long npages,
npages 112 arch/sparc/kernel/iommu-common.c bool largealloc = (large_pool && npages > iommu_large_alloc);
npages 120 arch/sparc/kernel/iommu-common.c if (unlikely(npages == 0)) {
npages 185 arch/sparc/kernel/iommu-common.c n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
npages 215 arch/sparc/kernel/iommu-common.c end = n + npages;
npages 251 arch/sparc/kernel/iommu-common.c unsigned long npages, unsigned long entry)
npages 262 arch/sparc/kernel/iommu-common.c bitmap_clear(iommu->map, entry, npages);
npages 158 arch/sparc/kernel/iommu.c unsigned long npages)
npages 162 arch/sparc/kernel/iommu.c entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
npages 204 arch/sparc/kernel/iommu.c int npages, nid;
npages 233 arch/sparc/kernel/iommu.c npages = size >> IO_PAGE_SHIFT;
npages 235 arch/sparc/kernel/iommu.c while (npages--) {
npages 251 arch/sparc/kernel/iommu.c unsigned long order, npages;
npages 253 arch/sparc/kernel/iommu.c npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
npages 256 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
npages 271 arch/sparc/kernel/iommu.c unsigned long flags, npages, oaddr;
npages 283 arch/sparc/kernel/iommu.c npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages 284 arch/sparc/kernel/iommu.c npages >>= IO_PAGE_SHIFT;
npages 286 arch/sparc/kernel/iommu.c base = alloc_npages(dev, iommu, npages);
npages 307 arch/sparc/kernel/iommu.c for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
npages 321 arch/sparc/kernel/iommu.c u32 vaddr, unsigned long ctx, unsigned long npages,
npages 356 arch/sparc/kernel/iommu.c for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
npages 383 arch/sparc/kernel/iommu.c vaddr, ctx, npages);
npages 393 arch/sparc/kernel/iommu.c unsigned long flags, npages, ctx, i;
npages 404 arch/sparc/kernel/iommu.c npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages 405 arch/sparc/kernel/iommu.c npages >>= IO_PAGE_SHIFT;
npages 420 arch/sparc/kernel/iommu.c npages, direction);
npages 423 arch/sparc/kernel/iommu.c for (i = 0; i < npages; i++)
npages 429 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
npages 479 arch/sparc/kernel/iommu.c unsigned long paddr, npages, entry, out_entry = 0, slen;
npages 490 arch/sparc/kernel/iommu.c npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
npages 491 arch/sparc/kernel/iommu.c entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
npages 498 arch/sparc/kernel/iommu.c " npages %lx\n", iommu, paddr, npages);
npages 511 arch/sparc/kernel/iommu.c while (npages--) {
npages 559 arch/sparc/kernel/iommu.c unsigned long vaddr, npages, entry, j;
npages 563 arch/sparc/kernel/iommu.c npages = iommu_num_pages(s->dma_address, s->dma_length,
npages 570 arch/sparc/kernel/iommu.c for (j = 0; j < npages; j++)
npages 573 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
npages 630 arch/sparc/kernel/iommu.c unsigned long npages, entry;
npages 636 arch/sparc/kernel/iommu.c npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
npages 645 arch/sparc/kernel/iommu.c npages, direction);
npages 647 arch/sparc/kernel/iommu.c for (i = 0; i < npages; i++)
npages 650 arch/sparc/kernel/iommu.c iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
npages 666 arch/sparc/kernel/iommu.c unsigned long flags, ctx, npages;
npages 676 arch/sparc/kernel/iommu.c npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages 677 arch/sparc/kernel/iommu.c npages >>= IO_PAGE_SHIFT;
npages 693 arch/sparc/kernel/iommu.c strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
npages 704 arch/sparc/kernel/iommu.c unsigned long flags, ctx, npages, i;
npages 737 arch/sparc/kernel/iommu.c npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
npages 739 arch/sparc/kernel/iommu.c strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
npages 1023 arch/sparc/kernel/ldc.c unsigned long entry, unsigned long npages)
npages 1030 arch/sparc/kernel/ldc.c for (i = 0; i < npages; i++) {
npages 2018 arch/sparc/kernel/ldc.c unsigned long npages)
npages 2023 arch/sparc/kernel/ldc.c npages, NULL, (unsigned long)-1, 0);
npages 2152 arch/sparc/kernel/ldc.c unsigned long i, npages;
npages 2166 arch/sparc/kernel/ldc.c npages = err;
npages 2172 arch/sparc/kernel/ldc.c base = alloc_npages(iommu, npages);
npages 2198 arch/sparc/kernel/ldc.c unsigned long npages, pa;
npages 2210 arch/sparc/kernel/ldc.c npages = pages_in_region(pa, len);
npages 2214 arch/sparc/kernel/ldc.c base = alloc_npages(iommu, npages);
npages 2236 arch/sparc/kernel/ldc.c unsigned long npages, entry;
npages 2238 arch/sparc/kernel/ldc.c npages = PAGE_ALIGN(((cookie & ~PAGE_MASK) + size)) >> PAGE_SHIFT;
npages 2241 arch/sparc/kernel/ldc.c ldc_demap(iommu, id, cookie, entry, npages);
npages 2242 arch/sparc/kernel/ldc.c iommu_tbl_range_free(&iommu->iommu_map_table, cookie, npages, entry);
npages 59 arch/sparc/kernel/pci_sun4v.c unsigned long npages; /* Number of pages in list. */
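The sparc iommu.c hits above (and the pci_sun4v.c hits that follow) compute npages with an align-and-mask idiom: npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT;. A small self-check that, for power-of-two page sizes, this equals the round-up division used by iommu_num_pages() earlier; the 8K IO page size is taken from sparc64 but any power of two works:

```c
#include <assert.h>

#define IO_PAGE_SHIFT	13UL		/* sparc64 uses 8K IO pages */
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1))
#define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

int main(void)
{
	for (unsigned long oaddr = 0; oaddr < 4 * IO_PAGE_SIZE; oaddr += 997)
		for (unsigned long sz = 1; sz < 3 * IO_PAGE_SIZE; sz += 1231) {
			/* end rounded up minus start rounded down */
			unsigned long a = (IO_PAGE_ALIGN(oaddr + sz)
					   - (oaddr & IO_PAGE_MASK))
					  >> IO_PAGE_SHIFT;
			/* iommu_num_pages()-style round-up division */
			unsigned long b = ((oaddr & (IO_PAGE_SIZE - 1)) + sz
					   + IO_PAGE_SIZE - 1) / IO_PAGE_SIZE;

			assert(a == b);
		}
	return 0;
}
```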
npages 73 arch/sparc/kernel/pci_sun4v.c p->npages = 0;
npages 90 arch/sparc/kernel/pci_sun4v.c unsigned long npages = p->npages;
npages 99 arch/sparc/kernel/pci_sun4v.c while (npages != 0) {
npages 103 arch/sparc/kernel/pci_sun4v.c npages,
npages 111 arch/sparc/kernel/pci_sun4v.c npages, prot, __pa(pglist),
npages 116 arch/sparc/kernel/pci_sun4v.c index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
npages 134 arch/sparc/kernel/pci_sun4v.c npages -= num;
npages 139 arch/sparc/kernel/pci_sun4v.c p->npages = 0;
npages 148 arch/sparc/kernel/pci_sun4v.c if (p->entry + p->npages == entry)
npages 160 arch/sparc/kernel/pci_sun4v.c BUG_ON(p->npages >= PGLIST_NENTS);
npages 162 arch/sparc/kernel/pci_sun4v.c p->pglist[p->npages++] = phys_page;
npages 163 arch/sparc/kernel/pci_sun4v.c if (p->npages == PGLIST_NENTS)
npages 174 arch/sparc/kernel/pci_sun4v.c BUG_ON(p->npages >= PGLIST_NENTS);
npages 184 arch/sparc/kernel/pci_sun4v.c unsigned long flags, order, first_page, npages, n;
npages 198 arch/sparc/kernel/pci_sun4v.c npages = size >> IO_PAGE_SHIFT;
npages 218 arch/sparc/kernel/pci_sun4v.c entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
npages 235 arch/sparc/kernel/pci_sun4v.c for (n = 0; n < npages; n++) {
npages 250 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
npages 296 arch/sparc/kernel/pci_sun4v.c unsigned long entry, unsigned long npages)
npages 306 arch/sparc/kernel/pci_sun4v.c npages);
npages 309 arch/sparc/kernel/pci_sun4v.c entry, npages, &num);
npages 316 arch/sparc/kernel/pci_sun4v.c npages -= num;
npages 317 arch/sparc/kernel/pci_sun4v.c } while (npages != 0);
npages 328 arch/sparc/kernel/pci_sun4v.c unsigned long order, npages, entry;
npages 332 arch/sparc/kernel/pci_sun4v.c npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
npages 346 arch/sparc/kernel/pci_sun4v.c dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
npages 347 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
npages 362 arch/sparc/kernel/pci_sun4v.c unsigned long flags, npages, oaddr;
npages 375 arch/sparc/kernel/pci_sun4v.c npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages 376 arch/sparc/kernel/pci_sun4v.c npages >>= IO_PAGE_SHIFT;
npages 384 arch/sparc/kernel/pci_sun4v.c entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
npages 404 arch/sparc/kernel/pci_sun4v.c for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
npages 423 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
npages 435 arch/sparc/kernel/pci_sun4v.c unsigned long npages;
npages 451 arch/sparc/kernel/pci_sun4v.c npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages 452 arch/sparc/kernel/pci_sun4v.c npages >>= IO_PAGE_SHIFT;
npages 463 arch/sparc/kernel/pci_sun4v.c dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
npages 464 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
npages 523 arch/sparc/kernel/pci_sun4v.c unsigned long paddr, npages, entry, out_entry = 0, slen;
npages 533 arch/sparc/kernel/pci_sun4v.c npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
npages 534 arch/sparc/kernel/pci_sun4v.c entry = iommu_tbl_range_alloc(dev, tbl, npages,
npages 540 arch/sparc/kernel/pci_sun4v.c tbl, paddr, npages);
npages 552 arch/sparc/kernel/pci_sun4v.c while (npages--) {
npages 606 arch/sparc/kernel/pci_sun4v.c unsigned long vaddr, npages;
npages 609 arch/sparc/kernel/pci_sun4v.c npages = iommu_num_pages(s->dma_address, s->dma_length,
npages 611 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, vaddr, npages,
npages 650 arch/sparc/kernel/pci_sun4v.c unsigned long npages;
npages 656 arch/sparc/kernel/pci_sun4v.c npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
npages 667 arch/sparc/kernel/pci_sun4v.c entry, npages);
npages 668 arch/sparc/kernel/pci_sun4v.c iommu_tbl_range_free(tbl, dma_handle, npages,
npages 97 arch/sparc/mm/io-unit.c int i, j, k, npages;
npages 101 arch/sparc/mm/io-unit.c npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
npages 104 arch/sparc/mm/io-unit.c switch (npages) {
npages 110 arch/sparc/mm/io-unit.c IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
npages 117 arch/sparc/mm/io-unit.c if (scan + npages > limit) {
npages 128 arch/sparc/mm/io-unit.c for (k = 1, scan++; k < npages; k++)
npages 132 arch/sparc/mm/io-unit.c scan -= npages;
npages 135 arch/sparc/mm/io-unit.c for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
npages 184 arch/sparc/mm/iommu.c unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
npages 208 arch/sparc/mm/iommu.c ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
npages 216 arch/sparc/mm/iommu.c for (i = 0; i < npages; i++) {
npages 224 arch/sparc/mm/iommu.c iommu_flush_iotlb(iopte0, npages);
npages 280 arch/sparc/mm/iommu.c unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
npages 285 arch/sparc/mm/iommu.c for (i = 0; i < npages; i++) {
npages 290 arch/sparc/mm/iommu.c bit_map_clear(&iommu->usemap, ioptex, npages);
npages 984 arch/sparc/mm/srmmu.c unsigned long npages;
npages 990 arch/sparc/mm/srmmu.c npages = max_low_pfn - pfn_base;
npages 992 arch/sparc/mm/srmmu.c zones_size[ZONE_DMA] = npages;
npages 993 arch/sparc/mm/srmmu.c zholes_size[ZONE_DMA] = npages - pages_avail;
npages 995 arch/sparc/mm/srmmu.c npages = highend_pfn - max_low_pfn;
npages 996 arch/sparc/mm/srmmu.c zones_size[ZONE_HIGHMEM] = npages;
npages 997 arch/sparc/mm/srmmu.c zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
npages 1031 arch/x86/events/intel/pt.c unsigned long idx, npages, wakeup;
npages 1054 arch/x86/events/intel/pt.c npages = handle->size >> PAGE_SHIFT;
npages 1058 arch/x86/events/intel/pt.c npages++;
npages 1060 arch/x86/events/intel/pt.c idx = (head >> PAGE_SHIFT) + npages;
npages 1072 arch/x86/events/intel/pt.c idx = (head >> PAGE_SHIFT) + npages - 1;
npages 55 arch/x86/include/asm/kvm_page_track.h unsigned long npages);
npages 29 arch/x86/include/asm/tce.h unsigned int npages, unsigned long uaddr, int direction);
npages 30 arch/x86/include/asm/tce.h extern void tce_free(struct iommu_table *tbl, long index, unsigned int npages);
npages 203 arch/x86/kernel/amd_gart_64.c unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
npages 210 arch/x86/kernel/amd_gart_64.c iommu_page = alloc_iommu(dev, npages, align_mask);
npages 220 arch/x86/kernel/amd_gart_64.c for (i = 0; i < npages; i++) {
npages 253 arch/x86/kernel/amd_gart_64.c int npages;
npages 269 arch/x86/kernel/amd_gart_64.c npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
npages 270 arch/x86/kernel/amd_gart_64.c for (i = 0; i < npages; i++) {
npages 273 arch/x86/kernel/amd_gart_64.c free_iommu(iommu_page, npages);
npages 751 arch/x86/kernel/ftrace.c unsigned long npages;
npages 784 arch/x86/kernel/ftrace.c npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
npages 836 arch/x86/kernel/ftrace.c set_memory_x((unsigned long)trampoline, npages);
npages 866 arch/x86/kernel/ftrace.c int ret, npages;
npages 875 arch/x86/kernel/ftrace.c npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
npages 876 arch/x86/kernel/ftrace.c set_memory_rw(ops->trampoline, npages);
npages 882 arch/x86/kernel/ftrace.c npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
npages 895 arch/x86/kernel/ftrace.c set_memory_ro(ops->trampoline, npages);
npages 191 arch/x86/kernel/pci-calgary_64.c unsigned long start_addr, unsigned int npages)
npages 203 arch/x86/kernel/pci-calgary_64.c end = index + npages;
npages 209 arch/x86/kernel/pci-calgary_64.c bitmap_set(tbl->it_map, index, npages);
npages 216 arch/x86/kernel/pci-calgary_64.c unsigned int npages)
npages 225 arch/x86/kernel/pci-calgary_64.c BUG_ON(npages == 0);
npages 230 arch/x86/kernel/pci-calgary_64.c npages, 0, boundary_size, 0);
npages 235 arch/x86/kernel/pci-calgary_64.c npages, 0, boundary_size, 0);
npages 246 arch/x86/kernel/pci-calgary_64.c tbl->it_hint = offset + npages;
npages 255 arch/x86/kernel/pci-calgary_64.c void *vaddr, unsigned int npages, int direction)
npages 260 arch/x86/kernel/pci-calgary_64.c entry = iommu_range_alloc(dev, tbl, npages);
npages 263 arch/x86/kernel/pci-calgary_64.c npages, tbl);
npages 271 arch/x86/kernel/pci-calgary_64.c tce_build(tbl, entry, npages, (unsigned long)vaddr & PAGE_MASK,
npages 277 arch/x86/kernel/pci-calgary_64.c unsigned int npages)
npages 291 arch/x86/kernel/pci-calgary_64.c BUG_ON(entry + npages > tbl->it_size);
npages 293 arch/x86/kernel/pci-calgary_64.c tce_free(tbl, entry, npages);
npages 297 arch/x86/kernel/pci-calgary_64.c bitmap_clear(tbl->it_map, entry, npages);
npages 337 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 344 arch/x86/kernel/pci-calgary_64.c npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
npages 345 arch/x86/kernel/pci-calgary_64.c iommu_free(tbl, dma, npages);
npages 356 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 364 arch/x86/kernel/pci-calgary_64.c npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
npages 366 arch/x86/kernel/pci-calgary_64.c entry = iommu_range_alloc(dev, tbl, npages);
npages 376 arch/x86/kernel/pci-calgary_64.c tce_build(tbl, entry, npages, vaddr & PAGE_MASK, dir);
npages 398 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 402 arch/x86/kernel/pci-calgary_64.c npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
npages 404 arch/x86/kernel/pci-calgary_64.c return iommu_alloc(dev, tbl, vaddr, npages, dir);
npages 412 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 414 arch/x86/kernel/pci-calgary_64.c npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
npages 415 arch/x86/kernel/pci-calgary_64.c iommu_free(tbl, dma_addr, npages);
npages 423 arch/x86/kernel/pci-calgary_64.c unsigned int npages, order;
npages 427 arch/x86/kernel/pci-calgary_64.c npages = size >> PAGE_SHIFT;
npages 437 arch/x86/kernel/pci-calgary_64.c mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
npages 453 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 457 arch/x86/kernel/pci-calgary_64.c npages = size >> PAGE_SHIFT;
npages 459 arch/x86/kernel/pci-calgary_64.c iommu_free(tbl, dma_handle, npages);
npages 715 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 723 arch/x86/kernel/pci-calgary_64.c npages = ((1024 - 640) * 1024) >> PAGE_SHIFT;
npages 726 arch/x86/kernel/pci-calgary_64.c npages = (1 * 1024 * 1024) >> PAGE_SHIFT;
npages 728 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(tbl, start, npages);
npages 1525 arch/x86/kernel/pci-calgary_64.c unsigned int npages;
npages 1542 arch/x86/kernel/pci-calgary_64.c npages = resource_size(r) >> PAGE_SHIFT;
npages 1543 arch/x86/kernel/pci-calgary_64.c npages++;
npages 1545 arch/x86/kernel/pci-calgary_64.c iommu_range_reserve(tbl, r->start, npages);
npages 37 arch/x86/kernel/tce_64.c unsigned int npages, unsigned long uaddr, int direction)
npages 49 arch/x86/kernel/tce_64.c while (npages--) {
npages 62 arch/x86/kernel/tce_64.c void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
npages 68 arch/x86/kernel/tce_64.c while (npages--) {
npages 2015 arch/x86/kvm/mmu.c (memslot->npages << PAGE_SHIFT));
npages 5730 arch/x86/kvm/mmu.c memslot->base_gfn + memslot->npages - 1,
npages 5960 arch/x86/kvm/mmu.c end = min(gfn_end, memslot->base_gfn + memslot->npages);
npages 6009 arch/x86/kvm/mmu.c memslot->npages);
npages 6080 arch/x86/kvm/mmu.c memslot->npages);
npages 6099 arch/x86/kvm/mmu.c memslot->npages);
npages 6117 arch/x86/kvm/mmu.c memslot->npages);
npages 6373 arch/x86/kvm/mmu.c nr_pages += memslot->npages;
npages 36 arch/x86/kvm/page_track.c unsigned long npages)
npages 42 arch/x86/kvm/page_track.c kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
npages 135 arch/x86/kvm/paging_tmpl.h int npages;
npages 140 arch/x86/kvm/paging_tmpl.h npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
npages 141 arch/x86/kvm/paging_tmpl.h if (likely(npages == 1)) {
npages 428 arch/x86/kvm/svm.c unsigned long npages;
npages 1831 arch/x86/kvm/svm.c unsigned long npages, npinned, size;
npages 1842 arch/x86/kvm/svm.c npages = (last - first + 1);
npages 1844 arch/x86/kvm/svm.c locked = sev->pages_locked + npages;
npages 1852 arch/x86/kvm/svm.c size = npages * sizeof(struct page *);
npages 1863 arch/x86/kvm/svm.c npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
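The arch/x86/kvm/svm.c SEV hits above compute npages from the first and last page frame numbers touched by [uaddr, uaddr + size), then treat a get_user_pages_fast() return smaller than npages as failure and unwind. A sketch of just the counting step; PAGE_SHIFT is hard-coded for the example:

```c
#include <assert.h>

#define PAGE_SHIFT 12	/* assumption for the example */

/* npages = (last - first + 1), as in the sev_pin_memory() hits */
static unsigned long count_user_pages(unsigned long uaddr,
				      unsigned long size)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + size - 1) >> PAGE_SHIFT;

	return last - first + 1;
}

int main(void)
{
	assert(count_user_pages(0x1000, 4096) == 1);
	/* two bytes straddling a boundary still pin two pages */
	assert(count_user_pages(0x1fff, 2) == 2);
	return 0;
}
```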
npages 1864 arch/x86/kvm/svm.c if (npinned != npages) {
npages 1865 arch/x86/kvm/svm.c pr_err("SEV: Failure locking %lu pages.\n", npages);
npages 1869 arch/x86/kvm/svm.c *n = npages;
npages 1883 arch/x86/kvm/svm.c unsigned long npages)
npages 1887 arch/x86/kvm/svm.c release_pages(pages, npages);
npages 1889 arch/x86/kvm/svm.c sev->pages_locked -= npages;
npages 1892 arch/x86/kvm/svm.c static void sev_clflush_pages(struct page *pages[], unsigned long npages)
npages 1897 arch/x86/kvm/svm.c if (npages == 0 || pages == NULL)
npages 1900 arch/x86/kvm/svm.c for (i = 0; i < npages; i++) {
npages 1916 arch/x86/kvm/svm.c sev_clflush_pages(region->pages, region->npages);
npages 1918 arch/x86/kvm/svm.c sev_unpin_memory(kvm, region->pages, region->npages);
npages 6496 arch/x86/kvm/svm.c struct page **inpages, unsigned long npages)
npages 6503 arch/x86/kvm/svm.c while (i < npages) {
npages 6518 arch/x86/kvm/svm.c unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
npages 6540 arch/x86/kvm/svm.c inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
npages 6552 arch/x86/kvm/svm.c sev_clflush_pages(inpages, npages);
npages 6564 arch/x86/kvm/svm.c pages = get_num_contig_pages(i, inpages, npages);
npages 6581 arch/x86/kvm/svm.c for (i = 0; i < npages; i++) {
npages 6586 arch/x86/kvm/svm.c sev_unpin_memory(kvm, inpages, npages);
npages 7090 arch/x86/kvm/svm.c region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
npages 7102 arch/x86/kvm/svm.c sev_clflush_pages(region->pages, region->npages);
npages 9635 arch/x86/kvm/x86.c if (slot->npages)
npages 9647 arch/x86/kvm/x86.c if (!slot->npages)
npages 9668 arch/x86/kvm/x86.c vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
npages 9739 arch/x86/kvm/x86.c unsigned long npages)
npages 9756 arch/x86/kvm/x86.c lpages = gfn_to_index(slot->base_gfn + npages - 1,
npages 9775 arch/x86/kvm/x86.c if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
npages 9792 arch/x86/kvm/x86.c if (kvm_page_track_create_memslot(slot, npages))
npages 58 arch/x86/mm/cpu_entry_area.c unsigned int npages;
npages 65 arch/x86/mm/cpu_entry_area.c npages = sizeof(struct debug_store) / PAGE_SIZE;
npages 67 arch/x86/mm/cpu_entry_area.c cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
npages 75 arch/x86/mm/cpu_entry_area.c npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
npages 76 arch/x86/mm/cpu_entry_area.c for (; npages; npages--, cea += PAGE_SIZE)
npages 84 arch/x86/mm/cpu_entry_area.c npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
npages 86 arch/x86/mm/cpu_entry_area.c estacks->name## _stack, npages, PAGE_KERNEL); \
npages 93 arch/x86/mm/cpu_entry_area.c unsigned int npages;
npages 241 arch/x86/mm/dump_pagetables.c unsigned long npages;
npages 243 arch/x86/mm/dump_pagetables.c npages = (st->current_address - st->start_address) / PAGE_SIZE;
npages 252 arch/x86/mm/dump_pagetables.c pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
npages 257 arch/x86/mm/dump_pagetables.c st->wx_pages += npages;
npages 382 arch/x86/mm/mem_encrypt.c unsigned long vaddr, vaddr_end, npages;
npages 387 arch/x86/mm/mem_encrypt.c npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
npages 394 arch/x86/mm/mem_encrypt.c r = set_memory_encrypted(vaddr, npages);
npages 576 arch/x86/platform/efi/efi.c u64 addr, npages;
npages 579 arch/x86/platform/efi/efi.c npages = md->num_pages;
npages 581 arch/x86/platform/efi/efi.c memrange_efi_to_native(&addr, &npages);
npages 584 arch/x86/platform/efi/efi.c set_memory_x(addr, npages);
npages 586 arch/x86/platform/efi/efi.c set_memory_nx(addr, npages);
npages 605 arch/x86/platform/efi/efi.c u64 npages;
npages 607 arch/x86/platform/efi/efi.c npages = round_up(size, page_shift) / page_shift;
npages 608 arch/x86/platform/efi/efi.c memrange_efi_to_native(&addr, &npages);
npages 609 arch/x86/platform/efi/efi.c set_memory_uc(addr, npages);
npages 343 arch/x86/platform/efi/efi_64.c unsigned npages;
npages 399 arch/x86/platform/efi/efi_64.c npages = (_etext - _text) >> PAGE_SHIFT;
npages 404 arch/x86/platform/efi/efi_64.c if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
npages 1409 block/bio.c int npages;
npages 1417 block/bio.c npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
npages 1423 block/bio.c for (j = 0; j < npages; j++) {
npages 1447 block/bio.c while (j < npages)
npages 397 crypto/af_alg.c int npages, i;
npages 403 crypto/af_alg.c npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;
npages 404 crypto/af_alg.c if (WARN_ON(npages == 0))
npages 407 crypto/af_alg.c sg_init_table(sgl->sg, npages + 1);
npages 409 crypto/af_alg.c for (i = 0, len = n; i < npages; i++) {
npages 417 crypto/af_alg.c sg_mark_end(sgl->sg + npages - 1);
npages 418 crypto/af_alg.c sgl->npages = npages;
npages 427 crypto/af_alg.c sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
npages 428 crypto/af_alg.c sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
npages 435 crypto/af_alg.c for (i = 0; i < sgl->npages; i++)
npages 1142 crypto/af_alg.c rsgl->sgl.npages = 0;
npages 271 crypto/algif_aead.c sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
npages 272 crypto/algif_aead.c sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
npages 637 drivers/block/drbd/drbd_bitmap.c struct page **npages, **opages = NULL;
npages 692 drivers/block/drbd/drbd_bitmap.c npages = b->bm_pages;
npages 695 drivers/block/drbd/drbd_bitmap.c npages = NULL;
npages 697 drivers/block/drbd/drbd_bitmap.c npages = bm_realloc_pages(b, want);
npages 700 drivers/block/drbd/drbd_bitmap.c if (!npages) {
npages 714 drivers/block/drbd/drbd_bitmap.c b->bm_pages = npages;
npages 737 drivers/block/drbd/drbd_bitmap.c if (opages != npages)
npages 856 drivers/edac/i5100_edac.c const unsigned long npages = i5100_npages(mci, i);
npages 860 drivers/edac/i5100_edac.c if (!npages)
npages 866 drivers/edac/i5100_edac.c dimm->nr_pages = npages;
npages 876 drivers/edac/i5100_edac.c chan, rank, (long)PAGES_TO_MiB(npages));
npages 583 drivers/edac/i7core_edac.c u32 size, npages;
npages 603 drivers/edac/i7core_edac.c npages = MiB_TO_PAGES(size);
npages 605 drivers/edac/i7core_edac.c dimm->nr_pages = npages;
npages 1589 drivers/edac/sb_edac.c unsigned int i, j, banks, ranks, rows, cols, npages;
npages 1654 drivers/edac/sb_edac.c npages = MiB_TO_PAGES(size);
npages 1658 drivers/edac/sb_edac.c size, npages,
npages 1661 drivers/edac/sb_edac.c dimm->nr_pages = npages;
npages 289 drivers/edac/skx_common.c int banks = 16, ranks, rows, cols, npages;
npages 300 drivers/edac/skx_common.c npages = MiB_TO_PAGES(size);
npages 303 drivers/edac/skx_common.c imc->mc, chan, dimmno, size, npages,
npages 312 drivers/edac/skx_common.c dimm->nr_pages = npages;
npages 181 drivers/firmware/efi/arm-init.c u64 paddr, npages, size;
npages 196 drivers/firmware/efi/arm-init.c npages = md->num_pages;
npages 202 drivers/firmware/efi/arm-init.c paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
npages 206 drivers/firmware/efi/arm-init.c memrange_efi_to_native(&paddr, &npages);
npages 207 drivers/firmware/efi/arm-init.c size = npages << PAGE_SHIFT;
npages 19 drivers/fpga/dfl-afu-dma-region.c static void put_all_pages(struct page **pages, int npages)
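The crypto/af_alg.c hits above size a scatterlist as npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT and then carve the byte span into per-page (offset, length) chunks. A hedged userspace rendering of that carving, with the min_t() open-coded; the example span values are arbitrary:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long off = 4000, n = 9000;	/* example span */
	unsigned long npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT;

	for (unsigned long i = 0, len = n; i < npages; i++) {
		unsigned long plen = PAGE_SIZE - off;	/* room in this page */

		if (plen > len)
			plen = len;		/* min(len, plen) */
		printf("page %lu: offset %lu, length %lu\n", i, off, plen);
		len -= plen;
		off = 0;			/* later pages start at 0 */
	}
	return 0;
}
```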
npages 23 drivers/fpga/dfl-afu-dma-region.c for (i = 0; i < npages; i++)
npages 46 drivers/fpga/dfl-afu-dma-region.c int npages = region->length >> PAGE_SHIFT;
npages 50 drivers/fpga/dfl-afu-dma-region.c ret = account_locked_vm(current->mm, npages, true);
npages 54 drivers/fpga/dfl-afu-dma-region.c region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
npages 60 drivers/fpga/dfl-afu-dma-region.c pinned = get_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
npages 65 drivers/fpga/dfl-afu-dma-region.c } else if (pinned != npages) {
npages 79 drivers/fpga/dfl-afu-dma-region.c account_locked_vm(current->mm, npages, false);
npages 94 drivers/fpga/dfl-afu-dma-region.c long npages = region->length >> PAGE_SHIFT;
npages 97 drivers/fpga/dfl-afu-dma-region.c put_all_pages(region->pages, npages);
npages 99 drivers/fpga/dfl-afu-dma-region.c account_locked_vm(current->mm, npages, false);
npages 101 drivers/fpga/dfl-afu-dma-region.c dev_dbg(dev, "%ld pages unpinned\n", npages);
npages 113 drivers/fpga/dfl-afu-dma-region.c int npages = region->length >> PAGE_SHIFT;
npages 116 drivers/fpga/dfl-afu-dma-region.c for (i = 0; i < npages - 1; i++)
npages 52 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c int npages = bo->tbo.num_pages;
npages 54 drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
npages 558 drivers/gpu/drm/drm_gem.c int i, npages;
npages 569 drivers/gpu/drm/drm_gem.c npages = obj->size >> PAGE_SHIFT;
npages 571 drivers/gpu/drm/drm_gem.c pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
npages 577 drivers/gpu/drm/drm_gem.c for (i = 0; i < npages; i++) {
npages 619 drivers/gpu/drm/drm_gem.c int i, npages;
npages 632 drivers/gpu/drm/drm_gem.c npages = obj->size >> PAGE_SHIFT;
npages 635 drivers/gpu/drm/drm_gem.c for (i = 0; i < npages; i++) {
npages 661 drivers/gpu/drm/drm_gem_shmem_helper.c size_t npages = size >> PAGE_SHIFT;
npages 669 drivers/gpu/drm/drm_gem_shmem_helper.c shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
npages 675 drivers/gpu/drm/drm_gem_shmem_helper.c ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
npages 103 drivers/gpu/drm/etnaviv/etnaviv_gem.c int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
npages 106 drivers/gpu/drm/etnaviv/etnaviv_gem.c sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
npages 662 drivers/gpu/drm/etnaviv/etnaviv_gem.c int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
npages 669 drivers/gpu/drm/etnaviv/etnaviv_gem.c pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
npages 674 drivers/gpu/drm/etnaviv/etnaviv_gem.c unsigned num_pages = npages - pinned;
npages 688 drivers/gpu/drm/etnaviv/etnaviv_gem.c } while (pinned < npages);
npages 703 drivers/gpu/drm/etnaviv/etnaviv_gem.c int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
npages 705 drivers/gpu/drm/etnaviv/etnaviv_gem.c release_pages(etnaviv_obj->pages, npages);
npages 17 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c int npages = obj->size >> PAGE_SHIFT;
npages 22 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
npages 111 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c int ret, npages;
npages 120 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c npages = size / PAGE_SIZE;
npages 123 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
npages 130 drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c NULL, npages);
npages 429 drivers/gpu/drm/exynos/exynos_drm_g2d.c unsigned int npages, offset;
npages 476 drivers/gpu/drm/exynos/exynos_drm_g2d.c npages = (end - start) >> PAGE_SHIFT;
npages 477 drivers/gpu/drm/exynos/exynos_drm_g2d.c g2d_userptr->vec = frame_vector_create(npages);
npages 483 drivers/gpu/drm/exynos/exynos_drm_g2d.c ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
npages 485 drivers/gpu/drm/exynos/exynos_drm_g2d.c if (ret != npages) {
npages 506 drivers/gpu/drm/exynos/exynos_drm_g2d.c npages, offset, size, GFP_KERNEL);
npages 526 drivers/gpu/drm/exynos/exynos_drm_g2d.c if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
npages 527 drivers/gpu/drm/exynos/exynos_drm_g2d.c g2d->current_pool += npages << PAGE_SHIFT;
npages 465 drivers/gpu/drm/exynos/exynos_drm_gem.c int npages;
npages 467 drivers/gpu/drm/exynos/exynos_drm_gem.c npages = exynos_gem->size >> PAGE_SHIFT;
npages 469 drivers/gpu/drm/exynos/exynos_drm_gem.c return drm_prime_pages_to_sg(exynos_gem->pages, npages);
npages 478 drivers/gpu/drm/exynos/exynos_drm_gem.c int npages;
npages 489 drivers/gpu/drm/exynos/exynos_drm_gem.c npages = exynos_gem->size >> PAGE_SHIFT;
npages 490 drivers/gpu/drm/exynos/exynos_drm_gem.c exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
npages 497 drivers/gpu/drm/exynos/exynos_drm_gem.c npages);
npages 41 drivers/gpu/drm/i915/gem/i915_gem_internal.c unsigned int npages;
npages 71 drivers/gpu/drm/i915/gem/i915_gem_internal.c npages = obj->base.size / PAGE_SIZE;
npages 72 drivers/gpu/drm/i915/gem/i915_gem_internal.c if (sg_alloc_table(st, npages, GFP_KERNEL)) {
npages 82 drivers/gpu/drm/i915/gem/i915_gem_internal.c int order = min(fls(npages) - 1, max_order);
npages 101 drivers/gpu/drm/i915/gem/i915_gem_internal.c npages -= 1 << order;
npages 102 drivers/gpu/drm/i915/gem/i915_gem_internal.c if (!npages) {
npages 476 drivers/gpu/drm/i915/gem/i915_gem_userptr.c const unsigned long npages = obj->base.size >> PAGE_SHIFT;
npages 484 drivers/gpu/drm/i915/gem/i915_gem_userptr.c pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
npages 495 drivers/gpu/drm/i915/gem/i915_gem_userptr.c while (pinned < npages) {
npages 499 drivers/gpu/drm/i915/gem/i915_gem_userptr.c npages - pinned,
npages 516 drivers/gpu/drm/i915/gem/i915_gem_userptr.c if (pinned == npages) {
npages 518 drivers/gpu/drm/i915/gem/i915_gem_userptr.c npages);
npages 28 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c const unsigned long npages = obj->base.size / PAGE_SIZE;
npages 37 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c if (sg_alloc_table(pages, npages, GFP)) {
npages 55 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c if (nreal < npages) {
npages 345 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c unsigned long npages = fake_page_count(obj);
npages 347 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
npages 348 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c return npages / DW_PER_PAGE;
npages 82 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c const unsigned long npages = obj->base.size / PAGE_SIZE;
npages 110 drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c for_each_prime_number_from(page, 1, npages) {
npages 21 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
npages 26 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c for (i = 0; i < mock->npages; i++) {
npages 59 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c for (i = 0; i < mock->npages; i++)
npages 69 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
npages 76 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c vm_unmap_ram(vaddr, mock->npages);
npages 109 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c static struct dma_buf *mock_dmabuf(int npages)
npages 116 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
npages 121 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c mock->npages = npages;
npages 122 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c for (i = 0; i < npages; i++) {
npages 129 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c exp_info.size = npages * PAGE_SIZE;
npages 13 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.h int npages;
npages 1724 drivers/gpu/drm/i915/gvt/kvmgt.c for (i = 0; i < slot->npages; i++) {
npages 352 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c unsigned long npages, prime, flags;
npages 364 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c for (npages = 1; npages <= max_pages; npages *= prime) {
npages 365 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c const u64 full_size = npages << PAGE_SHIFT;
npages 404 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, err, npages, prime, offset);
npages 477 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, p->name, err, npages, prime, offset);
npages 537 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c __func__, npages, prime)) {
npages 699 drivers/gpu/drm/i915/selftests/i915_vma.c const unsigned int npages = 1021; /* prime! */
npages 716 drivers/gpu/drm/i915/selftests/i915_vma.c obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
npages 724 drivers/gpu/drm/i915/selftests/i915_vma.c for_each_prime_number_from(sz, 1, npages) {
npages 725 drivers/gpu/drm/i915/selftests/i915_vma.c for_each_prime_number_from(offset, 0, npages - sz) {
npages 732 drivers/gpu/drm/i915/selftests/i915_vma.c if (sz == npages)
npages 53 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned int npages = npages_fn(n, pt->st.nents, rnd);
npages 61 drivers/gpu/drm/i915/selftests/scatterlist.c if (sg->length != npages * PAGE_SIZE) {
npages 63 drivers/gpu/drm/i915/selftests/scatterlist.c __func__, who, npages * PAGE_SIZE, sg->length);
npages 70 drivers/gpu/drm/i915/selftests/scatterlist.c pfn += npages;
npages 209 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned long npages)
npages 211 drivers/gpu/drm/i915/selftests/scatterlist.c return first + npages == last;
npages 238 drivers/gpu/drm/i915/selftests/scatterlist.c unsigned long npages = npages_fn(n, count, rnd);
npages 242 drivers/gpu/drm/i915/selftests/scatterlist.c pfn_to_page(pfn + npages),
npages 243 drivers/gpu/drm/i915/selftests/scatterlist.c npages)) {
npages 250 drivers/gpu/drm/i915/selftests/scatterlist.c sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
npages 253 drivers/gpu/drm/i915/selftests/scatterlist.c GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
npages 256 drivers/gpu/drm/i915/selftests/scatterlist.c pfn += npages;
npages 288 drivers/gpu/drm/i915/selftests/scatterlist.c const npages_fn_t *npages;
npages 292 drivers/gpu/drm/i915/selftests/scatterlist.c for (npages = npages_funcs; *npages; npages++) {
npages 295 drivers/gpu/drm/i915/selftests/scatterlist.c err = alloc_table(&pt, sz, sz, *npages, &prng,
npages 304 drivers/gpu/drm/i915/selftests/scatterlist.c err = expect_pfn_sgtable(&pt, *npages, &prng,
npages 330 drivers/gpu/drm/i915/selftests/scatterlist.c const npages_fn_t *npages;
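Many of the DRM hits above and below allocate a struct page *[npages] array with kvmalloc_array(), whose point is the overflow-checked count * size multiplication. In a userspace sketch, calloc() offers the same guarantee; the helper name here is invented:

```c
#include <stdlib.h>

struct page;			/* opaque, as in the kernel */

/* calloc() must fail cleanly if npages * sizeof() would overflow */
static struct page **alloc_page_array(size_t npages)
{
	return calloc(npages, sizeof(struct page *));
}

int main(void)
{
	struct page **pages = alloc_page_array(256);

	if (!pages)
		return 1;
	free(pages);
	return 0;
}
```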
drivers/gpu/drm/i915/selftests/scatterlist.c const npages_fn_t *npages; npages 333 drivers/gpu/drm/i915/selftests/scatterlist.c for (npages = npages_funcs; *npages; npages++) { npages 337 drivers/gpu/drm/i915/selftests/scatterlist.c err = alloc_table(&pt, prime, max, *npages, &prng, npages 354 drivers/gpu/drm/i915/selftests/scatterlist.c *npages, &prng, npages 31 drivers/gpu/drm/lima/lima_gem_prime.c int npages = obj->size >> PAGE_SHIFT; npages 33 drivers/gpu/drm/lima/lima_gem_prime.c return drm_prime_pages_to_sg(bo->pages, npages); npages 17 drivers/gpu/drm/lima/lima_object.c int i, npages = bo->gem.size >> PAGE_SHIFT; npages 19 drivers/gpu/drm/lima/lima_object.c for (i = 0; i < npages; i++) { npages 63 drivers/gpu/drm/lima/lima_object.c size_t npages; npages 70 drivers/gpu/drm/lima/lima_object.c npages = bo->gem.size >> PAGE_SHIFT; npages 72 drivers/gpu/drm/lima/lima_object.c bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL); npages 81 drivers/gpu/drm/lima/lima_object.c bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); npages 88 drivers/gpu/drm/lima/lima_object.c sgt, bo->pages, bo->pages_dma_addr, npages); npages 102 drivers/gpu/drm/lima/lima_object.c for (i = 0; i < npages; i++) { npages 250 drivers/gpu/drm/mediatek/mtk_drm_gem.c unsigned int npages; npages 260 drivers/gpu/drm/mediatek/mtk_drm_gem.c npages = obj->size >> PAGE_SHIFT; npages 261 drivers/gpu/drm/mediatek/mtk_drm_gem.c mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL); npages 267 drivers/gpu/drm/mediatek/mtk_drm_gem.c if (i > npages) npages 270 drivers/gpu/drm/mediatek/mtk_drm_gem.c mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, npages 236 drivers/gpu/drm/msm/msm_drv.h struct msm_gem_vma *vma, int npages); npages 243 drivers/gpu/drm/msm/msm_drv.h struct sg_table *sgt, int npages); npages 78 drivers/gpu/drm/msm/msm_gem.c static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) npages 86 drivers/gpu/drm/msm/msm_gem.c p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); npages 91 drivers/gpu/drm/msm/msm_gem.c ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); npages 99 drivers/gpu/drm/msm/msm_gem.c for (i = 0; i < npages; i++) { npages 114 drivers/gpu/drm/msm/msm_gem.c int npages = obj->size >> PAGE_SHIFT; npages 119 drivers/gpu/drm/msm/msm_gem.c p = get_pages_vram(obj, npages); npages 129 drivers/gpu/drm/msm/msm_gem.c msm_obj->sgt = drm_prime_pages_to_sg(p, npages); npages 1114 drivers/gpu/drm/msm/msm_gem.c int ret, npages; npages 1130 drivers/gpu/drm/msm/msm_gem.c npages = size / PAGE_SIZE; npages 1135 drivers/gpu/drm/msm/msm_gem.c msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); npages 1142 drivers/gpu/drm/msm/msm_gem.c ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); npages 17 drivers/gpu/drm/msm/msm_gem_prime.c int npages = obj->size >> PAGE_SHIFT; npages 22 drivers/gpu/drm/msm/msm_gem_prime.c return drm_prime_pages_to_sg(msm_obj->pages, npages); npages 61 drivers/gpu/drm/msm/msm_gem_vma.c struct sg_table *sgt, int npages) npages 63 drivers/gpu/drm/msm/msm_gem_vma.c unsigned size = npages << PAGE_SHIFT; npages 106 drivers/gpu/drm/msm/msm_gem_vma.c struct msm_gem_vma *vma, int npages) npages 114 drivers/gpu/drm/msm/msm_gem_vma.c ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages); npages 53 drivers/gpu/drm/nouveau/nouveau_dmem.c typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages, npages 278 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned 
long npages, npages 285 drivers/gpu/drm/nouveau/nouveau_dmem.c memset(pages, 0xff, npages * sizeof(*pages)); npages 288 drivers/gpu/drm/nouveau/nouveau_dmem.c for (c = 0; c < npages;) { npages 306 drivers/gpu/drm/nouveau/nouveau_dmem.c while (i < DMEM_CHUNK_NPAGES && c < npages) { npages 414 drivers/gpu/drm/nouveau/nouveau_dmem.c nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages, npages 466 drivers/gpu/drm/nouveau/nouveau_dmem.c OUT_RING (chan, npages); npages 628 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long npages = (end - start) >> PAGE_SHIFT; npages 629 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages); npages 649 drivers/gpu/drm/nouveau/nouveau_dmem.c for (i = 0; i < npages; i += c) { npages 650 drivers/gpu/drm/nouveau/nouveau_dmem.c c = min(SG_MAX_SINGLE_ALLOC, npages); npages 682 drivers/gpu/drm/nouveau/nouveau_dmem.c unsigned long i, npages; npages 684 drivers/gpu/drm/nouveau/nouveau_dmem.c npages = (range->end - range->start) >> PAGE_SHIFT; npages 685 drivers/gpu/drm/nouveau/nouveau_dmem.c for (i = 0; i < npages; ++i) { npages 33 drivers/gpu/drm/nouveau/nouveau_prime.c int npages = nvbo->bo.num_pages; npages 35 drivers/gpu/drm/nouveau/nouveau_prime.c return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); npages 128 drivers/gpu/drm/nouveau/nouveau_svm.c if (!args->npages) npages 161 drivers/gpu/drm/nouveau/nouveau_svm.c size = ((unsigned long)args->npages) << PAGE_SHIFT; npages 383 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, npages 397 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, npages 414 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->r.length = (npages << PAGE_SHIFT) >> 12; npages 421 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, npages 436 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c sizeof(*node->dma_addrs)) * npages), GFP_KERNEL))) npages 439 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c node->dma_addrs = (void *)(node->pages + npages); npages 445 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c for (i = 0; i < npages; i++) { npages 465 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages, npages 474 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c for (i = 0; i < npages; i++) { npages 502 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c for (i = 0; i < npages && node->pages[i] != NULL; i++) { npages 354 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c struct page **pages, u32 npages, u32 roll) npages 386 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c if (n >= npages) npages 387 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c n -= npages; npages 469 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c u32 npages, u32 roll, bool wait) npages 500 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c dmm_txn_append(txn, &p_area, pages, npages, roll); npages 517 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c u32 npages, u32 roll, bool wait) npages 521 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c ret = fill(&block->area, pages, npages, roll, wait); npages 91 drivers/gpu/drm/omapdrm/omap_dmm_tiler.h u32 npages, u32 roll, bool wait); npages 41 drivers/gpu/drm/omapdrm/omap_fbdev.c int npages; npages 44 drivers/gpu/drm/omapdrm/omap_fbdev.c npages = fbi->fix.line_length >> PAGE_SHIFT; npages 45 drivers/gpu/drm/omapdrm/omap_fbdev.c omap_gem_roll(fbdev->bo, fbi->var.yoffset * 
npages); npages 226 drivers/gpu/drm/omapdrm/omap_gem.c int npages = obj->size >> PAGE_SHIFT; npages 249 drivers/gpu/drm/omapdrm/omap_gem.c addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL); npages 255 drivers/gpu/drm/omapdrm/omap_gem.c for (i = 0; i < npages; i++) { npages 273 drivers/gpu/drm/omapdrm/omap_gem.c addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL); npages 297 drivers/gpu/drm/omapdrm/omap_gem.c unsigned int npages = obj->size >> PAGE_SHIFT; npages 302 drivers/gpu/drm/omapdrm/omap_gem.c for (i = 0; i < npages; i++) { npages 647 drivers/gpu/drm/omapdrm/omap_gem.c u32 npages = obj->size >> PAGE_SHIFT; npages 650 drivers/gpu/drm/omapdrm/omap_gem.c if (roll > npages) { npages 665 drivers/gpu/drm/omapdrm/omap_gem.c ret = tiler_pin(omap_obj->block, omap_obj->pages, npages, npages 723 drivers/gpu/drm/omapdrm/omap_gem.c int i, npages = obj->size >> PAGE_SHIFT; npages 730 drivers/gpu/drm/omapdrm/omap_gem.c for (i = 0; i < npages; i++) { npages 777 drivers/gpu/drm/omapdrm/omap_gem.c u32 npages = obj->size >> PAGE_SHIFT; npages 803 drivers/gpu/drm/omapdrm/omap_gem.c ret = tiler_pin(block, omap_obj->pages, npages, npages 998 drivers/gpu/drm/omapdrm/omap_gem.c u32 npages = obj->size >> PAGE_SHIFT; npages 1002 drivers/gpu/drm/omapdrm/omap_gem.c omap_obj->pages, npages, npages 1257 drivers/gpu/drm/omapdrm/omap_gem.c unsigned int npages; npages 1260 drivers/gpu/drm/omapdrm/omap_gem.c npages = DIV_ROUND_UP(size, PAGE_SIZE); npages 1261 drivers/gpu/drm/omapdrm/omap_gem.c pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); npages 1272 drivers/gpu/drm/omapdrm/omap_gem.c if (i > npages) npages 1276 drivers/gpu/drm/omapdrm/omap_gem.c if (WARN_ON(i != npages)) { npages 37 drivers/gpu/drm/radeon/radeon_prime.c int npages = bo->tbo.num_pages; npages 39 drivers/gpu/drm/radeon/radeon_prime.c return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); npages 1434 drivers/gpu/drm/ttm/ttm_bo.c unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; npages 1438 drivers/gpu/drm/ttm/ttm_bo.c size += ttm_round_pot(npages * sizeof(void *)); npages 1448 drivers/gpu/drm/ttm/ttm_bo.c unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT; npages 1452 drivers/gpu/drm/ttm/ttm_bo.c size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t))); npages 74 drivers/gpu/drm/ttm/ttm_page_alloc.c unsigned npages; npages 247 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_pages_put(struct page *pages[], unsigned npages, npages 253 drivers/gpu/drm/ttm/ttm_page_alloc.c if (ttm_set_pages_array_wb(pages, npages)) npages 254 drivers/gpu/drm/ttm/ttm_page_alloc.c pr_err("Failed to set %d pages to wb!\n", npages); npages 257 drivers/gpu/drm/ttm/ttm_page_alloc.c for (i = 0; i < npages; ++i) { npages 269 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages -= freed_pages; npages 423 drivers/gpu/drm/ttm/ttm_page_alloc.c count += (pool->npages << pool->order); npages 495 drivers/gpu/drm/ttm/ttm_page_alloc.c unsigned npages = 1 << order; npages 537 drivers/gpu/drm/ttm/ttm_page_alloc.c for (j = 0; j < npages; ++j) { npages 591 drivers/gpu/drm/ttm/ttm_page_alloc.c && count > pool->npages) { npages 609 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages += alloc_size; npages 617 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages += cpages; npages 645 drivers/gpu/drm/ttm/ttm_page_alloc.c if (count >= pool->npages) { npages 648 drivers/gpu/drm/ttm/ttm_page_alloc.c count -= pool->npages; npages 649 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages = 0; npages 654 drivers/gpu/drm/ttm/ttm_page_alloc.c if (count <= pool->npages/2) { 
npages 661 drivers/gpu/drm/ttm/ttm_page_alloc.c i = pool->npages + 1; npages 669 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages -= count; npages 708 drivers/gpu/drm/ttm/ttm_page_alloc.c static void ttm_put_pages(struct page **pages, unsigned npages, int flags, npages 721 drivers/gpu/drm/ttm/ttm_page_alloc.c while (i < npages) { npages 734 drivers/gpu/drm/ttm/ttm_page_alloc.c (npages - i) >= HPAGE_PMD_NR) { npages 763 drivers/gpu/drm/ttm/ttm_page_alloc.c while ((npages - i) >= HPAGE_PMD_NR) { npages 781 drivers/gpu/drm/ttm/ttm_page_alloc.c huge->npages++; npages 787 drivers/gpu/drm/ttm/ttm_page_alloc.c if (huge->npages > max_size) npages 788 drivers/gpu/drm/ttm/ttm_page_alloc.c n2free = huge->npages - max_size; npages 798 drivers/gpu/drm/ttm/ttm_page_alloc.c while (i < npages) { npages 804 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages++; npages 809 drivers/gpu/drm/ttm/ttm_page_alloc.c npages = 0; npages 810 drivers/gpu/drm/ttm/ttm_page_alloc.c if (pool->npages > _manager->options.max_size) { npages 811 drivers/gpu/drm/ttm/ttm_page_alloc.c npages = pool->npages - _manager->options.max_size; npages 814 drivers/gpu/drm/ttm/ttm_page_alloc.c if (npages < NUM_PAGES_TO_ALLOC) npages 815 drivers/gpu/drm/ttm/ttm_page_alloc.c npages = NUM_PAGES_TO_ALLOC; npages 818 drivers/gpu/drm/ttm/ttm_page_alloc.c if (npages) npages 819 drivers/gpu/drm/ttm/ttm_page_alloc.c ttm_page_pool_free(pool, npages, false); npages 826 drivers/gpu/drm/ttm/ttm_page_alloc.c static int ttm_get_pages(struct page **pages, unsigned npages, int flags, npages 861 drivers/gpu/drm/ttm/ttm_page_alloc.c while (npages >= HPAGE_PMD_NR) { npages 875 drivers/gpu/drm/ttm/ttm_page_alloc.c npages -= HPAGE_PMD_NR; npages 881 drivers/gpu/drm/ttm/ttm_page_alloc.c while (npages) { npages 893 drivers/gpu/drm/ttm/ttm_page_alloc.c --npages; npages 901 drivers/gpu/drm/ttm/ttm_page_alloc.c if (huge && npages >= HPAGE_PMD_NR) { npages 904 drivers/gpu/drm/ttm/ttm_page_alloc.c npages / HPAGE_PMD_NR, npages 918 drivers/gpu/drm/ttm/ttm_page_alloc.c npages - count, 0); npages 948 drivers/gpu/drm/ttm/ttm_page_alloc.c pool->npages = pool->nfrees = 0; npages 1185 drivers/gpu/drm/ttm/ttm_page_alloc.c p->nfrees, p->npages); npages 371 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c struct page *pages[], unsigned npages) npages 383 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (npages && !(pool->type & IS_CACHED) && npages 384 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm_set_pages_array_wb(pages, npages)) npages 386 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c pool->dev_name, npages); npages 707 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c unsigned i, j, npages, cpages; npages 755 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c npages = pool->size / PAGE_SIZE; npages 756 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c for (j = 0; j < npages; ++j) { npages 1000 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c unsigned count, i, npages = 0; npages 1067 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c npages = pool->npages_free - _manager->options.max_size; npages 1078 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c if (npages) npages 1079 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c ttm_dma_page_pool_free(pool, npages, false); npages 193 drivers/gpu/drm/udl/udl_dmabuf.c int npages; npages 195 drivers/gpu/drm/udl/udl_dmabuf.c npages = size / PAGE_SIZE; npages 198 drivers/gpu/drm/udl/udl_dmabuf.c obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE); npages 203 drivers/gpu/drm/udl/udl_dmabuf.c obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); npages 205 drivers/gpu/drm/udl/udl_dmabuf.c 
DRM_ERROR("obj pages is NULL %d\n", npages); npages 209 drivers/gpu/drm/udl/udl_dmabuf.c drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); npages 118 drivers/gpu/drm/v3d/v3d_mmu.c u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT; npages 121 drivers/gpu/drm/v3d/v3d_mmu.c for (page = bo->node.start; page < bo->node.start + npages; page++) npages 364 drivers/gpu/drm/vgem/vgem_drv.c int npages; npages 370 drivers/gpu/drm/vgem/vgem_drv.c npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE; npages 373 drivers/gpu/drm/vgem/vgem_drv.c obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); npages 381 drivers/gpu/drm/vgem/vgem_drv.c npages); npages 352 drivers/hwtracing/coresight/coresight-tmc-etr.c int npages = DIV_ROUND_UP(size, PAGE_SIZE); npages 357 drivers/hwtracing/coresight/coresight-tmc-etr.c for (i = start; i < (start + npages); i++) { npages 75 drivers/infiniband/core/umem.c unsigned long npages, npages 91 drivers/infiniband/core/umem.c while (i != npages) { npages 100 drivers/infiniband/core/umem.c for (len = 0; i != npages && npages 203 drivers/infiniband/core/umem.c unsigned long npages; npages 250 drivers/infiniband/core/umem.c npages = ib_umem_num_pages(umem); npages 251 drivers/infiniband/core/umem.c if (npages == 0 || npages > UINT_MAX) { npages 258 drivers/infiniband/core/umem.c new_pinned = atomic64_add_return(npages, &mm->pinned_vm); npages 260 drivers/infiniband/core/umem.c atomic64_sub(npages, &mm->pinned_vm); npages 267 drivers/infiniband/core/umem.c ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL); npages 276 drivers/infiniband/core/umem.c while (npages) { npages 279 drivers/infiniband/core/umem.c min_t(unsigned long, npages, npages 289 drivers/infiniband/core/umem.c npages -= ret; npages 533 drivers/infiniband/core/umem_odp.c umem_odp->npages++; npages 595 drivers/infiniband/core/umem_odp.c int j, k, ret = 0, start_idx, npages = 0; npages 646 drivers/infiniband/core/umem_odp.c npages = get_user_pages_remote(owning_process, owning_mm, npages 651 drivers/infiniband/core/umem_odp.c if (npages < 0) { npages 652 drivers/infiniband/core/umem_odp.c if (npages != -EAGAIN) npages 653 drivers/infiniband/core/umem_odp.c pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages); npages 655 drivers/infiniband/core/umem_odp.c pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages); npages 659 drivers/infiniband/core/umem_odp.c bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); npages 661 drivers/infiniband/core/umem_odp.c for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) { npages 694 drivers/infiniband/core/umem_odp.c if (npages - (j + 1) > 0) npages 696 drivers/infiniband/core/umem_odp.c npages - (j + 1)); npages 702 drivers/infiniband/core/umem_odp.c if (npages < 0 && k == start_idx) npages 703 drivers/infiniband/core/umem_odp.c ret = npages; npages 760 drivers/infiniband/core/umem_odp.c umem_odp->npages--; npages 864 drivers/infiniband/hw/bnxt_re/ib_verbs.c qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem); npages 877 drivers/infiniband/hw/bnxt_re/ib_verbs.c qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem); npages 1331 drivers/infiniband/hw/bnxt_re/ib_verbs.c qplib_srq->sg_info.npages = ib_umem_num_pages(umem); npages 2182 drivers/infiniband/hw/bnxt_re/ib_verbs.c wqe->frmr.page_list_len = mr->npages; npages 2574 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem); npages 3335 drivers/infiniband/hw/bnxt_re/ib_verbs.c mr->npages = 0; npages 
3349 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs)) npages 3352 drivers/infiniband/hw/bnxt_re/ib_verbs.c mr->pages[mr->npages++] = addr; npages 3361 drivers/infiniband/hw/bnxt_re/ib_verbs.c mr->npages = 0; npages 114 drivers/infiniband/hw/bnxt_re/ib_verbs.h u32 npages; npages 77 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h u32 npages; npages 79 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h npages = BNXT_QPLIB_CMDQE_BYTES(depth) / PAGE_SIZE; npages 81 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h npages++; npages 82 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h return npages; npages 195 drivers/infiniband/hw/bnxt_re/qplib_res.c pages = sg_info->npages; npages 229 drivers/infiniband/hw/bnxt_re/qplib_res.h u32 npages; npages 118 drivers/infiniband/hw/bnxt_re/qplib_sp.h u32 npages; npages 78 drivers/infiniband/hw/cxgb3/iwch_mem.c int iwch_alloc_pbl(struct iwch_mr *mhp, int npages) npages 81 drivers/infiniband/hw/cxgb3/iwch_mem.c npages << 3); npages 86 drivers/infiniband/hw/cxgb3/iwch_mem.c mhp->attr.pbl_size = npages; npages 97 drivers/infiniband/hw/cxgb3/iwch_mem.c int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset) npages 100 drivers/infiniband/hw/cxgb3/iwch_mem.c mhp->attr.pbl_addr + (offset << 3), npages); npages 363 drivers/infiniband/hw/cxgb3/iwch_provider.c int shift = 26, npages, ret, i; npages 381 drivers/infiniband/hw/cxgb3/iwch_provider.c npages = (total_size + (1ULL << shift) - 1) >> shift; npages 382 drivers/infiniband/hw/cxgb3/iwch_provider.c if (!npages) { npages 387 drivers/infiniband/hw/cxgb3/iwch_provider.c page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL); npages 393 drivers/infiniband/hw/cxgb3/iwch_provider.c for (i = 0; i < npages; i++) npages 397 drivers/infiniband/hw/cxgb3/iwch_provider.c __func__, mask, shift, total_size, npages); npages 399 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = iwch_alloc_pbl(mhp, npages); npages 405 drivers/infiniband/hw/cxgb3/iwch_provider.c ret = iwch_write_pbl(mhp, page_list, npages, 0); npages 418 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->attr.pbl_size = npages; npages 647 drivers/infiniband/hw/cxgb3/iwch_provider.c if (unlikely(mhp->npages == mhp->attr.pbl_size)) npages 650 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->pages[mhp->npages++] = addr; npages 660 drivers/infiniband/hw/cxgb3/iwch_provider.c mhp->npages = 0; npages 81 drivers/infiniband/hw/cxgb3/iwch_provider.h u32 npages; npages 341 drivers/infiniband/hw/cxgb3/iwch_provider.h int iwch_alloc_pbl(struct iwch_mr *mhp, int npages); npages 343 drivers/infiniband/hw/cxgb3/iwch_provider.h int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset); npages 156 drivers/infiniband/hw/cxgb3/iwch_qp.c if (mhp->npages > T3_MAX_FASTREG_DEPTH) npages 165 drivers/infiniband/hw/cxgb3/iwch_qp.c V_FR_PAGE_COUNT(mhp->npages) | npages 170 drivers/infiniband/hw/cxgb3/iwch_qp.c for (i = 0; i < mhp->npages; i++, p++) { npages 179 drivers/infiniband/hw/cxgb3/iwch_qp.c 0, 1 + mhp->npages - T3_MAX_FASTREG_FRAG, npages 186 drivers/infiniband/hw/cxgb3/iwch_qp.c *flit_cnt = 5 + mhp->npages; npages 434 drivers/infiniband/hw/cxgb4/mem.c static int alloc_pbl(struct c4iw_mr *mhp, int npages) npages 437 drivers/infiniband/hw/cxgb4/mem.c npages << 3); npages 442 drivers/infiniband/hw/cxgb4/mem.c mhp->attr.pbl_size = npages; npages 2017 drivers/infiniband/hw/hfi1/hfi.h u32 nlocked, u32 npages); npages 2019 drivers/infiniband/hw/hfi1/hfi.h size_t npages, bool writable, struct page **pages); npages 2021 
drivers/infiniband/hw/hfi1/hfi.h size_t npages, bool dirty); npages 877 drivers/infiniband/hw/hfi1/tid_rdma.c u32 npages, npages 883 drivers/infiniband/hw/hfi1/tid_rdma.c if (!npages) npages 893 drivers/infiniband/hw/hfi1/tid_rdma.c for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) { npages 894 drivers/infiniband/hw/hfi1/tid_rdma.c this_vaddr = i < npages ? page_address(pages[i]) : NULL; npages 1015 drivers/infiniband/hw/hfi1/tid_rdma.c u32 npages, npages 1022 drivers/infiniband/hw/hfi1/tid_rdma.c if (!npages) npages 1024 drivers/infiniband/hw/hfi1/tid_rdma.c for (idx = 0, i = 0, vm1 = NULL; i < npages; i += 2) { npages 1028 drivers/infiniband/hw/hfi1/tid_rdma.c v1 = i + 1 < npages ? npages 1061 drivers/infiniband/hw/hfi1/tid_rdma.c sets = tid_flush_pages(list, &idx, npages - idx, sets); npages 1172 drivers/infiniband/hw/hfi1/tid_rdma.c u8 npages; npages 1183 drivers/infiniband/hw/hfi1/tid_rdma.c npages = kern_find_pages(flow, pages, ss, last); npages 1187 drivers/infiniband/hw/hfi1/tid_rdma.c tid_rdma_find_phys_blocks_4k(flow, pages, npages, npages 1191 drivers/infiniband/hw/hfi1/tid_rdma.c tid_rdma_find_phys_blocks_8k(flow, pages, npages, npages 1302 drivers/infiniband/hw/hfi1/tid_rdma.c u32 rcventry, npages = 0, pair = 0, tidctrl; npages 1319 drivers/infiniband/hw/hfi1/tid_rdma.c npages += pset->count; npages 1338 drivers/infiniband/hw/hfi1/tid_rdma.c EXP_TID_SET(LEN, npages); npages 1344 drivers/infiniband/hw/hfi1/tid_rdma.c flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg); npages 1345 drivers/infiniband/hw/hfi1/tid_rdma.c npages = 0; npages 88 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, npages 90 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), npages 95 drivers/infiniband/hw/hfi1/trace_tid.h __field(u32, npages) npages 104 drivers/infiniband/hw/hfi1/trace_tid.h __entry->npages = npages; npages 113 drivers/infiniband/hw/hfi1/trace_tid.h __entry->npages, npages 122 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, npages 124 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) npages 129 drivers/infiniband/hw/hfi1/trace_tid.h TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, npages 131 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) npages 165 drivers/infiniband/hw/hfi1/trace_tid.h u32 npages, dma_addr_t dma), npages 166 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(ctxt, subctxt, va, rarr, npages, dma), npages 172 drivers/infiniband/hw/hfi1/trace_tid.h __field(u32, npages) npages 180 drivers/infiniband/hw/hfi1/trace_tid.h __entry->npages = npages; npages 187 drivers/infiniband/hw/hfi1/trace_tid.h __entry->npages, npages 57 drivers/infiniband/hw/hfi1/user_exp_rcv.c static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages); npages 61 drivers/infiniband/hw/hfi1/user_exp_rcv.c u16 pageidx, unsigned int npages); npages 195 drivers/infiniband/hw/hfi1/user_exp_rcv.c unsigned int npages, npages 208 drivers/infiniband/hw/hfi1/user_exp_rcv.c hfi1_release_user_pages(fd->mm, pages, npages, mapped); npages 209 drivers/infiniband/hw/hfi1/user_exp_rcv.c fd->tid_n_pinned -= npages; npages 218 drivers/infiniband/hw/hfi1/user_exp_rcv.c unsigned int npages; npages 224 drivers/infiniband/hw/hfi1/user_exp_rcv.c npages = num_user_pages(vaddr, tidbuf->length); npages 225 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!npages) npages 228 
drivers/infiniband/hw/hfi1/user_exp_rcv.c if (npages > fd->uctxt->expected_count) { npages 235 drivers/infiniband/hw/hfi1/user_exp_rcv.c npages * PAGE_SIZE)) { npages 237 drivers/infiniband/hw/hfi1/user_exp_rcv.c (void *)vaddr, npages); npages 241 drivers/infiniband/hw/hfi1/user_exp_rcv.c pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); npages 250 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) { npages 255 drivers/infiniband/hw/hfi1/user_exp_rcv.c pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages); npages 261 drivers/infiniband/hw/hfi1/user_exp_rcv.c tidbuf->npages = npages; npages 588 drivers/infiniband/hw/hfi1/user_exp_rcv.c static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages) npages 595 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!npages) npages 604 drivers/infiniband/hw/hfi1/user_exp_rcv.c for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) { npages 605 drivers/infiniband/hw/hfi1/user_exp_rcv.c this_pfn = i < npages ? page_to_pfn(pages[i]) : 0; npages 707 drivers/infiniband/hw/hfi1/user_exp_rcv.c u16 npages, pageidx, setidx = start + idx; npages 723 drivers/infiniband/hw/hfi1/user_exp_rcv.c npages = tbuf->psets[setidx].count; npages 728 drivers/infiniband/hw/hfi1/user_exp_rcv.c npages); npages 731 drivers/infiniband/hw/hfi1/user_exp_rcv.c mapped += npages; npages 734 drivers/infiniband/hw/hfi1/user_exp_rcv.c EXP_TID_SET(LEN, npages); npages 751 drivers/infiniband/hw/hfi1/user_exp_rcv.c u16 pageidx, unsigned int npages) npages 764 drivers/infiniband/hw/hfi1/user_exp_rcv.c node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages), npages 771 drivers/infiniband/hw/hfi1/user_exp_rcv.c npages * PAGE_SIZE, PCI_DMA_FROMDEVICE); npages 780 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->mmu.len = npages * PAGE_SIZE; npages 782 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->npages = npages; npages 787 drivers/infiniband/hw/hfi1/user_exp_rcv.c memcpy(node->pages, pages, sizeof(struct page *) * npages); npages 797 drivers/infiniband/hw/hfi1/user_exp_rcv.c pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE, npages 802 drivers/infiniband/hw/hfi1/user_exp_rcv.c hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1); npages 803 drivers/infiniband/hw/hfi1/user_exp_rcv.c trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages, npages 849 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->npages, node->mmu.addr, node->phys, npages 858 drivers/infiniband/hw/hfi1/user_exp_rcv.c unpin_rcv_pages(fd, NULL, node, 0, node->npages, true); npages 920 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->rcventry, node->npages, node->dma_addr); npages 928 drivers/infiniband/hw/hfi1/user_exp_rcv.c EXP_TID_SET(LEN, node->npages); npages 61 drivers/infiniband/hw/hfi1/user_exp_rcv.h unsigned int npages; npages 74 drivers/infiniband/hw/hfi1/user_exp_rcv.h unsigned int npages; npages 72 drivers/infiniband/hw/hfi1/user_pages.c u32 nlocked, u32 npages) npages 97 drivers/infiniband/hw/hfi1/user_pages.c if (pinned + npages >= ulimit && !can_lock) npages 100 drivers/infiniband/hw/hfi1/user_pages.c return ((nlocked + npages) <= size) || can_lock; npages 103 drivers/infiniband/hw/hfi1/user_pages.c int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages, npages 109 drivers/infiniband/hw/hfi1/user_pages.c ret = get_user_pages_fast(vaddr, npages, gup_flags, pages); npages 119 drivers/infiniband/hw/hfi1/user_pages.c size_t npages, bool dirty) npages 121 
drivers/infiniband/hw/hfi1/user_pages.c put_user_pages_dirty_lock(p, npages, dirty); npages 124 drivers/infiniband/hw/hfi1/user_pages.c atomic64_sub(npages, &mm->pinned_vm); npages 86 drivers/infiniband/hw/hfi1/user_sdma.c unsigned start, unsigned npages); npages 765 drivers/infiniband/hw/hfi1/user_sdma.c if (unlikely(queued < datalen && pageidx == iovec->npages && npages 957 drivers/infiniband/hw/hfi1/user_sdma.c static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages) npages 962 drivers/infiniband/hw/hfi1/user_sdma.c evict_data.target = npages; npages 970 drivers/infiniband/hw/hfi1/user_sdma.c int npages) npages 976 drivers/infiniband/hw/hfi1/user_sdma.c pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); npages 979 drivers/infiniband/hw/hfi1/user_sdma.c memcpy(pages, node->pages, node->npages * sizeof(*pages)); npages 981 drivers/infiniband/hw/hfi1/user_sdma.c npages -= node->npages; npages 984 drivers/infiniband/hw/hfi1/user_sdma.c atomic_read(&pq->n_locked), npages)) { npages 985 drivers/infiniband/hw/hfi1/user_sdma.c cleared = sdma_cache_evict(pq, npages); npages 986 drivers/infiniband/hw/hfi1/user_sdma.c if (cleared >= npages) npages 991 drivers/infiniband/hw/hfi1/user_sdma.c (node->npages * PAGE_SIZE)), npages, 0, npages 992 drivers/infiniband/hw/hfi1/user_sdma.c pages + node->npages); npages 997 drivers/infiniband/hw/hfi1/user_sdma.c if (pinned != npages) { npages 998 drivers/infiniband/hw/hfi1/user_sdma.c unpin_vector_pages(pq->mm, pages, node->npages, pinned); npages 1010 drivers/infiniband/hw/hfi1/user_sdma.c if (node->npages) { npages 1011 drivers/infiniband/hw/hfi1/user_sdma.c unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages); npages 1012 drivers/infiniband/hw/hfi1/user_sdma.c atomic_sub(node->npages, &node->pq->n_locked); npages 1019 drivers/infiniband/hw/hfi1/user_sdma.c int ret = 0, pinned, npages; npages 1036 drivers/infiniband/hw/hfi1/user_sdma.c iovec->npages = node->npages; npages 1053 drivers/infiniband/hw/hfi1/user_sdma.c npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len); npages 1054 drivers/infiniband/hw/hfi1/user_sdma.c if (node->npages < npages) { npages 1055 drivers/infiniband/hw/hfi1/user_sdma.c pinned = pin_sdma_pages(req, iovec, node, npages); npages 1060 drivers/infiniband/hw/hfi1/user_sdma.c node->npages += pinned; npages 1061 drivers/infiniband/hw/hfi1/user_sdma.c npages = node->npages; npages 1064 drivers/infiniband/hw/hfi1/user_sdma.c iovec->npages = npages; npages 1080 drivers/infiniband/hw/hfi1/user_sdma.c unsigned start, unsigned npages) npages 1082 drivers/infiniband/hw/hfi1/user_sdma.c hfi1_release_user_pages(mm, pages + start, npages, false); npages 1518 drivers/infiniband/hw/hfi1/user_sdma.c evict_data->cleared += node->npages; npages 149 drivers/infiniband/hw/hfi1/user_sdma.h unsigned int npages; npages 156 drivers/infiniband/hw/hfi1/user_sdma.h unsigned int npages; npages 196 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->npages = 1 << order; npages 208 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->npages *= 2; npages 212 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->npages = buf->nbufs; npages 247 drivers/infiniband/hw/hns/hns_roce_alloc.c if (end > buf->npages) { npages 250 drivers/infiniband/hw/hns/hns_roce_alloc.c start, buf_cnt, buf->npages); npages 214 drivers/infiniband/hw/hns/hns_roce_cq.c u32 npages; npages 227 drivers/infiniband/hw/hns/hns_roce_cq.c npages = (ib_umem_page_count(*umem) + npages 231 drivers/infiniband/hw/hns/hns_roce_cq.c ret = hns_roce_mtt_init(hr_dev, npages, 
page_shift, npages 271 drivers/infiniband/hw/hns/hns_roce_cq.c ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages, npages 392 drivers/infiniband/hw/hns/hns_roce_device.h u32 npages; npages 453 drivers/infiniband/hw/hns/hns_roce_device.h u32 npages; npages 1133 drivers/infiniband/hw/hns/hns_roce_device.h int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, npages 278 drivers/infiniband/hw/hns/hns_roce_hem.c int npages, npages 300 drivers/infiniband/hw/hns/hns_roce_hem.c while (npages > 0) { npages 308 drivers/infiniband/hw/hns/hns_roce_hem.c chunk->npages = 0; npages 314 drivers/infiniband/hw/hns/hns_roce_hem.c while (1 << order > npages) npages 321 drivers/infiniband/hw/hns/hns_roce_hem.c mem = &chunk->mem[chunk->npages]; npages 327 drivers/infiniband/hw/hns/hns_roce_hem.c chunk->buf[chunk->npages] = buf; npages 330 drivers/infiniband/hw/hns/hns_roce_hem.c ++chunk->npages; npages 332 drivers/infiniband/hw/hns/hns_roce_hem.c npages -= 1 << order; npages 351 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = 0; i < chunk->npages; ++i) npages 852 drivers/infiniband/hw/hns/hns_roce_hem.c for (i = 0; i < chunk->npages; ++i) { npages 83 drivers/infiniband/hw/hns/hns_roce_hem.h int npages; npages 1107 drivers/infiniband/hw/hns/hns_roce_hw_v1.c int npages; npages 1154 drivers/infiniband/hw/hns/hns_roce_hw_v1.c npages = ib_umem_page_count(mr->umem); npages 1155 drivers/infiniband/hw/hns/hns_roce_hw_v1.c dma_free_coherent(dev, npages * 8, mr->pbl_buf, npages 4181 drivers/infiniband/hw/hns/hns_roce_hw_v1.c int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + npages 4188 drivers/infiniband/hw/hns/hns_roce_hw_v1.c for (i = 0; i < npages; ++i) npages 1805 drivers/infiniband/hw/hns/hns_roce_hw_v2.c page_num = link_tbl->npages; npages 1826 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->npages); npages 1923 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->npages = pg_num; npages 1950 drivers/infiniband/hw/hns/hns_roce_hw_v2.c size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry); npages 1952 drivers/infiniband/hw/hns/hns_roce_hw_v2.c for (i = 0; i < link_tbl->npages; ++i) npages 1614 drivers/infiniband/hw/hns/hns_roce_hw_v2.h u32 npages; npages 221 drivers/infiniband/hw/hns/hns_roce_mr.c int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift, npages 228 drivers/infiniband/hw/hns/hns_roce_mr.c if (!npages) { npages 238 drivers/infiniband/hw/hns/hns_roce_mr.c for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages; npages 350 drivers/infiniband/hw/hns/hns_roce_mr.c static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages, npages 355 drivers/infiniband/hw/hns/hns_roce_mr.c if (npages > pbl_bt_sz / 8) { npages 357 drivers/infiniband/hw/hns/hns_roce_mr.c npages); npages 360 drivers/infiniband/hw/hns/hns_roce_mr.c mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, npages 366 drivers/infiniband/hw/hns/hns_roce_mr.c mr->pbl_size = npages; npages 376 drivers/infiniband/hw/hns/hns_roce_mr.c static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages, npages 386 drivers/infiniband/hw/hns/hns_roce_mr.c pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); npages 394 drivers/infiniband/hw/hns/hns_roce_mr.c size = (npages - npages_allocated) * 8; npages 416 drivers/infiniband/hw/hns/hns_roce_mr.c static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages, npages 429 drivers/infiniband/hw/hns/hns_roce_mr.c pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8); npages 463 
drivers/infiniband/hw/hns/hns_roce_mr.c size = (npages - npages_allocated) * 8; npages 507 drivers/infiniband/hw/hns/hns_roce_mr.c static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, npages 521 drivers/infiniband/hw/hns/hns_roce_mr.c return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz); npages 542 drivers/infiniband/hw/hns/hns_roce_mr.c if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) npages 547 drivers/infiniband/hw/hns/hns_roce_mr.c if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz)) npages 552 drivers/infiniband/hw/hns/hns_roce_mr.c mr->pbl_size = npages; npages 572 drivers/infiniband/hw/hns/hns_roce_mr.c u64 size, u32 access, int npages, npages 604 drivers/infiniband/hw/hns/hns_roce_mr.c npages * BA_BYTE_LEN, npages 610 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_mhop_alloc(hr_dev, npages, mr); npages 622 drivers/infiniband/hw/hns/hns_roce_mr.c int npages; npages 628 drivers/infiniband/hw/hns/hns_roce_mr.c npages = mr->pbl_size; npages 636 drivers/infiniband/hw/hns/hns_roce_mr.c dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN), npages 651 drivers/infiniband/hw/hns/hns_roce_mr.c (npages - npages_allocated) * BA_BYTE_LEN, npages 675 drivers/infiniband/hw/hns/hns_roce_mr.c (npages - npages_allocated) * npages 706 drivers/infiniband/hw/hns/hns_roce_mr.c int npages = 0; npages 718 drivers/infiniband/hw/hns/hns_roce_mr.c npages = ib_umem_page_count(mr->umem); npages 722 drivers/infiniband/hw/hns/hns_roce_mr.c (unsigned int)(npages * BA_BYTE_LEN), npages 788 drivers/infiniband/hw/hns/hns_roce_mr.c u32 npages, u64 *page_list) npages 819 drivers/infiniband/hw/hns/hns_roce_mr.c (start_index + npages - 1) / (bt_page_size / sizeof(u64))) npages 833 drivers/infiniband/hw/hns/hns_roce_mr.c for (i = 0; i < npages; ++i) { npages 845 drivers/infiniband/hw/hns/hns_roce_mr.c u32 npages, u64 *page_list) npages 874 drivers/infiniband/hw/hns/hns_roce_mr.c while (npages > 0) { npages 875 drivers/infiniband/hw/hns/hns_roce_mr.c chunk = min_t(int, bt_page_size / sizeof(u64), npages); npages 882 drivers/infiniband/hw/hns/hns_roce_mr.c npages -= chunk; npages 897 drivers/infiniband/hw/hns/hns_roce_mr.c page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL); npages 901 drivers/infiniband/hw/hns/hns_roce_mr.c for (i = 0; i < buf->npages; ++i) { npages 908 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list); npages 1217 drivers/infiniband/hw/hns/hns_roce_mr.c int npages; npages 1221 drivers/infiniband/hw/hns/hns_roce_mr.c npages = ib_umem_page_count(mr->umem); npages 1226 drivers/infiniband/hw/hns/hns_roce_mr.c dma_free_coherent(dev, npages * 8, npages 1237 drivers/infiniband/hw/hns/hns_roce_mr.c npages = ib_umem_page_count(mr->umem); npages 1240 drivers/infiniband/hw/hns/hns_roce_mr.c ret = hns_roce_mhop_alloc(hr_dev, npages, mr); npages 1244 drivers/infiniband/hw/hns/hns_roce_mr.c mr->pbl_buf = dma_alloc_coherent(dev, npages * 8, npages 1263 drivers/infiniband/hw/hns/hns_roce_mr.c npages = ib_umem_page_count(mr->umem); npages 1268 drivers/infiniband/hw/hns/hns_roce_mr.c dma_free_coherent(dev, npages * 8, npages 1429 drivers/infiniband/hw/hns/hns_roce_mr.c mr->pbl_buf[mr->npages++] = addr; npages 1439 drivers/infiniband/hw/hns/hns_roce_mr.c mr->npages = 0; npages 194 drivers/infiniband/hw/hns/hns_roce_srq.c buf->npages = (ib_umem_page_count(srq->umem) + npages 198 drivers/infiniband/hw/hns/hns_roce_srq.c ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, npages 217 
drivers/infiniband/hw/hns/hns_roce_srq.c buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem), npages 220 drivers/infiniband/hw/hns/hns_roce_srq.c ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, npages 286 drivers/infiniband/hw/hns/hns_roce_srq.c ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift, npages 303 drivers/infiniband/hw/hns/hns_roce_srq.c ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages, npages 1340 drivers/infiniband/hw/i40iw/i40iw_verbs.c static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) npages 1344 drivers/infiniband/hw/i40iw/i40iw_verbs.c for (pg_idx = 0; pg_idx < npages; pg_idx++) { npages 1617 drivers/infiniband/hw/i40iw/i40iw_verbs.c if (unlikely(iwmr->npages == iwmr->page_cnt)) npages 1621 drivers/infiniband/hw/i40iw/i40iw_verbs.c pbl[iwmr->npages++] = cpu_to_le64(addr); npages 1636 drivers/infiniband/hw/i40iw/i40iw_verbs.c iwmr->npages = 0; npages 2256 drivers/infiniband/hw/i40iw/i40iw_verbs.c if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR) npages 98 drivers/infiniband/hw/i40iw/i40iw_verbs.h u32 npages; npages 112 drivers/infiniband/hw/mlx4/cq.c err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift, npages 137 drivers/infiniband/hw/mlx4/mlx4_ib.h u32 npages; npages 799 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages, npages 98 drivers/infiniband/hw/mlx4/mr.c int *start_index, int *npages) npages 127 drivers/infiniband/hw/mlx4/mr.c pages[*npages] = cur_start_addr + (mtt_size * k); npages 128 drivers/infiniband/hw/mlx4/mr.c (*npages)++; npages 133 drivers/infiniband/hw/mlx4/mr.c if (*npages == PAGE_SIZE / sizeof(u64)) { npages 135 drivers/infiniband/hw/mlx4/mr.c *npages, pages); npages 139 drivers/infiniband/hw/mlx4/mr.c (*start_index) += *npages; npages 140 drivers/infiniband/hw/mlx4/mr.c *npages = 0; npages 192 drivers/infiniband/hw/mlx4/mr.c int npages = 0; npages 218 drivers/infiniband/hw/mlx4/mr.c &npages); npages 235 drivers/infiniband/hw/mlx4/mr.c &start_index, &npages); npages 240 drivers/infiniband/hw/mlx4/mr.c if (npages) npages 241 drivers/infiniband/hw/mlx4/mr.c err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages); npages 736 drivers/infiniband/hw/mlx4/mr.c int npages, u64 iova) npages 741 drivers/infiniband/hw/mlx4/mr.c return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, npages 798 drivers/infiniband/hw/mlx4/mr.c if (unlikely(mr->npages == mr->max_pages)) npages 801 drivers/infiniband/hw/mlx4/mr.c mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT); npages 812 drivers/infiniband/hw/mlx4/mr.c mr->npages = 0; npages 1174 drivers/infiniband/hw/mlx4/qp.c err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, npages 157 drivers/infiniband/hw/mlx4/srq.c err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift, npages 712 drivers/infiniband/hw/mlx5/cq.c int npages; npages 746 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift, npages 749 drivers/infiniband/hw/mlx5/cq.c ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); npages 869 drivers/infiniband/hw/mlx5/cq.c cq->buf.frag_buf.npages; npages 1122 drivers/infiniband/hw/mlx5/cq.c int npages; npages 1143 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift, npages 1273 drivers/infiniband/hw/mlx5/cq.c npas = frag_buf->npages; npages 2104 drivers/infiniband/hw/mlx5/devx.c int npages; npages 2129 
drivers/infiniband/hw/mlx5/devx.c MLX5_MKEY_PAGE_SHIFT_MASK, &npages, npages 2132 drivers/infiniband/hw/mlx5/devx.c if (!npages) { npages 2198 drivers/infiniband/hw/mlx5/main.c u32 npages = map_size >> PAGE_SHIFT; npages 2201 drivers/infiniband/hw/mlx5/main.c if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) != npages 2202 drivers/infiniband/hw/mlx5/main.c page_idx + npages) npages 606 drivers/infiniband/hw/mlx5/mlx5_ib.h int npages; npages 1160 drivers/infiniband/hw/mlx5/mlx5_ib.h int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, npages 741 drivers/infiniband/hw/mlx5/mr.c int npages; npages 744 drivers/infiniband/hw/mlx5/mr.c npages = ALIGN(len + offset, page_size) >> page_shift; npages 745 drivers/infiniband/hw/mlx5/mr.c return (npages + 1) / 2; npages 757 drivers/infiniband/hw/mlx5/mr.c struct ib_umem **umem, int *npages, int *page_shift, npages 778 drivers/infiniband/hw/mlx5/mr.c *npages = *ncont << (*page_shift - PAGE_SHIFT); npages 788 drivers/infiniband/hw/mlx5/mr.c mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, npages 792 drivers/infiniband/hw/mlx5/mr.c if (!*npages) { npages 801 drivers/infiniband/hw/mlx5/mr.c *npages, *ncont, *order, *page_shift); npages 851 drivers/infiniband/hw/mlx5/mr.c u64 virt_addr, u64 len, int npages, npages 885 drivers/infiniband/hw/mlx5/mr.c static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages, npages 895 drivers/infiniband/hw/mlx5/mr.c mlx5_odp_populate_klm(xlt, idx, npages, mr, flags); npages 896 drivers/infiniband/hw/mlx5/mr.c return npages; npages 899 drivers/infiniband/hw/mlx5/mr.c npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx); npages 903 drivers/infiniband/hw/mlx5/mr.c idx, npages, xlt, npages 908 drivers/infiniband/hw/mlx5/mr.c memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0, npages 909 drivers/infiniband/hw/mlx5/mr.c size - npages * sizeof(struct mlx5_mtt)); npages 912 drivers/infiniband/hw/mlx5/mr.c return npages; npages 919 drivers/infiniband/hw/mlx5/mr.c int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, npages 949 drivers/infiniband/hw/mlx5/mr.c npages += idx & page_mask; npages 956 drivers/infiniband/hw/mlx5/mr.c pages_to_map = ALIGN(npages, page_align); npages 1005 drivers/infiniband/hw/mlx5/mr.c npages = min_t(int, pages_iter, pages_to_map - pages_mapped); npages 1007 drivers/infiniband/hw/mlx5/mr.c npages = populate_xlt(mr, idx, npages, xlt, npages 1012 drivers/infiniband/hw/mlx5/mr.c sg.length = ALIGN(npages * desc_size, npages 1052 drivers/infiniband/hw/mlx5/mr.c struct ib_umem *umem, int npages, npages 1074 drivers/infiniband/hw/mlx5/mr.c inlen += sizeof(*pas) * roundup(npages, 2); npages 1137 drivers/infiniband/hw/mlx5/mr.c int npages, u64 length, int access_flags) npages 1139 drivers/infiniband/hw/mlx5/mr.c mr->npages = npages; npages 1140 drivers/infiniband/hw/mlx5/mr.c atomic_add(npages, &dev->mdev->priv.reg_pages); npages 1259 drivers/infiniband/hw/mlx5/mr.c int npages; npages 1283 drivers/infiniband/hw/mlx5/mr.c &npages, &page_shift, &ncont, &order); npages 1321 drivers/infiniband/hw/mlx5/mr.c set_mr_fields(dev, mr, npages, length, access_flags); npages 1404 drivers/infiniband/hw/mlx5/mr.c int npages = 0; npages 1413 drivers/infiniband/hw/mlx5/mr.c atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); npages 1438 drivers/infiniband/hw/mlx5/mr.c &mr->umem, &npages, &page_shift, &ncont, npages 1482 drivers/infiniband/hw/mlx5/mr.c err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, npages 1492 drivers/infiniband/hw/mlx5/mr.c 
set_mr_fields(dev, mr, npages, len, access_flags); npages 1575 drivers/infiniband/hw/mlx5/mr.c int npages = mr->npages; npages 1607 drivers/infiniband/hw/mlx5/mr.c atomic_sub(npages, &dev->mdev->priv.reg_pages); npages 1622 drivers/infiniband/hw/mlx5/mr.c atomic_sub(npages, &dev->mdev->priv.reg_pages); npages 319 drivers/infiniband/hw/mlx5/odp.c if (unlikely(!umem_odp->npages && mr->parent && npages 615 drivers/infiniband/hw/mlx5/odp.c int npages = 0, current_seq, page_shift, ret, np; npages 694 drivers/infiniband/hw/mlx5/odp.c npages += np << (page_shift - PAGE_SHIFT); npages 712 drivers/infiniband/hw/mlx5/odp.c return npages; npages 780 drivers/infiniband/hw/mlx5/odp.c int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0; npages 842 drivers/infiniband/hw/mlx5/odp.c npages += ret; npages 933 drivers/infiniband/hw/mlx5/odp.c return ret ? ret : npages; npages 959 drivers/infiniband/hw/mlx5/odp.c int ret = 0, npages = 0; npages 1015 drivers/infiniband/hw/mlx5/odp.c npages += ret; npages 1018 drivers/infiniband/hw/mlx5/odp.c return ret < 0 ? ret : npages; npages 747 drivers/infiniband/hw/mlx5/qp.c struct ib_umem **umem, int *npages, int *page_shift, npages 758 drivers/infiniband/hw/mlx5/qp.c mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL); npages 767 drivers/infiniband/hw/mlx5/qp.c addr, size, *npages, *page_shift, *ncont, *offset); npages 801 drivers/infiniband/hw/mlx5/qp.c int npages; npages 816 drivers/infiniband/hw/mlx5/qp.c mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift, npages 832 drivers/infiniband/hw/mlx5/qp.c npages, page_shift, ncont, offset); npages 867 drivers/infiniband/hw/mlx5/qp.c int npages; npages 921 drivers/infiniband/hw/mlx5/qp.c &npages, &page_shift, &ncont, &offset); npages 1092 drivers/infiniband/hw/mlx5/qp.c MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; npages 1233 drivers/infiniband/hw/mlx5/qp.c int npages; npages 1238 drivers/infiniband/hw/mlx5/qp.c &sq->ubuffer.umem, &npages, &page_shift, &ncont, npages 54 drivers/infiniband/hw/mlx5/srq.c int npages; npages 90 drivers/infiniband/hw/mlx5/srq.c mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages, npages 165 drivers/infiniband/hw/mlx5/srq.c in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL); npages 200 drivers/infiniband/hw/mthca/mthca_allocator.c int npages, shift; npages 207 drivers/infiniband/hw/mthca/mthca_allocator.c npages = 1; npages 219 drivers/infiniband/hw/mthca/mthca_allocator.c npages *= 2; npages 222 drivers/infiniband/hw/mthca/mthca_allocator.c dma_list = kmalloc_array(npages, sizeof(*dma_list), npages 227 drivers/infiniband/hw/mthca/mthca_allocator.c for (i = 0; i < npages; ++i) npages 231 drivers/infiniband/hw/mthca/mthca_allocator.c npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; npages 234 drivers/infiniband/hw/mthca/mthca_allocator.c dma_list = kmalloc_array(npages, sizeof(*dma_list), npages 239 drivers/infiniband/hw/mthca/mthca_allocator.c buf->page_list = kmalloc_array(npages, npages 245 drivers/infiniband/hw/mthca/mthca_allocator.c for (i = 0; i < npages; ++i) npages 248 drivers/infiniband/hw/mthca/mthca_allocator.c for (i = 0; i < npages; ++i) { npages 263 drivers/infiniband/hw/mthca/mthca_allocator.c dma_list, shift, npages, npages 470 drivers/infiniband/hw/mthca/mthca_eq.c int npages; npages 480 drivers/infiniband/hw/mthca/mthca_eq.c npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; npages 482 drivers/infiniband/hw/mthca/mthca_eq.c eq->page_list = kmalloc_array(npages, 
sizeof(*eq->page_list), npages 487 drivers/infiniband/hw/mthca/mthca_eq.c for (i = 0; i < npages; ++i) npages 490 drivers/infiniband/hw/mthca/mthca_eq.c dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); npages 499 drivers/infiniband/hw/mthca/mthca_eq.c for (i = 0; i < npages; ++i) { npages 519 drivers/infiniband/hw/mthca/mthca_eq.c dma_list, PAGE_SHIFT, npages, npages 520 drivers/infiniband/hw/mthca/mthca_eq.c 0, npages * PAGE_SIZE, npages 571 drivers/infiniband/hw/mthca/mthca_eq.c for (i = 0; i < npages; ++i) npages 593 drivers/infiniband/hw/mthca/mthca_eq.c int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / npages 619 drivers/infiniband/hw/mthca/mthca_eq.c for (i = 0; i < npages; ++i) npages 69 drivers/infiniband/hw/mthca/mthca_memfree.c pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages, npages 72 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < chunk->npages; ++i) npages 81 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < chunk->npages; ++i) { npages 137 drivers/infiniband/hw/mthca/mthca_memfree.c struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, npages 157 drivers/infiniband/hw/mthca/mthca_memfree.c while (npages > 0) { npages 165 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->npages = 0; npages 170 drivers/infiniband/hw/mthca/mthca_memfree.c while (1 << cur_order > npages) npages 175 drivers/infiniband/hw/mthca/mthca_memfree.c &chunk->mem[chunk->npages], npages 178 drivers/infiniband/hw/mthca/mthca_memfree.c ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages], npages 182 drivers/infiniband/hw/mthca/mthca_memfree.c ++chunk->npages; npages 186 drivers/infiniband/hw/mthca/mthca_memfree.c else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) { npages 188 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->npages, npages 195 drivers/infiniband/hw/mthca/mthca_memfree.c if (chunk->npages == MTHCA_ICM_CHUNK_LEN) npages 198 drivers/infiniband/hw/mthca/mthca_memfree.c npages -= 1 << cur_order; npages 208 drivers/infiniband/hw/mthca/mthca_memfree.c chunk->npages, npages 298 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < chunk->npages; ++i) { npages 526 drivers/infiniband/hw/mthca/mthca_memfree.c int npages; npages 532 drivers/infiniband/hw/mthca/mthca_memfree.c npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; npages 533 drivers/infiniband/hw/mthca/mthca_memfree.c db_tab = kmalloc(struct_size(db_tab, page, npages), GFP_KERNEL); npages 538 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < npages; ++i) { npages 590 drivers/infiniband/hw/mthca/mthca_memfree.c start = dev->db_tab->npages - 1; npages 713 drivers/infiniband/hw/mthca/mthca_memfree.c dev->db_tab->npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; npages 715 drivers/infiniband/hw/mthca/mthca_memfree.c dev->db_tab->min_group2 = dev->db_tab->npages - 1; npages 717 drivers/infiniband/hw/mthca/mthca_memfree.c dev->db_tab->page = kmalloc_array(dev->db_tab->npages, npages 725 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < dev->db_tab->npages; ++i) npages 744 drivers/infiniband/hw/mthca/mthca_memfree.c for (i = 0; i < dev->db_tab->npages; ++i) { npages 53 drivers/infiniband/hw/mthca/mthca_memfree.h int npages; npages 82 drivers/infiniband/hw/mthca/mthca_memfree.h struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, npages 145 drivers/infiniband/hw/mthca/mthca_memfree.h int npages; npages 198 drivers/infiniband/hw/ocrdma/ocrdma.h u32 npages; npages 2070 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c fast_reg->num_sges 
= mr->npages; npages 2074 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c for (i = 0; i < mr->npages; i++) { npages 2968 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (unlikely(mr->npages == mr->hwmr.num_pbes)) npages 2971 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c mr->pages[mr->npages++] = addr; npages 2981 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c mr->npages = 0; npages 491 drivers/infiniband/hw/qedr/qedr.h u32 npages; npages 2802 drivers/infiniband/hw/qedr/verbs.c if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) { npages 2803 drivers/infiniband/hw/qedr/verbs.c DP_ERR(mr->dev, "qedr_set_page fails when %d\n", mr->npages); npages 2808 drivers/infiniband/hw/qedr/verbs.c mr->npages, addr); npages 2811 drivers/infiniband/hw/qedr/verbs.c pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page); npages 2813 drivers/infiniband/hw/qedr/verbs.c pbe += mr->npages % pbes_in_page; npages 2817 drivers/infiniband/hw/qedr/verbs.c mr->npages++; npages 2847 drivers/infiniband/hw/qedr/verbs.c mr->npages = 0; npages 661 drivers/infiniband/hw/qib/qib_user_sdma.c unsigned long addr, int tlen, int npages) npages 667 drivers/infiniband/hw/qib/qib_user_sdma.c while (npages) { npages 668 drivers/infiniband/hw/qib/qib_user_sdma.c if (npages > 8) npages 671 drivers/infiniband/hw/qib/qib_user_sdma.c j = npages; npages 701 drivers/infiniband/hw/qib/qib_user_sdma.c npages -= j; npages 725 drivers/infiniband/hw/qib/qib_user_sdma.c const int npages = qib_user_sdma_num_pages(iov + idx); npages 729 drivers/infiniband/hw/qib/qib_user_sdma.c iov[idx].iov_len, npages); npages 761 drivers/infiniband/hw/qib/qib_user_sdma.c unsigned long niov, int npages) npages 766 drivers/infiniband/hw/qib/qib_user_sdma.c npages >= ARRAY_SIZE(pkt->addr)) npages 827 drivers/infiniband/hw/qib/qib_user_sdma.c int npages = 0; npages 886 drivers/infiniband/hw/qib/qib_user_sdma.c npages += qib_user_sdma_num_pages(&iov[idx]); npages 910 drivers/infiniband/hw/qib/qib_user_sdma.c n = npages*((2*PAGE_SIZE/frag_size)+1); npages 986 drivers/infiniband/hw/qib/qib_user_sdma.c nfrags, npages); npages 95 drivers/infiniband/hw/usnic/usnic_uiom.c unsigned long npages; npages 123 drivers/infiniband/hw/usnic/usnic_uiom.c npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; npages 128 drivers/infiniband/hw/usnic/usnic_uiom.c locked = atomic64_add_return(npages, ¤t->mm->pinned_vm); npages 143 drivers/infiniband/hw/usnic/usnic_uiom.c while (npages) { npages 145 drivers/infiniband/hw/usnic/usnic_uiom.c min_t(unsigned long, npages, npages 153 drivers/infiniband/hw/usnic/usnic_uiom.c npages -= ret; npages 186 drivers/infiniband/hw/usnic/usnic_uiom.c atomic64_sub(npages, ¤t->mm->pinned_vm); npages 218 drivers/infiniband/hw/usnic/usnic_uiom.c int npages; npages 224 drivers/infiniband/hw/usnic/usnic_uiom.c npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; npages 226 drivers/infiniband/hw/usnic/usnic_uiom.c vpn_last = vpn_start + npages - 1; npages 340 drivers/infiniband/hw/usnic/usnic_uiom.c unsigned long npages; npages 355 drivers/infiniband/hw/usnic/usnic_uiom.c npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT; npages 357 drivers/infiniband/hw/usnic/usnic_uiom.c vpn_last = vpn_start + npages - 1; npages 82 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h u64 npages; npages 146 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h u32 npages; npages 176 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h int npages; npages 191 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h int npages; npages 537 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h u64 npages, bool 
alloc_pages); npages 109 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c int npages; npages 145 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c npages = ib_umem_page_count(cq->umem); npages 148 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c npages = 1 + (entries * sizeof(struct pvrdma_cqe) + npages 155 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) { npages 162 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel); npages 181 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cmd->nchunks = npages; npages 53 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c u64 npages, bool alloc_pages) npages 57 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (npages > PVRDMA_PAGE_DIR_MAX_PAGES) npages 67 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1; npages 81 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c pdir->npages = npages; npages 84 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c pdir->pages = kcalloc(npages, sizeof(*pdir->pages), npages 89 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c for (i = 0; i < pdir->npages; i++) { npages 127 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c for (i = 0; i < pdir->npages && pdir->pages[i]; i++) { npages 173 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (idx >= pdir->npages) npages 189 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (offset >= pdir->npages) npages 213 drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c if (num_pages > pdir->npages) npages 122 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c int ret, npages; npages 136 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c npages = ib_umem_num_pages(umem); npages 137 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) { npages 139 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c npages); npages 154 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false); npages 171 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c cmd->nchunks = npages; npages 305 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c if (mr->npages == mr->max_pages) npages 308 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c mr->pages[mr->npages++] = addr; npages 319 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c mr->npages = 0; npages 291 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->npages = qp->npages_send + qp->npages_recv; npages 303 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->npages = qp->npages_send + qp->npages_recv; npages 312 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c if (qp->npages < 0 || qp->npages > PVRDMA_PAGE_DIR_MAX_PAGES) { npages 319 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c ret = pvrdma_page_dir_init(dev, &qp->pdir, qp->npages, npages 366 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c cmd->total_chunks = qp->npages; npages 601 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c wqe_hdr->wr.fast_reg.page_list_len = mr->npages; npages 607 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c mr->npages); npages 155 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c srq->npages = ib_umem_page_count(srq->umem); npages 157 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) { npages 164 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c ret = pvrdma_page_dir_init(dev, &srq->pdir, srq->npages, false); npages 176 drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c cmd->nchunks = srq->npages; npages 523 drivers/infiniband/ulp/iser/iscsi_iser.h int npages; npages 157 
drivers/infiniband/ulp/iser/iser_memory.c page_vec->npages, page_vec->fake_mr.length); npages 158 drivers/infiniband/ulp/iser/iser_memory.c for (i = 0; i < page_vec->npages; i++) npages 221 drivers/infiniband/ulp/iser/iser_memory.c page_vec->pages[page_vec->npages++] = addr; npages 239 drivers/infiniband/ulp/iser/iser_memory.c page_vec->npages = 0; npages 251 drivers/infiniband/ulp/iser/iser_memory.c page_vec->npages, page_vec->pages[0]); npages 1482 drivers/infiniband/ulp/srp/ib_srp.c if (state->npages == 0) npages 1485 drivers/infiniband/ulp/srp/ib_srp.c if (state->npages == 1 && target->global_rkey) { npages 1492 drivers/infiniband/ulp/srp/ib_srp.c state->npages, io_addr); npages 1503 drivers/infiniband/ulp/srp/ib_srp.c state->npages = 0; npages 1615 drivers/infiniband/ulp/srp/ib_srp.c if (state->npages == dev->max_pages_per_mr || npages 1616 drivers/infiniband/ulp/srp/ib_srp.c (state->npages > 0 && offset != 0)) { npages 1624 drivers/infiniband/ulp/srp/ib_srp.c if (!state->npages) npages 1626 drivers/infiniband/ulp/srp/ib_srp.c state->pages[state->npages++] = dma_addr & dev->mr_page_mask; npages 1753 drivers/infiniband/ulp/srp/ib_srp.c state.npages = 1; npages 348 drivers/infiniband/ulp/srp/ib_srp.h unsigned int npages; npages 2551 drivers/iommu/amd_iommu.c int i, npages = 0; npages 2560 drivers/iommu/amd_iommu.c s->dma_address = npages << PAGE_SHIFT; npages 2561 drivers/iommu/amd_iommu.c p = npages % boundary_size; npages 2564 drivers/iommu/amd_iommu.c npages += boundary_size - p; npages 2565 drivers/iommu/amd_iommu.c npages += n; npages 2568 drivers/iommu/amd_iommu.c return npages; npages 2579 drivers/iommu/amd_iommu.c int mapped_pages = 0, npages = 0, prot = 0, i; npages 2594 drivers/iommu/amd_iommu.c npages = sg_num_pages(dev, sglist, nelems); npages 2596 drivers/iommu/amd_iommu.c address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask); npages 2639 drivers/iommu/amd_iommu.c npages, ret); npages 2656 drivers/iommu/amd_iommu.c free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); npages 2673 drivers/iommu/amd_iommu.c int npages; npages 2681 drivers/iommu/amd_iommu.c npages = sg_num_pages(dev, sglist, nelems); npages 2683 drivers/iommu/amd_iommu.c __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir); npages 5484 drivers/iommu/intel-iommu.c unsigned int npages; npages 5499 drivers/iommu/intel-iommu.c npages = last_pfn - start_pfn + 1; npages 5503 drivers/iommu/intel-iommu.c start_pfn, npages, !freelist, 0); npages 286 drivers/misc/habanalabs/debugfs.c for (j = 0 ; j < phys_pg_pack->npages ; j++) { npages 841 drivers/misc/habanalabs/habanalabs.h u64 npages; npages 90 drivers/misc/habanalabs/memory.c phys_pg_pack->npages = num_pgs; npages 277 drivers/misc/habanalabs/memory.c for (i = 0; i < phys_pg_pack->npages ; i++) npages 281 drivers/misc/habanalabs/memory.c for (i = 0 ; i < phys_pg_pack->npages ; i++) { npages 653 drivers/misc/habanalabs/memory.c u32 npages, page_size = PAGE_SIZE; npages 674 drivers/misc/habanalabs/memory.c npages = get_sg_info(sg, &dma_addr); npages 676 drivers/misc/habanalabs/memory.c total_npages += npages; npages 678 drivers/misc/habanalabs/memory.c if ((npages % PGS_IN_2MB_PAGE) || npages 697 drivers/misc/habanalabs/memory.c phys_pg_pack->npages = total_npages; npages 703 drivers/misc/habanalabs/memory.c npages = get_sg_info(sg, &dma_addr); npages 712 drivers/misc/habanalabs/memory.c while (npages) { npages 717 drivers/misc/habanalabs/memory.c npages -= PGS_IN_2MB_PAGE; npages 719 drivers/misc/habanalabs/memory.c npages--; npages 753 
drivers/misc/habanalabs/memory.c for (i = 0 ; i < phys_pg_pack->npages ; i++) { npages 760 drivers/misc/habanalabs/memory.c phys_pg_pack->handle, phys_pg_pack->npages, npages 1043 drivers/misc/habanalabs/memory.c for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { npages 1232 drivers/misc/habanalabs/memory.c u32 npages, offset; npages 1260 drivers/misc/habanalabs/memory.c npages = (end - start) >> PAGE_SHIFT; npages 1267 drivers/misc/habanalabs/memory.c userptr->vec = frame_vector_create(npages); npages 1273 drivers/misc/habanalabs/memory.c rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, npages 1276 drivers/misc/habanalabs/memory.c if (rc != npages) { npages 1300 drivers/misc/habanalabs/memory.c npages, offset, size, GFP_ATOMIC); npages 671 drivers/misc/habanalabs/mmu.c u32 real_page_size, npages; npages 693 drivers/misc/habanalabs/mmu.c npages = page_size / real_page_size; npages 696 drivers/misc/habanalabs/mmu.c for (i = 0 ; i < npages ; i++) { npages 894 drivers/misc/habanalabs/mmu.c u32 real_page_size, npages; npages 920 drivers/misc/habanalabs/mmu.c npages = page_size / real_page_size; npages 924 drivers/misc/habanalabs/mmu.c for (i = 0 ; i < npages ; i++) { npages 101 drivers/net/ethernet/google/gve/gve_rx.c u32 slots, npages; npages 147 drivers/net/ethernet/google/gve/gve_rx.c npages = bytes / PAGE_SIZE; npages 148 drivers/net/ethernet/google/gve/gve_rx.c if (npages * PAGE_SIZE != bytes) { npages 583 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->npages = 1; npages 595 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->npages *= 2; npages 617 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->npages = buf->nbufs; npages 789 drivers/net/ethernet/mellanox/mlx4/alloc.c err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, npages 336 drivers/net/ethernet/mellanox/mlx4/cq.c for (i = 0; i < buf->npages; i++) npages 975 drivers/net/ethernet/mellanox/mlx4/eq.c int npages; npages 987 drivers/net/ethernet/mellanox/mlx4/eq.c npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; npages 989 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list), npages 994 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i < npages; ++i) npages 997 drivers/net/ethernet/mellanox/mlx4/eq.c dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); npages 1006 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i < npages; ++i) { npages 1028 drivers/net/ethernet/mellanox/mlx4/eq.c err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); npages 1032 drivers/net/ethernet/mellanox/mlx4/eq.c err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); npages 1072 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i < npages; ++i) npages 1097 drivers/net/ethernet/mellanox/mlx4/eq.c int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; npages 1107 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i < npages; ++i) npages 60 drivers/net/ethernet/mellanox/mlx4/icm.c dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages, npages 63 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < chunk->npages; ++i) npages 72 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < chunk->npages; ++i) npages 132 drivers/net/ethernet/mellanox/mlx4/icm.c struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, npages 159 drivers/net/ethernet/mellanox/mlx4/icm.c while (npages > 0) { npages 179 drivers/net/ethernet/mellanox/mlx4/icm.c while (1 << cur_order > npages) npages 188 
drivers/net/ethernet/mellanox/mlx4/icm.c &chunk->buf[chunk->npages], npages 191 drivers/net/ethernet/mellanox/mlx4/icm.c ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages], npages 202 drivers/net/ethernet/mellanox/mlx4/icm.c ++chunk->npages; npages 206 drivers/net/ethernet/mellanox/mlx4/icm.c else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { npages 208 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->sg, chunk->npages, npages 215 drivers/net/ethernet/mellanox/mlx4/icm.c if (chunk->npages == MLX4_ICM_CHUNK_LEN) npages 218 drivers/net/ethernet/mellanox/mlx4/icm.c npages -= 1 << cur_order; npages 223 drivers/net/ethernet/mellanox/mlx4/icm.c chunk->npages, DMA_BIDIRECTIONAL); npages 336 drivers/net/ethernet/mellanox/mlx4/icm.c for (i = 0; i < chunk->npages; ++i) { npages 58 drivers/net/ethernet/mellanox/mlx4/icm.h int npages; npages 80 drivers/net/ethernet/mellanox/mlx4/icm.h struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, npages 1032 drivers/net/ethernet/mellanox/mlx4/mlx4.h int start_index, int npages, u64 *page_list); npages 194 drivers/net/ethernet/mellanox/mlx4/mr.c int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, npages 199 drivers/net/ethernet/mellanox/mlx4/mr.c if (!npages) { npages 206 drivers/net/ethernet/mellanox/mlx4/mr.c for (mtt->order = 0, i = 1; i < npages; i <<= 1) npages 418 drivers/net/ethernet/mellanox/mlx4/mr.c u64 iova, u64 size, u32 access, int npages, npages 428 drivers/net/ethernet/mellanox/mlx4/mr.c return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); npages 528 drivers/net/ethernet/mellanox/mlx4/mr.c int npages, int page_shift, struct mlx4_mr *mr) npages 538 drivers/net/ethernet/mellanox/mlx4/mr.c access, npages, page_shift, mr); npages 590 drivers/net/ethernet/mellanox/mlx4/mr.c u64 iova, u64 size, int npages, npages 595 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); npages 693 drivers/net/ethernet/mellanox/mlx4/mr.c int start_index, int npages, u64 *page_list) npages 707 drivers/net/ethernet/mellanox/mlx4/mr.c npages * sizeof(u64), DMA_TO_DEVICE); npages 709 drivers/net/ethernet/mellanox/mlx4/mr.c for (i = 0; i < npages; ++i) npages 713 drivers/net/ethernet/mellanox/mlx4/mr.c npages * sizeof(u64), DMA_TO_DEVICE); npages 719 drivers/net/ethernet/mellanox/mlx4/mr.c int start_index, int npages, u64 *page_list) npages 731 drivers/net/ethernet/mellanox/mlx4/mr.c chunk = min_t(int, max_mtts_first_page, npages); npages 733 drivers/net/ethernet/mellanox/mlx4/mr.c while (npages > 0) { npages 737 drivers/net/ethernet/mellanox/mlx4/mr.c npages -= chunk; npages 741 drivers/net/ethernet/mellanox/mlx4/mr.c chunk = min_t(int, mtts_per_page, npages); npages 747 drivers/net/ethernet/mellanox/mlx4/mr.c int start_index, int npages, u64 *page_list) npages 764 drivers/net/ethernet/mellanox/mlx4/mr.c while (npages > 0) { npages 766 drivers/net/ethernet/mellanox/mlx4/mr.c npages); npages 778 drivers/net/ethernet/mellanox/mlx4/mr.c npages -= chunk; npages 786 drivers/net/ethernet/mellanox/mlx4/mr.c return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list); npages 797 drivers/net/ethernet/mellanox/mlx4/mr.c page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL); npages 801 drivers/net/ethernet/mellanox/mlx4/mr.c for (i = 0; i < buf->npages; ++i) npages 807 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); npages 970 drivers/net/ethernet/mellanox/mlx4/mr.c int npages, u64 iova) npages 974 drivers/net/ethernet/mellanox/mlx4/mr.c if 
(npages > fmr->max_pages) npages 985 drivers/net/ethernet/mellanox/mlx4/mr.c for (i = 0; i < npages; ++i) { npages 997 drivers/net/ethernet/mellanox/mlx4/mr.c int npages, u64 iova, u32 *lkey, u32 *rkey) npages 1002 drivers/net/ethernet/mellanox/mlx4/mr.c err = mlx4_check_fmr(fmr, page_list, npages, iova); npages 1018 drivers/net/ethernet/mellanox/mlx4/mr.c npages * sizeof(u64), DMA_TO_DEVICE); npages 1020 drivers/net/ethernet/mellanox/mlx4/mr.c for (i = 0; i < npages; ++i) npages 1024 drivers/net/ethernet/mellanox/mlx4/mr.c npages * sizeof(u64), DMA_TO_DEVICE); npages 1028 drivers/net/ethernet/mellanox/mlx4/mr.c fmr->mpt->length = cpu_to_be64(npages * (1ull << fmr->page_shift)); npages 3274 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c int npages = vhcr->in_modifier; npages 3277 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = get_containing_mtt(dev, slave, start, npages, &rmtt); npages 3288 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c for (i = 0; i < npages; ++i) npages 3291 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages, npages 80 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->npages = 1; npages 96 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->npages *= 2; npages 127 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); npages 129 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list), npages 134 drivers/net/ethernet/mellanox/mlx5/core/alloc.c for (i = 0; i < buf->npages; i++) { npages 169 drivers/net/ethernet/mellanox/mlx5/core/alloc.c for (i = 0; i < buf->npages; i++) { npages 294 drivers/net/ethernet/mellanox/mlx5/core/alloc.c for (i = 0; i < buf->npages; i++) { npages 306 drivers/net/ethernet/mellanox/mlx5/core/alloc.c for (i = 0; i < buf->npages; i++) npages 94 drivers/net/ethernet/mellanox/mlx5/core/en.h #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) npages 268 drivers/net/ethernet/mellanox/mlx5/core/en_main.c u64 npages, u8 page_shift, npages 290 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET64(mkc, mkc, len, npages << page_shift); npages 292 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_MTT_OCTW(npages)); npages 686 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(u64) * rq->wq_ctrl.buf.npages; npages 1215 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(u64) * csp->wq_ctrl->buf.npages; npages 1615 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(u64) * cq->wq_ctrl.buf.npages; npages 271 drivers/net/ethernet/mellanox/mlx5/core/eq.c MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages; npages 459 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c sizeof(u64) * conn->cq.wq_ctrl.buf.npages; npages 574 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.wq_ctrl.buf.npages; npages 52 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c s32 npages; npages 137 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c s32 *npages, int boot) npages 153 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c *npages = MLX5_GET(query_pages_out, out, num_pages); npages 275 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, npages 285 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]); npages 293 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c for (i = 0; i < npages; i++) { npages 310 
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c MLX5_SET(manage_pages_in, in, input_num_entries, npages); npages 316 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c func_id, npages, err); npages 320 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c dev->priv.fw_pages += npages; npages 322 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c dev->priv.vfs_pages += npages; npages 324 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c dev->priv.peer_pf_pages += npages; npages 327 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c npages, ec_function, func_id, err); npages 348 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c u32 npages; npages 355 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c npages = MLX5_GET(manage_pages_in, in, input_num_entries); npages 359 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c while (p && i < npages) { npages 373 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, npages 386 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]); npages 394 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c MLX5_SET(manage_pages_in, in, input_num_entries, npages); npages 397 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); npages 405 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c if (num_claimed > npages) { npages 407 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c num_claimed, npages); npages 435 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c if (req->npages < 0) npages 436 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL, npages 438 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c else if (req->npages > 0) npages 439 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function); npages 443 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c req->npages < 0 ? "reclaim" : "give", err); npages 461 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c s32 npages; npages 468 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c npages = be32_to_cpu(eqe->data.req_pages.num_pages); npages 471 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c func_id, npages); npages 480 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c req->npages = npages; npages 490 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c s32 uninitialized_var(npages); npages 493 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); npages 498 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c npages, boot ? "boot" : "init", func_id); npages 500 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev)); npages 154 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->wq_ctrl.buf.npages; npages 735 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c sizeof(u64) * cq->wq_ctrl.buf.npages; npages 205 drivers/nvme/host/pci.c int npages; /* In the PRP list. 
0 means small pool in use */ npages 546 drivers/nvme/host/pci.c if (iod->npages == 0) npages 550 drivers/nvme/host/pci.c for (i = 0; i < iod->npages; i++) { npages 624 drivers/nvme/host/pci.c iod->npages = 0; npages 627 drivers/nvme/host/pci.c iod->npages = 1; npages 633 drivers/nvme/host/pci.c iod->npages = -1; npages 645 drivers/nvme/host/pci.c list[iod->npages++] = prp_list; npages 719 drivers/nvme/host/pci.c iod->npages = 0; npages 722 drivers/nvme/host/pci.c iod->npages = 1; npages 727 drivers/nvme/host/pci.c iod->npages = -1; npages 746 drivers/nvme/host/pci.c nvme_pci_iod_list(req)[iod->npages++] = sg_list; npages 874 drivers/nvme/host/pci.c iod->npages = -1; npages 28 drivers/staging/android/ion/ion_heap.c int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; npages 29 drivers/staging/android/ion/ion_heap.c struct page **pages = vmalloc(array_size(npages, npages 45 drivers/staging/android/ion/ion_heap.c BUG_ON(i >= npages); npages 49 drivers/staging/android/ion/ion_heap.c vaddr = vmap(pages, npages, VM_MAP, pgprot); npages 223 drivers/usb/mon/mon_bin.c static int mon_alloc_buff(struct mon_pgmap *map, int npages); npages 224 drivers/usb/mon/mon_bin.c static void mon_free_buff(struct mon_pgmap *map, int npages); npages 1325 drivers/usb/mon/mon_bin.c static int mon_alloc_buff(struct mon_pgmap *map, int npages) npages 1330 drivers/usb/mon/mon_bin.c for (n = 0; n < npages; n++) { npages 1343 drivers/usb/mon/mon_bin.c static void mon_free_buff(struct mon_pgmap *map, int npages) npages 1347 drivers/usb/mon/mon_bin.c for (n = 0; n < npages; n++) npages 631 drivers/vhost/scsi.c unsigned int npages = 0; npages 643 drivers/vhost/scsi.c sg_set_page(sg++, pages[npages++], n, offset); npages 647 drivers/vhost/scsi.c return npages; npages 231 drivers/xen/privcmd.c if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) || npages 232 drivers/xen/privcmd.c ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va)) npages 237 drivers/xen/privcmd.c ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) npages 242 drivers/xen/privcmd.c msg->mfn, msg->npages, npages 248 drivers/xen/privcmd.c st->va += msg->npages << PAGE_SHIFT; npages 159 fs/afs/dir_edit.c block->hdr.npages = htons(1); npages 262 fs/afs/dir_edit.c ntohs(block->hdr.npages), npages 69 fs/afs/xdr_fs.h __be16 npages; npages 134 fs/ceph/file.c int npages; npages 137 fs/ceph/file.c npages = iov_iter_npages(iter, INT_MAX); npages 144 fs/ceph/file.c bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO); npages 158 fs/ceph/file.c *num_bvecs = npages; npages 1292 fs/cifs/cifsglob.h unsigned int npages; npages 3549 fs/cifs/file.c unsigned int npages, rsize; npages 3617 fs/cifs/file.c npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE; npages 3619 fs/cifs/file.c rdata->tailsz = npages > 1 ? 
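The fs/cifs/file.c expression split across the entries just above and below (lines 3617-3620) sizes a user-backed read: npages counts the pages covered by cur_len bytes starting start bytes into the first page, and tailsz is whatever lands in the last page. A standalone arithmetic check of those two lines, assuming 4 KiB pages (a sketch, not the kernel code):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long cur_len = 10000, start = 100;
            unsigned long npages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
            unsigned long tailsz = npages > 1 ?
                    cur_len - (PAGE_SIZE - start) - (npages - 2) * PAGE_SIZE :
                    cur_len;

            /* 3996 bytes fit in page 0, 4096 in page 1, 1908 in page 2 */
            printf("npages=%lu tailsz=%lu\n", npages, tailsz); /* 3, 1908 */
            return 0;
    }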
npages 3620 fs/cifs/file.c cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE : npages 3625 fs/cifs/file.c npages = DIV_ROUND_UP(cur_len, PAGE_SIZE); npages 3627 fs/cifs/file.c rdata = cifs_readdata_alloc(npages, npages 3635 fs/cifs/file.c rc = cifs_read_allocate_pages(rdata, npages); npages 3647 fs/cifs/file.c rdata->nr_pages = npages; npages 825 fs/cifs/misc.c for (i = 0; i < ctx->npages; i++) { npages 843 fs/cifs/misc.c unsigned int npages = 0; npages 884 fs/cifs/misc.c while (count && npages < max_pages) { npages 902 fs/cifs/misc.c if (npages + cur_npages > max_pages) { npages 904 fs/cifs/misc.c npages + cur_npages, max_pages); npages 910 fs/cifs/misc.c bv[npages + i].bv_page = pages[i]; npages 911 fs/cifs/misc.c bv[npages + i].bv_offset = start; npages 912 fs/cifs/misc.c bv[npages + i].bv_len = len - start; npages 917 fs/cifs/misc.c npages += cur_npages; npages 923 fs/cifs/misc.c ctx->npages = npages; npages 924 fs/cifs/misc.c iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len); npages 3790 fs/cifs/smb2ops.c unsigned int npages; npages 3796 fs/cifs/smb2ops.c npages = old_rq[i - 1].rq_npages; npages 3797 fs/cifs/smb2ops.c pages = kmalloc_array(npages, sizeof(struct page *), npages 3803 fs/cifs/smb2ops.c new_rq[i].rq_npages = npages; npages 3812 fs/cifs/smb2ops.c for (j = 0; j < npages; j++) { npages 3819 fs/cifs/smb2ops.c for (j = 0; j < npages; j++) { npages 3860 fs/cifs/smb2ops.c unsigned int npages, unsigned int page_data_size) npages 3874 fs/cifs/smb2ops.c rqst.rq_npages = npages; npages 3893 fs/cifs/smb2ops.c unsigned int npages, unsigned int len) npages 3898 fs/cifs/smb2ops.c for (i = 0; i < npages; i++) { npages 3921 fs/cifs/smb2ops.c init_read_bvec(struct page **pages, unsigned int npages, unsigned int data_size, npages 3927 fs/cifs/smb2ops.c bvec = kcalloc(npages, sizeof(struct bio_vec), GFP_KERNEL); npages 3931 fs/cifs/smb2ops.c for (i = 0; i < npages; i++) { npages 3951 fs/cifs/smb2ops.c unsigned int npages, unsigned int page_data_size) npages 4049 fs/cifs/smb2ops.c rdata->result = init_read_bvec(pages, npages, page_data_size, npages 4056 fs/cifs/smb2ops.c iov_iter_bvec(&iter, WRITE, bvec, npages, data_len); npages 4059 fs/cifs/smb2ops.c WARN_ONCE(npages > 0, "read data can be either in buf or in pages"); npages 4087 fs/cifs/smb2ops.c unsigned int npages; npages 4100 fs/cifs/smb2ops.c dw->ppages, dw->npages, dw->len); npages 4114 fs/cifs/smb2ops.c dw->ppages, dw->npages, dw->len); npages 4120 fs/cifs/smb2ops.c for (i = dw->npages-1; i >= 0; i--) npages 4135 fs/cifs/smb2ops.c unsigned int npages; npages 4154 fs/cifs/smb2ops.c npages = DIV_ROUND_UP(len, PAGE_SIZE); npages 4156 fs/cifs/smb2ops.c pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); npages 4162 fs/cifs/smb2ops.c for (; i < npages; i++) { npages 4171 fs/cifs/smb2ops.c rc = read_data_into_pages(server, pages, npages, len); npages 4195 fs/cifs/smb2ops.c dw->npages = npages; npages 4206 fs/cifs/smb2ops.c pages, npages, len); npages 4218 fs/cifs/smb2ops.c pages, npages, len); npages 271 fs/ext2/dir.c unsigned long npages = dir_pages(inode); npages 282 fs/ext2/dir.c for ( ; n < npages; n++, offset = 0) { npages 347 fs/ext2/dir.c unsigned long npages = dir_pages(dir); npages 353 fs/ext2/dir.c if (npages == 0) npages 360 fs/ext2/dir.c if (start >= npages) npages 385 fs/ext2/dir.c if (++n >= npages) npages 471 fs/ext2/dir.c unsigned long npages = dir_pages(dir); npages 482 fs/ext2/dir.c for (n = 0; n <= npages; n++) { npages 647 fs/ext2/dir.c unsigned long i, npages = dir_pages(inode); npages 650 
fs/ext2/dir.c for (i = 0; i < npages; i++) { npages 286 fs/f2fs/data.c static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) npages 291 fs/f2fs/data.c bio = f2fs_bio_alloc(sbi, npages, true); npages 270 fs/f2fs/debug.c unsigned npages = NODE_MAPPING(sbi)->nrpages; npages 271 fs/f2fs/debug.c si->page_mem += (unsigned long long)npages << PAGE_SHIFT; npages 274 fs/f2fs/debug.c unsigned npages = META_MAPPING(sbi)->nrpages; npages 275 fs/f2fs/debug.c si->page_mem += (unsigned long long)npages << PAGE_SHIFT; npages 309 fs/f2fs/dir.c unsigned long npages = dir_blocks(dir); npages 320 fs/f2fs/dir.c if (npages == 0) { npages 977 fs/f2fs/dir.c unsigned long npages = dir_blocks(inode); npages 1002 fs/f2fs/dir.c for (; n < npages; n++, ctx->pos = n * NR_DENTRY_IN_BLOCK) { npages 1012 fs/f2fs/dir.c if (npages - n > 1 && !ra_has_index(ra, n)) npages 1014 fs/f2fs/dir.c min(npages - n, (pgoff_t)MAX_DIR_RA_PAGES)); npages 2216 fs/f2fs/f2fs.h int npages, bool no_fail) npages 2222 fs/f2fs/f2fs.h bio = bio_alloc(GFP_NOIO, npages); npages 2224 fs/f2fs/f2fs.h bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages); npages 2232 fs/f2fs/f2fs.h return bio_alloc(GFP_KERNEL, npages); npages 3573 fs/f2fs/segment.c int npages = f2fs_npages_for_summary_flush(sbi, true); npages 3575 fs/f2fs/segment.c if (npages >= 2) npages 3576 fs/f2fs/segment.c f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages, npages 22 fs/fuse/file.c static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, npages 27 fs/fuse/file.c pages = kzalloc(npages * (sizeof(struct page *) + npages 29 fs/fuse/file.c *desc = (void *) (pages + npages); npages 650 fs/fuse/file.c unsigned int npages) npages 657 fs/fuse/file.c ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL, npages 1388 fs/fuse/file.c unsigned npages; npages 1401 fs/fuse/file.c npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE; npages 1404 fs/fuse/file.c fuse_page_descs_length_init(ap->descs, ap->num_pages, npages); npages 1406 fs/fuse/file.c ap->num_pages += npages; npages 1914 fs/fuse/file.c unsigned int npages = min_t(unsigned int, npages 1918 fs/fuse/file.c WARN_ON(npages <= data->max_pages); npages 1920 fs/fuse/file.c pages = fuse_pages_alloc(npages, GFP_NOFS, &descs); npages 1929 fs/fuse/file.c data->max_pages = npages; npages 4035 fs/jfs/jfs_dmap.c #define BMAPPGTOLEV(npages) \ npages 4036 fs/jfs/jfs_dmap.c (((npages) <= 3 + MAXL0PAGES) ? 0 : \ npages 4037 fs/jfs/jfs_dmap.c ((npages) <= 2 + MAXL1PAGES) ? 
1 : 2) npages 4043 fs/jfs/jfs_dmap.c s64 npages, ndmaps; npages 4048 fs/jfs/jfs_dmap.c npages = nblocks >> JFS_SBI(sb)->l2nbperpage; npages 4049 fs/jfs/jfs_dmap.c level = BMAPPGTOLEV(npages); npages 4056 fs/jfs/jfs_dmap.c npages--; /* skip the first global control page */ npages 4058 fs/jfs/jfs_dmap.c npages -= (2 - level); npages 4059 fs/jfs/jfs_dmap.c npages--; /* skip top level's control page */ npages 4063 fs/jfs/jfs_dmap.c complete = (u32) npages / factor; npages 4068 fs/jfs/jfs_dmap.c npages = (u32) npages % factor; npages 4070 fs/jfs/jfs_dmap.c npages--; npages 2372 fs/jfs/jfs_logmgr.c int npages = 0; npages 2383 fs/jfs/jfs_logmgr.c npages = logSize >> sbi->l2nbperpage; npages 2403 fs/jfs/jfs_logmgr.c logsuper->size = cpu_to_le32(npages); npages 2441 fs/jfs/jfs_logmgr.c lp->h.page = lp->t.page = cpu_to_le32(npages - 3); npages 2460 fs/jfs/jfs_logmgr.c for (lspn = 0; lspn < npages - 3; lspn++) { npages 2563 fs/jfs/jfs_xtree.c int nb, npages, nblks; npages 2663 fs/jfs/jfs_xtree.c npages = (nbytes + (CM_BSIZE - 1)) >> CM_L2BSIZE; npages 2673 fs/jfs/jfs_xtree.c offset += nb, pno++, npages--) { npages 2678 fs/jfs/jfs_xtree.c if (rc = cmRead(ip, offset, npages, &cp)) npages 87 fs/minix/dir.c unsigned long npages = dir_pages(inode); npages 99 fs/minix/dir.c for ( ; n < npages; n++, offset = 0) { npages 159 fs/minix/dir.c unsigned long npages = dir_pages(dir); npages 167 fs/minix/dir.c for (n = 0; n < npages; n++) { npages 208 fs/minix/dir.c unsigned long npages = dir_pages(dir); npages 223 fs/minix/dir.c for (n = 0; n <= npages; n++) { npages 364 fs/minix/dir.c unsigned long i, npages = dir_pages(inode); npages 369 fs/minix/dir.c for (i = 0; i < npages; i++) { npages 271 fs/nfs/blocklayout/blocklayout.c header->page_array.npages, f_offset, npages 283 fs/nfs/blocklayout/blocklayout.c for (i = pg_index; i < header->page_array.npages; i++) { npages 316 fs/nfs/blocklayout/blocklayout.c header->page_array.npages - i, npages 431 fs/nfs/blocklayout/blocklayout.c for (i = pg_index; i < header->page_array.npages; i++) { npages 445 fs/nfs/blocklayout/blocklayout.c bio = do_add_page_to_bio(bio, header->page_array.npages - i, npages 586 fs/nfs/dir.c void nfs_readdir_free_pages(struct page **pages, unsigned int npages) npages 589 fs/nfs/dir.c for (i = 0; i < npages; i++) npages 598 fs/nfs/dir.c int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages) npages 602 fs/nfs/dir.c for (i = 0; i < npages; i++) { npages 279 fs/nfs/direct.c static void nfs_direct_release_pages(struct page **pages, unsigned int npages) npages 282 fs/nfs/direct.c for (i = 0; i < npages; i++) npages 467 fs/nfs/direct.c unsigned npages, i; npages 476 fs/nfs/direct.c npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; npages 477 fs/nfs/direct.c for (i = 0; i < npages; i++) { npages 500 fs/nfs/direct.c nfs_direct_release_pages(pagevec, npages); npages 873 fs/nfs/direct.c unsigned npages, i; npages 882 fs/nfs/direct.c npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE; npages 883 fs/nfs/direct.c for (i = 0; i < npages; i++) { npages 914 fs/nfs/direct.c nfs_direct_release_pages(pagevec, npages); npages 450 fs/nfs/fscache.c unsigned npages = *nr_pages; npages 454 fs/nfs/fscache.c nfs_i_fscache(inode), npages, inode); npages 461 fs/nfs/fscache.c if (*nr_pages < npages) npages 463 fs/nfs/fscache.c npages); npages 198 fs/nfs/nfs3acl.c unsigned int npages = 1 + ((args.len - 1) >> PAGE_SHIFT); npages 202 fs/nfs/nfs3acl.c args.pages[args.npages] = alloc_page(GFP_KERNEL); npages 203 fs/nfs/nfs3acl.c if 
(args.pages[args.npages] == NULL) npages 205 fs/nfs/nfs3acl.c args.npages++; npages 206 fs/nfs/nfs3acl.c } while (args.npages < npages); npages 237 fs/nfs/nfs3acl.c while (args.npages != 0) { npages 238 fs/nfs/nfs3acl.c args.npages--; npages 239 fs/nfs/nfs3acl.c __free_page(args.pages[args.npages]); npages 1352 fs/nfs/nfs3xdr.c if (args->npages != 0) npages 5615 fs/nfs/nfs4proc.c unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; npages 5618 fs/nfs/nfs4proc.c if (npages > ARRAY_SIZE(pages)) npages 5621 fs/nfs/nfs4proc.c for (i = 0; i < npages; i++) { npages 5632 fs/nfs/nfs4proc.c args.acl_len = npages * PAGE_SIZE; npages 5635 fs/nfs/nfs4proc.c __func__, buf, buflen, npages, args.acl_len); npages 5660 fs/nfs/nfs4proc.c for (i = 0; i < npages; i++) npages 5719 fs/nfs/nfs4proc.c unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); npages 5724 fs/nfs/nfs4proc.c if (npages > ARRAY_SIZE(pages)) npages 773 fs/nfs/pagelist.c pg_array->npages = pagecount; npages 780 fs/nfs/pagelist.c pg_array->npages = 0; npages 1028 fs/nfs/pnfs.c size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT; npages 1029 fs/nfs/pnfs.c if (npages < max_pages) npages 1030 fs/nfs/pnfs.c max_pages = npages; npages 413 fs/nfs/read.c unsigned long npages; npages 451 fs/nfs/read.c npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> npages 453 fs/nfs/read.c nfs_add_stats(inode, NFSIOS_READPAGES, npages); npages 270 fs/nilfs2/dir.c unsigned long npages = dir_pages(inode); npages 275 fs/nilfs2/dir.c for ( ; n < npages; n++, offset = 0) { npages 332 fs/nilfs2/dir.c unsigned long npages = dir_pages(dir); npages 337 fs/nilfs2/dir.c if (npages == 0) npages 344 fs/nilfs2/dir.c if (start >= npages) npages 368 fs/nilfs2/dir.c if (++n >= npages) npages 448 fs/nilfs2/dir.c unsigned long npages = dir_pages(dir); npages 459 fs/nilfs2/dir.c for (n = 0; n <= npages; n++) { npages 624 fs/nilfs2/dir.c unsigned long i, npages = dir_pages(inode); npages 626 fs/nilfs2/dir.c for (i = 0; i < npages; i++) { npages 89 fs/orangefs/inode.c int npages; npages 107 fs/orangefs/inode.c for (i = 0; i < ow->npages; i++) { npages 119 fs/orangefs/inode.c iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len); npages 131 fs/orangefs/inode.c for (i = 0; i < ow->npages; i++) { npages 146 fs/orangefs/inode.c for (i = 0; i < ow->npages; i++) { npages 178 fs/orangefs/inode.c if (ow->npages == 0) { npages 183 fs/orangefs/inode.c ow->pages[ow->npages++] = page; npages 189 fs/orangefs/inode.c ow->npages = 0; npages 195 fs/orangefs/inode.c ow->pages[ow->npages++] = page; npages 201 fs/orangefs/inode.c if (ow->npages) { npages 203 fs/orangefs/inode.c ow->npages = 0; npages 210 fs/orangefs/inode.c if (ow->npages == ow->maxpages) { npages 212 fs/orangefs/inode.c ow->npages = 0; npages 241 fs/orangefs/inode.c if (ow->npages) npages 118 fs/qnx6/dir.c unsigned long npages = dir_pages(inode); npages 127 fs/qnx6/dir.c for ( ; !done && n < npages; n++, start = 0) { npages 220 fs/qnx6/dir.c unsigned long npages = dir_pages(dir); npages 227 fs/qnx6/dir.c if (npages == 0) npages 230 fs/qnx6/dir.c if (start >= npages) npages 262 fs/qnx6/dir.c if (++n >= npages) npages 64 fs/ramfs/file-nommu.c unsigned long npages, xpages, loop; npages 90 fs/ramfs/file-nommu.c npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT; npages 95 fs/ramfs/file-nommu.c for (loop = npages; loop < xpages; loop++) npages 99 fs/ramfs/file-nommu.c newsize = PAGE_SIZE * npages; npages 104 fs/ramfs/file-nommu.c for (loop = 0; loop < npages; loop++) { npages 123 fs/ramfs/file-nommu.c while (loop < npages) 
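Most of the fs/*/dir.c hits in this stretch (ext2, minix, nilfs2, qnx6 above; sysv and ufs just below) share one loop shape: derive npages from the directory inode's size, then walk pages 0..npages-1 looking for an entry. A hedged sketch of that shape, with dir_pages() written out as the usual round-up (a local stand-in, not the kernel helper):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* assumed equivalent of the kernel's dir_pages(inode) */
    static unsigned long dir_pages(unsigned long i_size)
    {
            return (i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
            unsigned long n, npages = dir_pages(12345); /* -> 4 */

            for (n = 0; n < npages; n++)
                    printf("scan directory page %lu\n", n); /* kernel code maps and searches it */
            return 0;
    }

Note that several of the add-entry loops above run n <= npages rather than n < npages; the extra iteration gives the caller a slot to append a new entry in a fresh page past the current end of the directory.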
npages 69 fs/sysv/dir.c unsigned long npages = dir_pages(inode); npages 80 fs/sysv/dir.c for ( ; n < npages; n++, offset = 0) { npages 133 fs/sysv/dir.c unsigned long npages = dir_pages(dir); npages 140 fs/sysv/dir.c if (start >= npages) npages 161 fs/sysv/dir.c if (++n >= npages) npages 180 fs/sysv/dir.c unsigned long npages = dir_pages(dir); npages 187 fs/sysv/dir.c for (n = 0; n <= npages; n++) { npages 288 fs/sysv/dir.c unsigned long i, npages = dir_pages(inode); npages 290 fs/sysv/dir.c for (i = 0; i < npages; i++) { npages 258 fs/ufs/dir.c unsigned long npages = dir_pages(dir); npages 265 fs/ufs/dir.c if (npages == 0 || namelen > UFS_MAXNAMLEN) npages 273 fs/ufs/dir.c if (start >= npages) npages 290 fs/ufs/dir.c if (++n >= npages) npages 316 fs/ufs/dir.c unsigned long npages = dir_pages(dir); npages 329 fs/ufs/dir.c for (n = 0; n <= npages; n++) { npages 430 fs/ufs/dir.c unsigned long npages = dir_pages(inode); npages 440 fs/ufs/dir.c for ( ; n < npages; n++, offset = 0) { npages 595 fs/ufs/dir.c unsigned long i, npages = dir_pages(inode); npages 597 fs/ufs/dir.c for (i = 0; i < npages; i++) { npages 62 include/crypto/if_alg.h unsigned int npages; npages 1327 include/linux/efi.h static inline void memrange_efi_to_native(u64 *addr, u64 *npages) npages 1329 include/linux/efi.h *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr); npages 345 include/linux/kvm_host.h unsigned long npages; npages 355 include/linux/kvm_host.h return ALIGN(memslot->npages, BITS_PER_LONG) / 8; npages 585 include/linux/kvm_host.h memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ npages 683 include/linux/kvm_host.h unsigned long npages); npages 1018 include/linux/kvm_host.h gfn < memslots[slot].base_gfn + memslots[slot].npages) npages 1031 include/linux/kvm_host.h gfn < memslots[start].base_gfn + memslots[start].npages) { npages 196 include/linux/migrate.h unsigned long npages; npages 646 include/linux/mlx4/device.h int npages; npages 1112 include/linux/mlx4/device.h int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, npages 1118 include/linux/mlx4/device.h int npages, int page_shift, struct mlx4_mr *mr); npages 1126 include/linux/mlx4/device.h int start_index, int npages, u64 *page_list); npages 1416 include/linux/mlx4/device.h int npages, u64 iova, u32 *lkey, u32 *rkey); npages 1538 include/linux/mlx4/device.h u64 iova, u64 size, int npages, npages 330 include/linux/mlx5/driver.h int npages; npages 943 include/linux/mlx5/driver.h gfp_t flags, int npages); npages 968 include/linux/mlx5/driver.h s32 npages, bool ec_function); npages 1085 include/linux/mm.h void put_user_pages_dirty_lock(struct page **pages, unsigned long npages, npages 1088 include/linux/mm.h void put_user_pages(struct page **pages, unsigned long npages); npages 2324 include/linux/mm.h extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages); npages 2325 include/linux/mm.h extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); npages 512 include/linux/mtd/nand.h unsigned int npages; npages 514 include/linux/mtd/nand.h npages = pos->page + npages 521 include/linux/mtd/nand.h return (loff_t)npages * nand->memorg.pagesize; npages 852 include/linux/nfs_xdr.h unsigned int npages; npages 1492 include/linux/nfs_xdr.h unsigned int npages; /* Max length of pagevec */ npages 67 include/rdma/ib_umem_odp.h int npages; npages 153 include/uapi/drm/nouveau_drm.h __u64 npages; npages 53 include/uapi/xen/privcmd.h __u64 npages; npages 1257 lib/iov_iter.c unsigned 
npages; npages 1269 lib/iov_iter.c npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1; npages 1270 lib/iov_iter.c capacity = min(npages,maxpages) * PAGE_SIZE - *start; npages 1328 lib/iov_iter.c int npages; npages 1338 lib/iov_iter.c npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1; npages 1339 lib/iov_iter.c n = npages * PAGE_SIZE - *start; npages 1343 lib/iov_iter.c npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); npages 1344 lib/iov_iter.c p = get_pages_array(npages); npages 1554 lib/iov_iter.c int npages = 0; npages 1571 lib/iov_iter.c npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1; npages 1572 lib/iov_iter.c if (npages >= maxpages) npages 1576 lib/iov_iter.c npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) npages 1578 lib/iov_iter.c if (npages >= maxpages) npages 1581 lib/iov_iter.c npages++; npages 1582 lib/iov_iter.c if (npages >= maxpages) npages 1586 lib/iov_iter.c npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) npages 1588 lib/iov_iter.c if (npages >= maxpages) npages 1592 lib/iov_iter.c return npages; npages 54 mm/gup.c void put_user_pages_dirty_lock(struct page **pages, unsigned long npages, npages 66 mm/gup.c put_user_pages(pages, npages); npages 70 mm/gup.c for (index = 0; index < npages; index++) { npages 108 mm/gup.c void put_user_pages(struct page **pages, unsigned long npages) npages 117 mm/gup.c for (index = 0; index < npages; index++) npages 358 mm/hmm.c const uint64_t *pfns, unsigned long npages, npages 370 mm/hmm.c for (i = 0; i < npages; ++i) { npages 384 mm/hmm.c unsigned long i, npages; npages 388 mm/hmm.c npages = (end - addr) >> PAGE_SHIFT; npages 390 mm/hmm.c hmm_range_need_fault(hmm_vma_walk, pfns, npages, npages 410 mm/hmm.c unsigned long pfn, npages, i; npages 414 mm/hmm.c npages = (end - addr) >> PAGE_SHIFT; npages 416 mm/hmm.c hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags, npages 571 mm/hmm.c unsigned long npages; npages 575 mm/hmm.c npages = (end - addr) >> PAGE_SHIFT; npages 578 mm/hmm.c hmm_range_need_fault(hmm_vma_walk, pfns, npages, npages 672 mm/hmm.c unsigned long i, npages, pfn; npages 680 mm/hmm.c npages = (end - addr) >> PAGE_SHIFT; npages 684 mm/hmm.c hmm_range_need_fault(hmm_vma_walk, pfns, npages, npages 691 mm/hmm.c for (i = 0; i < npages; ++i, ++pfn) { npages 967 mm/hmm.c unsigned long i, npages, mapped; npages 974 mm/hmm.c npages = (range->end - range->start) >> PAGE_SHIFT; npages 975 mm/hmm.c for (i = 0, mapped = 0; i < npages; ++i) { npages 1015 mm/hmm.c for (npages = i, i = 0; (i < npages) && mapped; ++i) { npages 1056 mm/hmm.c unsigned long i, npages; npages 1067 mm/hmm.c npages = (range->end - range->start) >> PAGE_SHIFT; npages 1068 mm/hmm.c for (i = 0; i < npages; ++i) { npages 2164 mm/migrate.c migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; npages 2165 mm/migrate.c migrate->dst[migrate->npages] = 0; npages 2166 mm/migrate.c migrate->npages++; npages 2181 mm/migrate.c migrate->dst[migrate->npages] = 0; npages 2182 mm/migrate.c migrate->src[migrate->npages++] = 0; npages 2339 mm/migrate.c migrate->dst[migrate->npages] = 0; npages 2340 mm/migrate.c migrate->src[migrate->npages++] = mpfn; npages 2377 mm/migrate.c migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); npages 2444 mm/migrate.c const unsigned long npages = migrate->npages; npages 2451 mm/migrate.c for (i = 0; (i < npages) && migrate->cpages; i++) { npages 2526 mm/migrate.c for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) { npages 2555 mm/migrate.c const unsigned long 
npages = migrate->npages; npages 2559 mm/migrate.c for (i = 0; i < npages; i++) { npages 2580 mm/migrate.c for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) { npages 2684 mm/migrate.c args->npages = 0; npages 2844 mm/migrate.c const unsigned long npages = migrate->npages; npages 2850 mm/migrate.c for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { npages 2931 mm/migrate.c const unsigned long npages = migrate->npages; npages 2934 mm/migrate.c for (i = 0; i < npages; i++) { npages 3287 mm/mmap.c bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) npages 3289 mm/mmap.c if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) npages 3293 mm/mmap.c mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { npages 3296 mm/mmap.c mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) npages 3301 mm/mmap.c (mm->data_vm + npages) << PAGE_SHIFT, npages 3312 mm/mmap.c void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) npages 3314 mm/mmap.c mm->total_vm += npages; npages 3317 mm/mmap.c mm->exec_vm += npages; npages 3319 mm/mmap.c mm->stack_vm += npages; npages 3321 mm/mmap.c mm->data_vm += npages; npages 1388 mm/nommu.c unsigned long npages; npages 1412 mm/nommu.c npages = (addr - vma->vm_start) >> PAGE_SHIFT; npages 1418 mm/nommu.c region->vm_pgoff = new->vm_pgoff += npages; npages 1429 mm/nommu.c vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; npages 5650 net/core/skbuff.c int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; npages 5660 net/core/skbuff.c if (npages > MAX_SKB_FRAGS) npages 5668 net/core/skbuff.c skb->truesize += npages << PAGE_SHIFT; npages 5670 net/core/skbuff.c for (i = 0; npages > 0; i++) { npages 5674 net/core/skbuff.c if (npages >= 1 << order) { npages 5695 net/core/skbuff.c npages -= 1 << order; npages 4708 net/ipv4/tcp_input.c int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); npages 4710 net/ipv4/tcp_input.c data_len = npages << PAGE_SHIFT; npages 35 net/rds/ib_fmr.c struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages) npages 42 net/rds/ib_fmr.c if (npages <= RDS_MR_8K_MSG_SIZE) npages 54 net/rds/ib_frmr.c int npages) npages 61 net/rds/ib_frmr.c if (npages <= RDS_MR_8K_MSG_SIZE) npages 117 net/rds/ib_mr.h int npages); npages 205 net/sunrpc/auth_gss/gss_rpc_upcall.c for (i = 0; i < arg->npages && arg->pages[i]; i++) npages 213 net/sunrpc/auth_gss/gss_rpc_upcall.c arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE); npages 214 net/sunrpc/auth_gss/gss_rpc_upcall.c arg->pages = kcalloc(arg->npages, sizeof(struct page *), GFP_KERNEL); npages 773 net/sunrpc/auth_gss/gss_rpc_xdr.c arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE); npages 138 net/sunrpc/auth_gss/gss_rpc_xdr.h unsigned int npages; npages 863 net/sunrpc/svcsock.c unsigned int i, len, npages; npages 868 net/sunrpc/svcsock.c npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; npages 869 net/sunrpc/svcsock.c for (i = 0; i < npages; i++) { npages 882 net/sunrpc/svcsock.c unsigned int i, len, npages; npages 887 net/sunrpc/svcsock.c npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; npages 888 net/sunrpc/svcsock.c for (i = 0; i < npages; i++) { npages 896 net/sunrpc/svcsock.c unsigned int i, len, npages; npages 901 net/sunrpc/svcsock.c npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; npages 902 net/sunrpc/svcsock.c for (i = 0; i < npages; i++) { npages 941 net/sunrpc/xprtrdma/rpc_rdma.c int i, npages, curlen; npages 972 net/sunrpc/xprtrdma/rpc_rdma.c npages = PAGE_ALIGN(page_base + 
pagelist_len) >> PAGE_SHIFT; npages 973 net/sunrpc/xprtrdma/rpc_rdma.c for (i = 0; i < npages; i++) { npages 95 sound/pci/emu10k1/memory.c static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp) npages 98 sound/pci/emu10k1/memory.c int max_size = npages; npages 108 sound/pci/emu10k1/memory.c if (size == npages) { npages 378 sound/pci/emu10k1/memory.c size_t npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; npages 379 sound/pci/emu10k1/memory.c size_t size_real = npages * PAGE_SIZE; npages 94 tools/testing/selftests/kvm/include/kvm_util.h uint64_t guest_paddr, uint32_t slot, uint64_t npages, npages 577 tools/testing/selftests/kvm/lib/kvm_util.c uint64_t guest_paddr, uint32_t slot, uint64_t npages, npages 589 tools/testing/selftests/kvm/lib/kvm_util.c TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) npages 594 tools/testing/selftests/kvm/lib/kvm_util.c guest_paddr, npages, vm->max_gfn, vm->page_size); npages 601 tools/testing/selftests/kvm/lib/kvm_util.c vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); npages 608 tools/testing/selftests/kvm/lib/kvm_util.c guest_paddr, npages, vm->page_size, npages 623 tools/testing/selftests/kvm/lib/kvm_util.c slot, guest_paddr, npages, npages 631 tools/testing/selftests/kvm/lib/kvm_util.c region->mmap_size = npages * vm->page_size; npages 661 tools/testing/selftests/kvm/lib/kvm_util.c ret = madvise(region->host_mem, npages * vm->page_size, npages 667 tools/testing/selftests/kvm/lib/kvm_util.c region->host_mem, npages * vm->page_size, src_type); npages 672 tools/testing/selftests/kvm/lib/kvm_util.c guest_paddr >> vm->page_shift, npages); npages 676 tools/testing/selftests/kvm/lib/kvm_util.c region->region.memory_size = npages * vm->page_size; npages 999 tools/testing/selftests/kvm/lib/kvm_util.c size_t npages = size / page_size; npages 1004 tools/testing/selftests/kvm/lib/kvm_util.c while (npages--) { npages 496 tools/testing/selftests/kvm/lib/x86_64/vmx.c size_t npages = size / page_size; npages 501 tools/testing/selftests/kvm/lib/x86_64/vmx.c while (npages--) { npages 417 virt/kvm/arm/mmu.c phys_addr_t end = addr + PAGE_SIZE * memslot->npages; npages 928 virt/kvm/arm/mmu.c phys_addr_t size = PAGE_SIZE * memslot->npages; npages 1545 virt/kvm/arm/mmu.c phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; npages 1620 virt/kvm/arm/mmu.c size = memslot->npages * PAGE_SIZE; npages 2034 virt/kvm/arm/mmu.c (memslot->npages << PAGE_SHIFT)); npages 2294 virt/kvm/arm/mmu.c if (memslot->base_gfn + memslot->npages >= npages 2376 virt/kvm/arm/mmu.c unsigned long npages) npages 2394 virt/kvm/arm/mmu.c phys_addr_t size = slot->npages << PAGE_SHIFT; npages 603 virt/kvm/kvm_main.c free->npages = 0; npages 891 virt/kvm/kvm_main.c WARN_ON(mslots[i].npages || !new->npages); npages 895 virt/kvm/kvm_main.c WARN_ON(new->npages || !mslots[i].npages); npages 903 virt/kvm/kvm_main.c if (!mslots[i + 1].npages) npages 919 virt/kvm/kvm_main.c if (new->npages) { npages 996 virt/kvm/kvm_main.c unsigned long npages; npages 1029 virt/kvm/kvm_main.c npages = mem->memory_size >> PAGE_SHIFT; npages 1031 virt/kvm/kvm_main.c if (npages > KVM_MEM_MAX_NR_PAGES) npages 1038 virt/kvm/kvm_main.c new.npages = npages; npages 1041 virt/kvm/kvm_main.c if (npages) { npages 1042 virt/kvm/kvm_main.c if (!old.npages) npages 1046 virt/kvm/kvm_main.c (npages != old.npages) || npages 1060 virt/kvm/kvm_main.c if (!old.npages) npages 1074 virt/kvm/kvm_main.c if (!((base_gfn + npages <= slot->base_gfn) || npages 1075 virt/kvm/kvm_main.c 
(base_gfn >= slot->base_gfn + slot->npages))) npages 1088 virt/kvm/kvm_main.c if (kvm_arch_create_memslot(kvm, &new, npages)) npages 1329 virt/kvm/kvm_main.c if (log->first_page > memslot->npages || npages 1330 virt/kvm/kvm_main.c log->num_pages > memslot->npages - log->first_page || npages 1331 virt/kvm/kvm_main.c (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63))) npages 1442 virt/kvm/kvm_main.c *nr_pages = slot->npages - (gfn - slot->base_gfn); npages 1522 virt/kvm/kvm_main.c int npages; npages 1532 virt/kvm/kvm_main.c npages = __get_user_pages_fast(addr, 1, 1, page); npages 1533 virt/kvm/kvm_main.c if (npages == 1) { npages 1553 virt/kvm/kvm_main.c int npages = 0; npages 1565 virt/kvm/kvm_main.c npages = get_user_pages_unlocked(addr, 1, &page, flags); npages 1566 virt/kvm/kvm_main.c if (npages != 1) npages 1567 virt/kvm/kvm_main.c return npages; npages 1580 virt/kvm/kvm_main.c return npages; npages 1662 virt/kvm/kvm_main.c int npages, r; npages 1673 virt/kvm/kvm_main.c npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn); npages 1674 virt/kvm/kvm_main.c if (npages == 1) npages 1678 virt/kvm/kvm_main.c if (npages == -EHWPOISON ||
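The section breaks off inside the virt/kvm/kvm_main.c overlap test at lines 1074-1075. Spelled out, it is the standard half-open interval intersection check: the new slot [base_gfn, base_gfn + npages) collides with an existing slot unless it ends at or before the slot's start, or starts at or after the slot's end. A self-contained sketch (struct and field names mirror the listing but are assumptions here):

    #include <stdbool.h>
    #include <stdio.h>

    struct memslot {
            unsigned long base_gfn;
            unsigned long npages;
    };

    static bool slots_overlap(const struct memslot *slot,
                              unsigned long base_gfn, unsigned long npages)
    {
            /* !(new ends before old starts || new starts after old ends) */
            return !(base_gfn + npages <= slot->base_gfn ||
                     base_gfn >= slot->base_gfn + slot->npages);
    }

    int main(void)
    {
            struct memslot s = { .base_gfn = 100, .npages = 50 }; /* [100, 150) */

            printf("%d\n", slots_overlap(&s, 140, 20)); /* [140,160) -> 1 */
            printf("%d\n", slots_overlap(&s, 150, 20)); /* [150,170) -> 0 */
            return 0;
    }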