nent 138 arch/alpha/kernel/pci_iommu.c long i, p, nent;
nent 153 arch/alpha/kernel/pci_iommu.c nent = arena->size >> PAGE_SHIFT;
nent 158 arch/alpha/kernel/pci_iommu.c while (i < n && p+i < nent) {
nent 3696 arch/powerpc/xmon/xmon.c int nent, assoc, new_cc = 1;
nent 3715 arch/powerpc/xmon/xmon.c nent = tlbcfg & 0xfff;
nent 3717 arch/powerpc/xmon/xmon.c for (i = 0; i < nent; i++) {
nent 207 arch/x86/include/uapi/asm/kvm.h __u32 nent;
nent 229 arch/x86/include/uapi/asm/kvm.h __u32 nent;
nent 206 arch/x86/kvm/cpuid.c if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
nent 209 arch/x86/kvm/cpuid.c if (cpuid->nent) {
nent 212 arch/x86/kvm/cpuid.c cpuid->nent));
nent 217 arch/x86/kvm/cpuid.c cpuid->nent * sizeof(struct kvm_cpuid_entry)))
nent 220 arch/x86/kvm/cpuid.c for (i = 0; i < cpuid->nent; i++) {
nent 232 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent = cpuid->nent;
nent 250 arch/x86/kvm/cpuid.c if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
nent 254 arch/x86/kvm/cpuid.c cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
nent 256 arch/x86/kvm/cpuid.c vcpu->arch.cpuid_nent = cpuid->nent;
nent 271 arch/x86/kvm/cpuid.c if (cpuid->nent < vcpu->arch.cpuid_nent)
nent 280 arch/x86/kvm/cpuid.c cpuid->nent = vcpu->arch.cpuid_nent;
nent 321 arch/x86/kvm/cpuid.c u32 func, int *nent, int maxnent)
nent 330 arch/x86/kvm/cpuid.c ++*nent;
nent 334 arch/x86/kvm/cpuid.c ++*nent;
nent 340 arch/x86/kvm/cpuid.c ++*nent;
nent 433 arch/x86/kvm/cpuid.c int *nent, int maxnent)
nent 510 arch/x86/kvm/cpuid.c if (WARN_ON(*nent >= maxnent))
nent 514 arch/x86/kvm/cpuid.c ++*nent;
nent 539 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 543 arch/x86/kvm/cpuid.c ++*nent;
nent 554 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 561 arch/x86/kvm/cpuid.c ++*nent;
nent 579 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 584 arch/x86/kvm/cpuid.c ++*nent;
nent 636 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 640 arch/x86/kvm/cpuid.c ++*nent;
nent 657 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 677 arch/x86/kvm/cpuid.c ++*nent;
nent 690 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 693 arch/x86/kvm/cpuid.c ++*nent;
nent 815 arch/x86/kvm/cpuid.c int *nent, int maxnent, unsigned int type)
nent 817 arch/x86/kvm/cpuid.c if (*nent >= maxnent)
nent 821 arch/x86/kvm/cpuid.c return __do_cpuid_func_emulated(entry, func, nent, maxnent);
nent 823 arch/x86/kvm/cpuid.c return __do_cpuid_func(entry, func, nent, maxnent);
nent 870 arch/x86/kvm/cpuid.c int limit, nent = 0, r = -E2BIG, i;
nent 879 arch/x86/kvm/cpuid.c if (cpuid->nent < 1)
nent 881 arch/x86/kvm/cpuid.c if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
nent 882 arch/x86/kvm/cpuid.c cpuid->nent = KVM_MAX_CPUID_ENTRIES;
nent 884 arch/x86/kvm/cpuid.c if (sanity_check_entries(entries, cpuid->nent, type))
nent 889 arch/x86/kvm/cpuid.c cpuid->nent));
nent 900 arch/x86/kvm/cpuid.c r = do_cpuid_func(&cpuid_entries[nent], ent->func,
nent 901 arch/x86/kvm/cpuid.c &nent, cpuid->nent, type);
nent 906 arch/x86/kvm/cpuid.c limit = cpuid_entries[nent - 1].eax;
nent 907 arch/x86/kvm/cpuid.c for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
nent 908 arch/x86/kvm/cpuid.c r = do_cpuid_func(&cpuid_entries[nent], func,
nent 909 arch/x86/kvm/cpuid.c &nent, cpuid->nent, type);
nent 917 arch/x86/kvm/cpuid.c nent * sizeof(struct kvm_cpuid_entry2)))
nent 919 arch/x86/kvm/cpuid.c cpuid->nent = nent;
nent 933 arch/x86/kvm/cpuid.c int nent = vcpu->arch.cpuid_nent;
nent 938 arch/x86/kvm/cpuid.c j = (j + 1) % nent;
nent 1799 arch/x86/kvm/hyperv.c int i, nent = ARRAY_SIZE(cpuid_entries);
nent 1806 arch/x86/kvm/hyperv.c --nent;
nent 1808 arch/x86/kvm/hyperv.c if (cpuid->nent < nent)
nent 1811 arch/x86/kvm/hyperv.c if (cpuid->nent > nent)
nent 1812 arch/x86/kvm/hyperv.c cpuid->nent = nent;
nent 1814 arch/x86/kvm/hyperv.c for (i = 0; i < nent; i++) {
nent 1910 arch/x86/kvm/hyperv.c nent * sizeof(struct kvm_cpuid_entry2)))
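The arch/x86/kvm hits above all follow one contract: userspace hands in a kvm_cpuid2 whose nent field is the capacity of its trailing entries[] array, the kernel emits an entry only while *nent < maxnent (bailing out with -E2BIG otherwise), increments *nent per emitted entry, and finally writes the used count back into cpuid->nent. A minimal user-space sketch of that fill-and-count convention, assuming nothing beyond standard C; emit() and struct entry are hypothetical stand-ins, not the KVM API:

    #include <stdio.h>

    struct entry { unsigned func, eax; };     /* stand-in for kvm_cpuid_entry2 */

    /* Mirror of the kernel's guard: refuse to emit once the caller's
     * capacity is reached, otherwise fill one slot and bump the count. */
    static int emit(struct entry *e, unsigned func, int *nent, int maxnent)
    {
        if (*nent >= maxnent)
            return -1;                        /* the kernel returns -E2BIG here */
        e[*nent].func = func;
        e[*nent].eax = func * 2;              /* placeholder for real CPUID output */
        ++*nent;
        return 0;
    }

    int main(void)
    {
        struct entry buf[4];
        int nent = 0;                         /* entries written so far */

        for (unsigned func = 0; func < 10; func++)
            if (emit(buf, func, &nent, 4) < 0)
                break;                        /* buffer full after 4 entries */

        printf("filled %d entries\n", nent);  /* prints: filled 4 entries */
        return 0;
    }

The hyperv.c hits have the same shape: clamp cpuid->nent down to what will actually be produced, and fail early if the caller offered fewer slots than needed.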
nent 63 arch/x86/xen/efi.c efi_systab_xen.nr_tables = info->cfg.nent;
nent 42 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c int nent;
nent 50 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c nent = (fuc.size / sizeof(struct gk20a_fw_av));
nent 51 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
nent 60 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c for (i = 0; i < nent; i++) {
nent 91 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c int nent;
nent 99 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c nent = (fuc.size / sizeof(struct gk20a_fw_aiv));
nent 100 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));
nent 109 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c for (i = 0; i < nent; i++) {
nent 136 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c int nent;
nent 144 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c nent = (fuc.size / sizeof(struct gk20a_fw_av));
nent 147 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c (sizeof(*init) * (nent + max_classes + 1)));
nent 155 drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c for (i = 0; i < nent; i++, init++) {
nent 84 drivers/infiniband/hw/hns/hns_roce_cq.c static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
nent 144 drivers/infiniband/hw/hns/hns_roce_cq.c nent, vector);
nent 255 drivers/infiniband/hw/hns/hns_roce_cq.c struct hns_roce_cq_buf *buf, u32 nent)
nent 260 drivers/infiniband/hw/hns/hns_roce_cq.c ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
nent 286 drivers/infiniband/hw/hns/hns_roce_cq.c hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz,
nent 955 drivers/infiniband/hw/hns/hns_roce_device.h dma_addr_t dma_handle, int nent, u32 vector);
nent 2072 drivers/infiniband/hw/hns/hns_roce_hw_v1.c u64 *mtts, dma_addr_t dma_handle, int nent,
nent 2108 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ilog2((unsigned int)nent));
nent 2556 drivers/infiniband/hw/hns/hns_roce_hw_v2.c u64 *mtts, dma_addr_t dma_handle, int nent,
nent 2569 drivers/infiniband/hw/hns/hns_roce_hw_v2.c V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
nent 101 drivers/infiniband/hw/mlx4/cq.c static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
nent 105 drivers/infiniband/hw/mlx4/cq.c err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
nent 127 drivers/infiniband/hw/mlx4/cq.c mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
nent 73 drivers/infiniband/hw/mlx5/cq.c static u8 sw_ownership_bit(int n, int nent)
nent 75 drivers/infiniband/hw/mlx5/cq.c return (n & nent) ? 1 : 0;
nent 659 drivers/infiniband/hw/mlx5/cq.c int nent,
nent 668 drivers/infiniband/hw/mlx5/cq.c nent * cqe_size,
nent 677 drivers/infiniband/hw/mlx5/cq.c buf->nent = nent;
nent 838 drivers/infiniband/hw/mlx5/cq.c for (i = 0; i < buf->nent; i++) {
nent 1205 drivers/infiniband/hw/mlx5/cq.c (i + 1) & cq->resize_buf->nent);
nent 1207 drivers/infiniband/hw/mlx5/cq.c sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
nent 455 drivers/infiniband/hw/mlx5/mlx5_ib.h int nent;
nent 1518 drivers/infiniband/hw/mlx5/odp.c .nent = MLX5_IB_NUM_PF_EQE,
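sw_ownership_bit() in mlx5/cq.c works because nent is a power of two: bit log2(nent) of a free-running index flips on every full lap around the ring, so producer and consumer can agree on who owns an entry without any extra state. A standalone sketch of that lap-parity trick (the function body is verbatim from the listing; the asserts are illustrative):

    #include <assert.h>

    /* For a power-of-two ring of nent entries, bit log2(nent) of a
     * monotonically increasing index n is the "lap parity". */
    static int sw_ownership_bit(int n, int nent)
    {
        return (n & nent) ? 1 : 0;
    }

    int main(void)
    {
        /* Ring of 8: indices 0..7 are lap 0, 8..15 lap 1, 16..23 lap 0. */
        assert(sw_ownership_bit(3, 8) == 0);
        assert(sw_ownership_bit(11, 8) == 1);
        assert(sw_ownership_bit(19, 8) == 0);
        return 0;
    }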
nent 160 drivers/infiniband/hw/mthca/mthca_allocator.c int mthca_array_init(struct mthca_array *array, int nent)
nent 162 drivers/infiniband/hw/mthca/mthca_allocator.c int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
nent 178 drivers/infiniband/hw/mthca/mthca_allocator.c void mthca_array_cleanup(struct mthca_array *array, int nent)
nent 182 drivers/infiniband/hw/mthca/mthca_allocator.c for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
nent 664 drivers/infiniband/hw/mthca/mthca_cmd.c int nent = 0;
nent 694 drivers/infiniband/hw/mthca/mthca_cmd.c pages[nent * 2] = cpu_to_be64(virt);
nent 698 drivers/infiniband/hw/mthca/mthca_cmd.c pages[nent * 2 + 1] =
nent 704 drivers/infiniband/hw/mthca/mthca_cmd.c if (++nent == MTHCA_MAILBOX_SIZE / 16) {
nent 705 drivers/infiniband/hw/mthca/mthca_cmd.c err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
nent 709 drivers/infiniband/hw/mthca/mthca_cmd.c nent = 0;
nent 714 drivers/infiniband/hw/mthca/mthca_cmd.c if (nent)
nent 715 drivers/infiniband/hw/mthca/mthca_cmd.c err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
nent 348 drivers/infiniband/hw/mthca/mthca_cq.c int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
nent 353 drivers/infiniband/hw/mthca/mthca_cq.c ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
nent 360 drivers/infiniband/hw/mthca/mthca_cq.c for (i = 0; i < nent; ++i)
nent 768 drivers/infiniband/hw/mthca/mthca_cq.c int mthca_init_cq(struct mthca_dev *dev, int nent,
nent 776 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.cqe = nent - 1;
nent 812 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
nent 826 drivers/infiniband/hw/mthca/mthca_cq.c cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
nent 422 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_array_init(struct mthca_array *array, int nent);
nent 423 drivers/infiniband/hw/mthca/mthca_dev.h void mthca_array_cleanup(struct mthca_array *array, int nent);
nent 498 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_init_cq(struct mthca_dev *dev, int nent,
nent 509 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
nent 184 drivers/infiniband/hw/mthca/mthca_eq.c mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
nent 230 drivers/infiniband/hw/mthca/mthca_eq.c unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
nent 466 drivers/infiniband/hw/mthca/mthca_eq.c int nent,
nent 479 drivers/infiniband/hw/mthca/mthca_eq.c eq->nent = roundup_pow_of_two(max(nent, 2));
nent 480 drivers/infiniband/hw/mthca/mthca_eq.c npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
nent 511 drivers/infiniband/hw/mthca/mthca_eq.c for (i = 0; i < eq->nent; ++i)
nent 535 drivers/infiniband/hw/mthca/mthca_eq.c eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
nent 560 drivers/infiniband/hw/mthca/mthca_eq.c eq->eqn, eq->nent);
nent 593 drivers/infiniband/hw/mthca/mthca_eq.c int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
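The mthca_eq.c hits show the usual ring-sizing recipe: round the requested count up to a power of two (minimum 2), after which "index & (nent - 1)" is the wrap mask and "ffs(nent) - 1" is the log2 size programmed into the hardware context. A small self-checking sketch of that arithmetic; the local roundup_pow_of_two() is a naive stand-in for the kernel macro in include/linux/log2.h:

    #include <assert.h>

    /* Naive stand-in for the kernel's roundup_pow_of_two(). */
    static unsigned int roundup_pow_of_two(unsigned int n)
    {
        unsigned int p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        int requested = 100;

        /* mthca/mlx4: eq->nent = roundup_pow_of_two(max(nent, 2)); */
        unsigned int nent = roundup_pow_of_two(requested > 2 ? requested : 2);

        assert(nent == 128);
        assert((130u & (nent - 1)) == 2);        /* consumer index 130 -> slot 2 */

        /* ffs(nent) - 1 == log2(nent) for a power of two, cf. the
         * logsize_usrpage lines in mthca_cq.c and mthca_eq.c. */
        assert(__builtin_ffs((int)nent) - 1 == 7);
        return 0;
    }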
nent 612 drivers/infiniband/hw/mthca/mthca_provider.c int nent;
nent 648 drivers/infiniband/hw/mthca/mthca_provider.c for (nent = 1; nent <= entries; nent <<= 1)
nent 651 drivers/infiniband/hw/mthca/mthca_provider.c err = mthca_init_cq(to_mdev(ibdev), nent, context,
nent 113 drivers/infiniband/hw/mthca/mthca_provider.h int nent;
nent 1414 drivers/infiniband/hw/qib/qib.h int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
nent 211 drivers/infiniband/hw/qib/qib_pcie.c int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
nent 229 drivers/infiniband/hw/qib/qib_pcie.c maxvec = (nent && *nent) ? *nent : 1;
nent 239 drivers/infiniband/hw/qib/qib_pcie.c if (nent)
nent 240 drivers/infiniband/hw/qib/qib_pcie.c *nent = !dd->pcidev->msix_enabled ? 0 : nvec;
nent 1655 drivers/iommu/arm-smmu-v3.c static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
nent 1659 drivers/iommu/arm-smmu-v3.c for (i = 0; i < nent; ++i) {
nent 716 drivers/iommu/omap-iommu.c int nent = 1;
nent 730 drivers/iommu/omap-iommu.c nent *= 16;
nent 734 drivers/iommu/omap-iommu.c bytes *= nent;
nent 735 drivers/iommu/omap-iommu.c memset(iopte, 0, nent * sizeof(*iopte));
nent 737 drivers/iommu/omap-iommu.c flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
nent 748 drivers/iommu/omap-iommu.c nent = 1; /* for the next L1 entry */
nent 752 drivers/iommu/omap-iommu.c nent *= 16;
nent 756 drivers/iommu/omap-iommu.c bytes *= nent;
nent 758 drivers/iommu/omap-iommu.c memset(iopgd, 0, nent * sizeof(*iopgd));
nent 759 drivers/iommu/omap-iommu.c flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);
nent 1202 drivers/mailbox/bcm-pdc-mailbox.c int nent;
nent 1211 drivers/mailbox/bcm-pdc-mailbox.c nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
nent 1212 drivers/mailbox/bcm-pdc-mailbox.c if (unlikely(nent == 0))
nent 1218 drivers/mailbox/bcm-pdc-mailbox.c nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
nent 1220 drivers/mailbox/bcm-pdc-mailbox.c if (unlikely(nent == 0)) {
nent 341 drivers/net/ethernet/mellanox/mlx4/cq.c int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
nent 381 drivers/net/ethernet/mellanox/mlx4/cq.c cpu_to_be32((ilog2(nent) << 24) |
nent 393 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_init_user_cqes(buf_addr, nent,
nent 398 drivers/net/ethernet/mellanox/mlx4/cq.c mlx4_init_kernel_cqes(buf_addr, nent,
nent 110 drivers/net/ethernet/mellanox/mlx4/eq.c unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
nent 124 drivers/net/ethernet/mellanox/mlx4/eq.c return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
nent 782 drivers/net/ethernet/mellanox/mlx4/eq.c eq->cons_index, eqe->owner, eq->nent,
nent 785 drivers/net/ethernet/mellanox/mlx4/eq.c !!(eq->cons_index & eq->nent) ? "HW" : "SW");
nent 807 drivers/net/ethernet/mellanox/mlx4/eq.c eq->cons_index, eqe->owner, eq->nent,
nent 809 drivers/net/ethernet/mellanox/mlx4/eq.c !!(eq->cons_index & eq->nent) ? "HW" : "SW");
nent 819 drivers/net/ethernet/mellanox/mlx4/eq.c eq->cons_index, eqe->owner, eq->nent,
nent 822 drivers/net/ethernet/mellanox/mlx4/eq.c !!(eq->cons_index & eq->nent) ? "HW" : "SW");
nent 969 drivers/net/ethernet/mellanox/mlx4/eq.c static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
nent 983 drivers/net/ethernet/mellanox/mlx4/eq.c eq->nent = roundup_pow_of_two(max(nent, 2));
nent 987 drivers/net/ethernet/mellanox/mlx4/eq.c npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;
nent 1038 drivers/net/ethernet/mellanox/mlx4/eq.c eq_context->log_eq_size = ilog2(eq->nent);
nent 1097 drivers/net/ethernet/mellanox/mlx4/eq.c int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;
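mlx4/eq.c decides event-queue ownership by XORing the top bit of the EQE's owner byte with the lap parity of the consumer index; when the two disagree the entry still belongs to hardware, which is exactly what the "HW"/"SW" diagnostic strings above report. A self-contained sketch of that test; struct eqe and get_sw_eqe() are reduced stand-ins for illustration:

    #include <stdio.h>

    struct eqe { unsigned char owner; };   /* reduced stand-in for an mlx4 EQE */

    /* The EQE is software-owned when the owner bit's parity matches the
     * lap parity of the consumer index (cf. mlx4/eq.c line 124 above). */
    static struct eqe *get_sw_eqe(struct eqe *ring, unsigned ci, unsigned nent)
    {
        struct eqe *eqe = &ring[ci & (nent - 1)];

        return (!!(eqe->owner & 0x80) ^ !!(ci & nent)) ? NULL : eqe;
    }

    int main(void)
    {
        struct eqe ring[4] = { {0x00}, {0x00}, {0x80}, {0x80} };

        /* On lap 0 a cleared owner bit means software may consume. */
        printf("slot 0: %s\n", get_sw_eqe(ring, 0, 4) ? "SW" : "HW");
        printf("slot 2: %s\n", get_sw_eqe(ring, 2, 4) ? "SW" : "HW");
        return 0;
    }

mlx5 plays the same game with the low owner bit, as the core/lib/eq.h hits below show.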
"HW" : "SW"); nent 969 drivers/net/ethernet/mellanox/mlx4/eq.c static int mlx4_create_eq(struct mlx4_dev *dev, int nent, nent 983 drivers/net/ethernet/mellanox/mlx4/eq.c eq->nent = roundup_pow_of_two(max(nent, 2)); nent 987 drivers/net/ethernet/mellanox/mlx4/eq.c npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; nent 1038 drivers/net/ethernet/mellanox/mlx4/eq.c eq_context->log_eq_size = ilog2(eq->nent); nent 1097 drivers/net/ethernet/mellanox/mlx4/eq.c int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; nent 1522 drivers/net/ethernet/mellanox/mlx4/fw.c int nent = 0; nent 1552 drivers/net/ethernet/mellanox/mlx4/fw.c pages[nent * 2] = cpu_to_be64(virt); nent 1556 drivers/net/ethernet/mellanox/mlx4/fw.c pages[nent * 2 + 1] = nent 1562 drivers/net/ethernet/mellanox/mlx4/fw.c if (++nent == MLX4_MAILBOX_SIZE / 16) { nent 1563 drivers/net/ethernet/mellanox/mlx4/fw.c err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, nent 1568 drivers/net/ethernet/mellanox/mlx4/fw.c nent = 0; nent 1573 drivers/net/ethernet/mellanox/mlx4/fw.c if (nent) nent 1574 drivers/net/ethernet/mellanox/mlx4/fw.c err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, nent 398 drivers/net/ethernet/mellanox/mlx4/mlx4.h int nent; nent 236 drivers/net/ethernet/mellanox/mlx5/core/eq.c for (i = 0; i < eq->nent; i++) { nent 262 drivers/net/ethernet/mellanox/mlx5/core/eq.c eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE); nent 264 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); nent 291 drivers/net/ethernet/mellanox/mlx5/core/eq.c MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); nent 578 drivers/net/ethernet/mellanox/mlx5/core/eq.c .nent = MLX5_NUM_CMD_EQE, nent 597 drivers/net/ethernet/mellanox/mlx5/core/eq.c .nent = MLX5_NUM_ASYNC_EQE, nent 616 drivers/net/ethernet/mellanox/mlx5/core/eq.c .nent = /* TODO: sriov max_vf + */ 1, nent 738 drivers/net/ethernet/mellanox/mlx5/core/eq.c eqe = get_eqe(eq, ci & (eq->nent - 1)); nent 739 drivers/net/ethernet/mellanox/mlx5/core/eq.c eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe; nent 785 drivers/net/ethernet/mellanox/mlx5/core/eq.c int nent; nent 791 drivers/net/ethernet/mellanox/mlx5/core/eq.c nent = MLX5_COMP_EQ_SIZE; nent 811 drivers/net/ethernet/mellanox/mlx5/core/eq.c .nent = nent, nent 34 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h int nent; nent 57 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); nent 59 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
nent 1213 drivers/net/ethernet/mellanox/mlxsw/pci.c int nent = 0;
nent 1235 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
nent 1236 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
nent 1237 drivers/net/ethernet/mellanox/mlxsw/pci.c if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
nent 1238 drivers/net/ethernet/mellanox/mlxsw/pci.c err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
nent 1241 drivers/net/ethernet/mellanox/mlxsw/pci.c nent = 0;
nent 1246 drivers/net/ethernet/mellanox/mlxsw/pci.c if (nent) {
nent 1247 drivers/net/ethernet/mellanox/mlxsw/pci.c err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
nent 1721 drivers/nvme/target/fc.c unsigned int nent;
nent 1723 drivers/nvme/target/fc.c sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
nent 1728 drivers/nvme/target/fc.c fod->data_sg_cnt = nent;
nent 1729 drivers/nvme/target/fc.c fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
nent 329 drivers/rapidio/rio_cm.c static void riocm_rx_fill(struct cm_dev *cm, int nent)
nent 336 drivers/rapidio/rio_cm.c for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
nent 343 drivers/rapidio/rio_cm.c nent--;
nent 1566 drivers/rapidio/rio_cm.c u32 nent;
nent 1587 drivers/rapidio/rio_cm.c nent = min(info[0], cm->npeers);
nent 1588 drivers/rapidio/rio_cm.c buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
nent 1599 drivers/rapidio/rio_cm.c if (++i == nent)
nent 960 drivers/scsi/pm8001/pm80xx_hwi.h __le32 nent;
nent 116 drivers/spi/spi-topcliff-pch.c int nent;
nent 790 drivers/spi/spi-topcliff-pch.c dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
nent 793 drivers/spi/spi-topcliff-pch.c dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
nent 1042 drivers/spi/spi-topcliff-pch.c dma->nent = num;
nent 1104 drivers/spi/spi-topcliff-pch.c dma->nent = num;
nent 1017 drivers/tty/serial/atmel_serial.c int ret, nent;
nent 1036 drivers/tty/serial/atmel_serial.c nent = dma_map_sg(port->dev,
nent 1041 drivers/tty/serial/atmel_serial.c if (!nent) {
nent 1198 drivers/tty/serial/atmel_serial.c int ret, nent;
nent 1219 drivers/tty/serial/atmel_serial.c nent = dma_map_sg(port->dev,
nent 1224 drivers/tty/serial/atmel_serial.c if (!nent) {
nent 1144 drivers/tty/serial/fsl_lpuart.c int ret, nent;
nent 1170 drivers/tty/serial/fsl_lpuart.c nent = dma_map_sg(sport->port.dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
nent 1172 drivers/tty/serial/fsl_lpuart.c if (!nent) {
nent 235 drivers/tty/serial/pch_uart.c int nent;
nent 785 drivers/tty/serial/pch_uart.c for (i = 0; i < priv->nent; i++, sg++) {
nent 793 drivers/tty/serial/pch_uart.c priv->nent = 0;
nent 934 drivers/tty/serial/pch_uart.c int nent;
nent 1010 drivers/tty/serial/pch_uart.c nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);
nent 1011 drivers/tty/serial/pch_uart.c if (!nent) {
nent 1016 drivers/tty/serial/pch_uart.c priv->nent = nent;
nent 1018 drivers/tty/serial/pch_uart.c for (i = 0; i < nent; i++, sg++) {
nent 1023 drivers/tty/serial/pch_uart.c if (i == (nent - 1))
nent 1030 drivers/tty/serial/pch_uart.c priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
nent 1037 drivers/tty/serial/pch_uart.c dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
nent 1138 include/linux/mlx4/device.h int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
nent 17 include/linux/mlx5/eq.h int nent;
nent 262 include/xen/interface/platform.h uint32_t nent;
nent 492 lib/scatterlist.c unsigned int nent, nalloc;
nent 495 lib/scatterlist.c nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
nent 497 lib/scatterlist.c if (length > (nent << (PAGE_SHIFT + order)))
nent 499 lib/scatterlist.c nalloc = nent;
nent 527 lib/scatterlist.c *nent_p = nent;
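The lib/scatterlist.c hits size an allocation by chunk count: nent is the number of (PAGE_SIZE << order)-byte chunks covering length, and the follow-up comparison catches the case where the shift overflowed nent's type. The same arithmetic in plain C, assuming 4 KiB pages and a power-of-two round_up() equivalent to the kernel macro:

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Power-of-two round-up, as the kernel's round_up() computes it. */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
        unsigned int order = 1;                   /* 8 KiB chunks */
        size_t length = 20000;                    /* bytes requested */

        unsigned int nent =
            round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);

        assert(nent == 3);                        /* 3 x 8 KiB covers 20000 bytes */
        /* The kernel bails out when length > (nent << (PAGE_SHIFT + order)),
         * i.e. when the shift wrapped; here the cover holds. */
        assert(length <= ((size_t)nent << (PAGE_SHIFT + order)));
        return 0;
    }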
nent 82 scripts/conmakehash.c int i, nuni, nent;
nent 272 scripts/conmakehash.c nent = 0;
nent 275 scripts/conmakehash.c while ( nent >= unicount[fp0] )
nent 278 scripts/conmakehash.c nent = 0;
nent 280 scripts/conmakehash.c printf("0x%04x", unitable[fp0][nent++]);
nent 207 tools/arch/x86/include/uapi/asm/kvm.h __u32 nent;
nent 229 tools/arch/x86/include/uapi/asm/kvm.h __u32 nent;
nent 690 tools/testing/selftests/kvm/lib/x86_64/processor.c int nent = 100;
nent 694 tools/testing/selftests/kvm/lib/x86_64/processor.c size += nent * sizeof(struct kvm_cpuid_entry2);
nent 701 tools/testing/selftests/kvm/lib/x86_64/processor.c cpuid->nent = nent;
nent 756 tools/testing/selftests/kvm/lib/x86_64/processor.c for (i = 0; i < cpuid->nent; i++) {
nent 54 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c TEST_ASSERT(hv_cpuid_entries->nent == 6,
nent 57 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c hv_cpuid_entries->nent);
nent 59 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c TEST_ASSERT(hv_cpuid_entries->nent == 7,
nent 62 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c hv_cpuid_entries->nent);
nent 64 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c for (i = 0; i < hv_cpuid_entries->nent; i++) {
nent 102 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c static struct kvm_cpuid2 cpuid = {.nent = 0};
nent 115 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c int nent = 20; /* should be enough */
nent 118 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
nent 125 tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c cpuid->nent = nent;
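The selftest hits at the end show the standard userspace shape for these ioctls: allocate the kvm_cpuid2 header plus nent trailing kvm_cpuid_entry2 slots in one block, and record the capacity in ->nent before the kernel fills it in. A reduced, self-contained version; the struct fields are trimmed stand-ins for the uapi definitions in arch/x86/include/uapi/asm/kvm.h:

    #include <stdio.h>
    #include <stdlib.h>

    struct kvm_cpuid_entry2 { unsigned function, eax, ebx, ecx, edx; };
    struct kvm_cpuid2 {
        unsigned nent;
        struct kvm_cpuid_entry2 entries[];    /* flexible array member */
    };

    int main(void)
    {
        int nent = 20;   /* "should be enough", as the selftest comment says */

        /* Header plus nent trailing entries in a single allocation. */
        struct kvm_cpuid2 *cpuid =
            malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
        if (!cpuid)
            return 1;
        cpuid->nent = nent;                   /* capacity; the ioctl updates it */

        printf("capacity: %u entries\n", cpuid->nent);
        free(cpuid);
        return 0;
    }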