nvec 2356 arch/mips/kernel/traps.c int nvec = cpu_has_veic ? 64 : 8;
nvec 2357 arch/mips/kernel/traps.c for (i = 0; i < nvec; i++)
nvec 185 arch/mips/pci/msi-octeon.c int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 200 arch/mips/pci/msi-octeon.c if (type == PCI_CAP_ID_MSI && nvec > 1)
nvec 42 arch/powerpc/include/asm/pci-bridge.h int nvec, int type);
nvec 12 arch/powerpc/kernel/msi.c int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 23 arch/powerpc/kernel/msi.c if (type == PCI_CAP_ID_MSI && nvec > 1)
nvec 26 arch/powerpc/kernel/msi.c return phb->controller_ops.setup_msi_irqs(dev, nvec, type);
nvec 37 arch/powerpc/platforms/4xx/hsta_msi.c static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 65 arch/powerpc/platforms/4xx/msi.c static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 74 arch/powerpc/platforms/4xx/msi.c __func__, nvec, type);
nvec 253 arch/powerpc/platforms/cell/axon_msi.c static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 78 arch/powerpc/platforms/pasemi/msi.c static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
nvec 88 arch/powerpc/platforms/pasemi/msi.c pdev, nvec, type);
nvec 159 arch/powerpc/platforms/powernv/pci.c int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
nvec 194 arch/powerpc/platforms/powernv/pci.h extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
nvec 127 arch/powerpc/platforms/pseries/msi.c static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
nvec 142 arch/powerpc/platforms/pseries/msi.c if (req_msi < nvec) {
nvec 143 arch/powerpc/platforms/pseries/msi.c pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
nvec 154 arch/powerpc/platforms/pseries/msi.c static int check_req_msi(struct pci_dev *pdev, int nvec)
nvec 156 arch/powerpc/platforms/pseries/msi.c return check_req(pdev, nvec, "ibm,req#msi");
nvec 159 arch/powerpc/platforms/pseries/msi.c static int check_req_msix(struct pci_dev *pdev, int nvec)
nvec 161 arch/powerpc/platforms/pseries/msi.c return check_req(pdev, nvec, "ibm,req#msi-x");
nvec 376 arch/powerpc/platforms/pseries/msi.c int nvec = nvec_in;
nvec 380 arch/powerpc/platforms/pseries/msi.c rc = check_req_msix(pdev, nvec);
nvec 382 arch/powerpc/platforms/pseries/msi.c rc = check_req_msi(pdev, nvec);
nvec 387 arch/powerpc/platforms/pseries/msi.c quota = msi_quota_for_device(pdev, nvec);
nvec 389 arch/powerpc/platforms/pseries/msi.c if (quota && quota < nvec)
nvec 400 arch/powerpc/platforms/pseries/msi.c int m = roundup_pow_of_two(nvec);
nvec 404 arch/powerpc/platforms/pseries/msi.c nvec = m;
nvec 417 arch/powerpc/platforms/pseries/msi.c rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
nvec 432 arch/powerpc/platforms/pseries/msi.c rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
nvec 436 arch/powerpc/platforms/pseries/msi.c rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
nvec 442 arch/powerpc/platforms/pseries/msi.c rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
nvec 444 arch/powerpc/platforms/pseries/msi.c if (rc != nvec) {
nvec 445 arch/powerpc/platforms/pseries/msi.c if (nvec != nvec_in) {
nvec 446 arch/powerpc/platforms/pseries/msi.c nvec = nvec_in;
nvec 178 arch/powerpc/sysdev/fsl_msi.c static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
nvec 120 arch/powerpc/sysdev/mpic_u3msi.c static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
nvec 234 arch/s390/pci/pci_irq.c int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
nvec 245 arch/s390/pci/pci_irq.c if (type == PCI_CAP_ID_MSI && nvec > 1)
nvec 247 arch/s390/pci/pci_irq.c msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
nvec 315 arch/s390/pci/pci_irq.c return (msi_vecs == nvec) ? 0 : msi_vecs;
nvec 9 arch/x86/include/asm/msi.h int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
nvec 114 arch/x86/include/asm/pci.h int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
nvec 288 arch/x86/include/asm/x86_init.h int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
nvec 50 arch/x86/include/asm/xen/pci.h int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
nvec 69 arch/x86/include/asm/xen/pci.h int vectors[], int nvec)
nvec 72 arch/x86/include/asm/xen/pci.h return xen_pci_frontend->enable_msix(dev, vectors, nvec);
nvec 184 arch/x86/kernel/apic/msi.c int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 199 arch/x86/kernel/apic/msi.c return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
nvec 213 arch/x86/kernel/apic/msi.c int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
nvec 137 arch/x86/kernel/x86_init.c int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 139 arch/x86/kernel/x86_init.c return x86_msi.setup_msi_irqs(dev, nvec, type);
nvec 163 arch/x86/pci/xen.c static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 169 arch/x86/pci/xen.c if (type == PCI_CAP_ID_MSI && nvec > 1)
nvec 172 arch/x86/pci/xen.c v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
nvec 177 arch/x86/pci/xen.c ret = xen_pci_frontend_enable_msix(dev, v, nvec);
nvec 185 arch/x86/pci/xen.c (type == PCI_CAP_ID_MSI) ? nvec : 1,
nvec 228 arch/x86/pci/xen.c static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 234 arch/x86/pci/xen.c if (type == PCI_CAP_ID_MSI && nvec > 1)
nvec 247 arch/x86/pci/xen.c (type == PCI_CAP_ID_MSI) ? nvec : 1,
nvec 267 arch/x86/pci/xen.c static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 291 arch/x86/pci/xen.c if (type == PCI_CAP_ID_MSI && nvec > 1) {
nvec 293 arch/x86/pci/xen.c map_irq.entry_nr = nvec;
nvec 315 arch/x86/pci/xen.c if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
nvec 342 arch/x86/pci/xen.c (type == PCI_CAP_ID_MSI) ? nvec : 1,
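A recurring convention is visible in the arch hooks above (msi-octeon, powerpc, s390, x86/xen): arch_setup_msi_irqs() receives the requested vector count and the capability type, and most implementations refuse multi-message MSI up front. A minimal sketch of that convention follows; the body is illustrative and not taken from any single file listed here. A positive return value tells the PCI core how many vectors could be allocated so it can retry with fewer, a negative value is a hard error, and 0 is success.

    /* Sketch of the arch hook convention, assuming a platform without
     * multi-message MSI support. */
    int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
    {
            /* Multi-MSI needs contiguous vectors; report that only one
             * vector is available so the core retries with nvec == 1. */
            if (type == PCI_CAP_ID_MSI && nvec > 1)
                    return 1;

            /* ... platform-specific allocation of nvec vectors ... */
            return 0;
    }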
nvec 1525 drivers/ata/ahci.c int nvec;
nvec 1536 drivers/ata/ahci.c nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
nvec 1538 drivers/ata/ahci.c if (nvec > 0) {
nvec 1542 drivers/ata/ahci.c return nvec;
nvec 1559 drivers/ata/ahci.c nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
nvec 1560 drivers/ata/ahci.c if (nvec == 1)
nvec 1561 drivers/ata/ahci.c return nvec;
nvec 109 drivers/base/platform-msi.c static void platform_msi_free_descs(struct device *dev, int base, int nvec)
nvec 115 drivers/base/platform-msi.c desc->platform.msi_index < (base + nvec)) {
nvec 123 drivers/base/platform-msi.c int nvec,
nvec 136 drivers/base/platform-msi.c for (i = 0; i < nvec; i++) {
nvec 148 drivers/base/platform-msi.c if (i != nvec) {
nvec 150 drivers/base/platform-msi.c platform_msi_free_descs(dev, base, nvec);
nvec 158 drivers/base/platform-msi.c static int platform_msi_alloc_descs(struct device *dev, int nvec,
nvec 162 drivers/base/platform-msi.c return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
nvec 196 drivers/base/platform-msi.c platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
nvec 206 drivers/base/platform-msi.c if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
nvec 251 drivers/base/platform-msi.c int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
nvec 257 drivers/base/platform-msi.c priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
nvec 261 drivers/base/platform-msi.c err = platform_msi_alloc_descs(dev, nvec, priv_data);
nvec 265 drivers/base/platform-msi.c err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
nvec 272 drivers/base/platform-msi.c platform_msi_free_descs(dev, 0, nvec);
nvec 325 drivers/base/platform-msi.c unsigned int nvec,
nvec 335 drivers/base/platform-msi.c data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
nvec 341 drivers/base/platform-msi.c is_tree ? 0 : nvec,
nvec 346 drivers/base/platform-msi.c err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
nvec 368 drivers/base/platform-msi.c unsigned int nvec)
nvec 375 drivers/base/platform-msi.c if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
nvec 214 drivers/infiniband/hw/qib/qib_pcie.c int nvec;
nvec 223 drivers/infiniband/hw/qib/qib_pcie.c nvec = -1;
nvec 230 drivers/infiniband/hw/qib/qib_pcie.c nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
nvec 231 drivers/infiniband/hw/qib/qib_pcie.c if (nvec < 0)
nvec 240 drivers/infiniband/hw/qib/qib_pcie.c *nent = !dd->pcidev->msix_enabled ? 0 : nvec;
nvec 284 drivers/infiniband/hw/qib/qib_pcie.c return nvec < 0 ? nvec : 0;
nvec 3024 drivers/iommu/arm-smmu-v3.c int ret, nvec = ARM_SMMU_MAX_MSIS;
nvec 3034 drivers/iommu/arm-smmu-v3.c nvec--;
nvec 3045 drivers/iommu/arm-smmu-v3.c ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
nvec 28 drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c int nvec, msi_alloc_info_t *info)
nvec 50 drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c nvec = max_t(int, 32, roundup_pow_of_two(nvec));
nvec 51 drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
nvec 54 drivers/irqchip/irq-gic-v3-its-pci-msi.c int nvec, msi_alloc_info_t *info)
nvec 85 drivers/irqchip/irq-gic-v3-its-pci-msi.c nvec = max(nvec, alias_count);
nvec 88 drivers/irqchip/irq-gic-v3-its-pci-msi.c nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
nvec 89 drivers/irqchip/irq-gic-v3-its-pci-msi.c return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
nvec 47 drivers/irqchip/irq-gic-v3-its-platform-msi.c int nvec, msi_alloc_info_t *info)
nvec 66 drivers/irqchip/irq-gic-v3-its-platform-msi.c nvec = max_t(int, 32, roundup_pow_of_two(nvec));
nvec 68 drivers/irqchip/irq-gic-v3-its-platform-msi.c dev, nvec, info);
nvec 2495 drivers/irqchip/irq-gic-v3-its.c int nvec, msi_alloc_info_t *info)
nvec 2537 drivers/irqchip/irq-gic-v3-its.c its_dev = its_create_device(its, dev_id, nvec, true);
nvec 2543 drivers/irqchip/irq-gic-v3-its.c pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
nvec 15 drivers/misc/cxl/vphb.c static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
nvec 39 drivers/mmc/host/cavium-thunderx.c int nvec, ret, i;
nvec 41 drivers/mmc/host/cavium-thunderx.c nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
nvec 42 drivers/mmc/host/cavium-thunderx.c if (nvec < 0)
nvec 43 drivers/mmc/host/cavium-thunderx.c return nvec;
nvec 46 drivers/mmc/host/cavium-thunderx.c for (i = 0; i < nvec; i++) {
nvec 1609 drivers/net/ethernet/freescale/enetc/enetc.c int i, n, err, nvec;
nvec 1611 drivers/net/ethernet/freescale/enetc/enetc.c nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
nvec 1613 drivers/net/ethernet/freescale/enetc/enetc.c n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
nvec 1618 drivers/net/ethernet/freescale/enetc/enetc.c if (n != nvec)
nvec 781 drivers/net/ethernet/marvell/octeontx2/af/cgx.c int err, nvec;
nvec 811 drivers/net/ethernet/marvell/octeontx2/af/cgx.c nvec = CGX_NVEC;
nvec 812 drivers/net/ethernet/marvell/octeontx2/af/cgx.c err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
nvec 813 drivers/net/ethernet/marvell/octeontx2/af/cgx.c if (err < 0 || err != nvec) {
nvec 815 drivers/net/ethernet/marvell/octeontx2/af/cgx.c nvec, err);
nvec 23 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c int nvec;
nvec 48 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
nvec 94 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c static int request_irqs(struct mlx5_core_dev *dev, int nvec)
nvec 100 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < nvec; i++) {
nvec 153 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (; vecidx < irq_table->nvec; vecidx++) {
nvec 210 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
nvec 214 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < nvec; i++) {
nvec 231 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
nvec 234 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < nvec; i++)
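The enetc and cgx entries above show the all-or-nothing idiom: passing the same value as both min_vecs and max_vecs makes pci_alloc_irq_vectors() fail outright rather than hand back a smaller allocation. A minimal sketch, with an illustrative function name:

    /* Sketch of exact-count vector allocation (enetc/cgx style).
     * With min == max, a successful return is guaranteed to equal
     * nvec, though the drivers above still double-check. */
    static int example_alloc_exact(struct pci_dev *pdev, int nvec)
    {
            int n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

            if (n < 0)
                    return n;       /* allocation failed entirely */
            return 0;               /* exactly nvec vectors granted */
    }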
nvec 256 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < table->nvec; i++)
nvec 268 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c int nvec;
nvec 271 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
nvec 273 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c nvec = min_t(int, nvec, num_eqs);
nvec 274 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
nvec 277 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
nvec 281 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
nvec 282 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c nvec, PCI_IRQ_MSIX);
nvec 283 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c if (nvec < 0) {
nvec 284 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c err = nvec;
nvec 288 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c table->nvec = nvec;
nvec 294 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c err = request_irqs(dev, nvec);
nvec 328 drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c for (i = 0; i < table->nvec; i++)
nvec 3079 drivers/net/vmxnet3/vmxnet3_drv.c vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
nvec 3082 drivers/net/vmxnet3/vmxnet3_drv.c adapter->intr.msix_entries, nvec, nvec);
nvec 3084 drivers/net/vmxnet3/vmxnet3_drv.c if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
nvec 3087 drivers/net/vmxnet3/vmxnet3_drv.c nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
nvec 3127 drivers/net/vmxnet3/vmxnet3_drv.c int i, nvec;
nvec 3129 drivers/net/vmxnet3/vmxnet3_drv.c nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
nvec 3131 drivers/net/vmxnet3/vmxnet3_drv.c nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
nvec 3133 drivers/net/vmxnet3/vmxnet3_drv.c nvec += 1; /* for link event */
nvec 3134 drivers/net/vmxnet3/vmxnet3_drv.c nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
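The mlx5 entries above size the vector budget from the online CPU count, clamp it to the device's EQ capacity, then accept anything between a control-path floor and that budget. A sketch of the pattern; EXAMPLE_BASE, hw_limit and the function name are illustrative stand-ins, not mlx5 symbols:

    /* Sketch of ranged vector allocation sized from CPU count
     * (mlx5-style): one completion vector per online CPU plus a
     * reserved control vector, clamped to a hardware limit. */
    #define EXAMPLE_BASE 1  /* vectors reserved for control interrupts */

    static int example_alloc_ranged(struct pci_dev *pdev, int hw_limit)
    {
            int nvec = num_online_cpus() + EXAMPLE_BASE;

            nvec = min_t(int, nvec, hw_limit);
            if (nvec <= EXAMPLE_BASE)
                    return -ENOMEM;

            /* Any count from EXAMPLE_BASE + 1 up to nvec is usable. */
            nvec = pci_alloc_irq_vectors(pdev, EXAMPLE_BASE + 1, nvec,
                                         PCI_IRQ_MSIX);
            return nvec < 0 ? nvec : 0;
    }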
nvec 3135 drivers/net/vmxnet3/vmxnet3_drv.c nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
nvec 3137 drivers/net/vmxnet3/vmxnet3_drv.c for (i = 0; i < nvec; i++)
nvec 3140 drivers/net/vmxnet3/vmxnet3_drv.c nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
nvec 3141 drivers/net/vmxnet3/vmxnet3_drv.c if (nvec < 0)
nvec 3147 drivers/net/vmxnet3/vmxnet3_drv.c if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
nvec 3157 drivers/net/vmxnet3/vmxnet3_drv.c adapter->intr.num_intrs = nvec;
nvec 3164 drivers/net/vmxnet3/vmxnet3_drv.c "Limiting #rx queues to 1, try MSI.\n", nvec);
nvec 570 drivers/ntb/hw/amd/ntb_hw_amd.c struct amd_ntb_vec *nvec = dev;
nvec 572 drivers/ntb/hw/amd/ntb_hw_amd.c return ndev_interrupt(nvec->ndev, nvec->num);
nvec 347 drivers/ntb/hw/intel/ntb_hw_gen1.c struct intel_ntb_vec *nvec = dev;
nvec 349 drivers/ntb/hw/intel/ntb_hw_gen1.c dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n",
nvec 350 drivers/ntb/hw/intel/ntb_hw_gen1.c irq, nvec->num);
nvec 352 drivers/ntb/hw/intel/ntb_hw_gen1.c return ndev_interrupt(nvec->ndev, nvec->num);
nvec 802 drivers/pci/controller/pcie-rcar.c struct pci_dev *pdev, int nvec, int type)
nvec 819 drivers/pci/controller/pcie-rcar.c hwirq = rcar_msi_alloc_region(msi, nvec);
nvec 827 drivers/pci/controller/pcie-rcar.c for (i = 0; i < nvec; i++) {
nvec 841 drivers/pci/controller/pcie-rcar.c desc->nvec_used = nvec;
nvec 842 drivers/pci/controller/pcie-rcar.c desc->msi_attrib.multiple = order_base_2(nvec);
nvec 266 drivers/pci/controller/vmd.c int nvec, msi_alloc_info_t *arg)
nvec 271 drivers/pci/controller/vmd.c if (nvec > vmd->msix_count)
nvec 35 drivers/pci/msi.c static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 41 drivers/pci/msi.c return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
nvec 43 drivers/pci/msi.c return arch_setup_msi_irqs(dev, nvec, type);
nvec 90 drivers/pci/msi.c int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
nvec 97 drivers/pci/msi.c return chip->setup_irqs(chip, dev, nvec, type);
nvec 102 drivers/pci/msi.c if (type == PCI_CAP_ID_MSI && nvec > 1)
nvec 558 drivers/pci/msi.c msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
nvec 565 drivers/pci/msi.c masks = irq_create_affinity_masks(nvec, affd);
nvec 568 drivers/pci/msi.c entry = alloc_msi_entry(&dev->dev, nvec, masks);
nvec 581 drivers/pci/msi.c entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
nvec 623 drivers/pci/msi.c static int msi_capability_init(struct pci_dev *dev, int nvec,
nvec 632 drivers/pci/msi.c entry = msi_setup_entry(dev, nvec, affd);
nvec 643 drivers/pci/msi.c ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
nvec 695 drivers/pci/msi.c struct msix_entry *entries, int nvec,
nvec 704 drivers/pci/msi.c masks = irq_create_affinity_masks(nvec, affd);
nvec 706 drivers/pci/msi.c for (i = 0, curmsk = masks; i < nvec; i++) {
nvec 775 drivers/pci/msi.c int nvec, struct irq_affinity *affd)
nvec 790 drivers/pci/msi.c ret = msix_setup_entries(dev, base, entries, nvec, affd);
nvec 794 drivers/pci/msi.c ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
nvec 857 drivers/pci/msi.c static int pci_msi_supported(struct pci_dev *dev, int nvec)
nvec 873 drivers/pci/msi.c if (nvec < 1)
nvec 971 drivers/pci/msi.c int nvec, struct irq_affinity *affd, int flags)
nvec 976 drivers/pci/msi.c if (!pci_msi_supported(dev, nvec))
nvec 982 drivers/pci/msi.c if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL))
nvec 987 drivers/pci/msi.c for (i = 0; i < nvec; i++) {
nvec 990 drivers/pci/msi.c for (j = i + 1; j < nvec; j++) {
nvec 1002 drivers/pci/msi.c return msix_capability_init(dev, entries, nvec, affd);
nvec 1059 drivers/pci/msi.c int nvec;
nvec 1077 drivers/pci/msi.c nvec = pci_msi_vec_count(dev);
nvec 1078 drivers/pci/msi.c if (nvec < 0)
nvec 1079 drivers/pci/msi.c return nvec;
nvec 1080 drivers/pci/msi.c if (nvec < minvec)
nvec 1083 drivers/pci/msi.c if (nvec > maxvec)
nvec 1084 drivers/pci/msi.c nvec = maxvec;
nvec 1088 drivers/pci/msi.c nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
nvec 1089 drivers/pci/msi.c if (nvec < minvec)
nvec 1093 drivers/pci/msi.c rc = msi_capability_init(dev, nvec, affd);
nvec 1095 drivers/pci/msi.c return nvec;
nvec 1102 drivers/pci/msi.c nvec = rc;
nvec 1121 drivers/pci/msi.c int rc, nvec = maxvec;
nvec 1131 drivers/pci/msi.c nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
nvec 1132 drivers/pci/msi.c if (nvec < minvec)
nvec 1136 drivers/pci/msi.c rc = __pci_enable_msix(dev, entries, nvec, affd, flags);
nvec 1138 drivers/pci/msi.c return nvec;
nvec 1145 drivers/pci/msi.c nvec = rc;
nvec 48 drivers/pci/pcie/portdrv_core.c u32 nvec = 0, pos;
nvec 62 drivers/pci/pcie/portdrv_core.c nvec = *pme + 1;
nvec 74 drivers/pci/pcie/portdrv_core.c nvec = max(nvec, *aer + 1);
nvec 85 drivers/pci/pcie/portdrv_core.c nvec = max(nvec, *dpc + 1);
nvec 89 drivers/pci/pcie/portdrv_core.c return nvec;
nvec 103 drivers/pci/pcie/portdrv_core.c int nr_entries, nvec, pcie_irq;
nvec 113 drivers/pci/pcie/portdrv_core.c nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
nvec 114 drivers/pci/pcie/portdrv_core.c if (nvec > nr_entries) {
nvec 130 drivers/pci/pcie/portdrv_core.c if (nvec != nr_entries) {
nvec 133 drivers/pci/pcie/portdrv_core.c nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
nvec 249 drivers/pci/xen-pcifront.c int vector[], int nvec)
nvec 258 drivers/pci/xen-pcifront.c .value = nvec,
nvec 264 drivers/pci/xen-pcifront.c if (nvec > SH_INFO_MAX_VEC) {
nvec 266 drivers/pci/xen-pcifront.c " Increase SH_INFO_MAX_VEC\n", nvec);
nvec 283 drivers/pci/xen-pcifront.c for (i = 0; i < nvec; i++) {
nvec 869 drivers/scsi/arcmsr/arcmsr_hba.c int nvec, i;
nvec 873 drivers/scsi/arcmsr/arcmsr_hba.c nvec = pci_alloc_irq_vectors(pdev, 1, ARCMST_NUM_MSIX_VECTORS,
nvec 875 drivers/scsi/arcmsr/arcmsr_hba.c if (nvec > 0) {
nvec 881 drivers/scsi/arcmsr/arcmsr_hba.c nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
nvec 882 drivers/scsi/arcmsr/arcmsr_hba.c if (nvec == 1) {
nvec 887 drivers/scsi/arcmsr/arcmsr_hba.c nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
nvec 888 drivers/scsi/arcmsr/arcmsr_hba.c if (nvec < 1)
nvec 894 drivers/scsi/arcmsr/arcmsr_hba.c acb->vector_count = nvec;
nvec 895 drivers/scsi/arcmsr/arcmsr_hba.c for (i = 0; i < nvec; i++) {
nvec 3553 drivers/scsi/be2iscsi/be_main.c int nvec = 1;
nvec 3558 drivers/scsi/be2iscsi/be_main.c nvec = BEISCSI_MAX_NUM_CPUS + 1;
nvec 3561 drivers/scsi/be2iscsi/be_main.c nvec = phba->fw_config.eqid_count;
nvec 3564 drivers/scsi/be2iscsi/be_main.c nvec = 2;
nvec 3569 drivers/scsi/be2iscsi/be_main.c if (enable_msix && nvec > 1) {
nvec 3572 drivers/scsi/be2iscsi/be_main.c if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
nvec 3574 drivers/scsi/be2iscsi/be_main.c phba->num_cpus = nvec - 1;
nvec 1151 drivers/scsi/bfa/bfad.c for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
nvec 1153 drivers/scsi/bfa/bfad.c bfad->msix_tab[bfad->nvec].msix.entry = i;
nvec 1154 drivers/scsi/bfa/bfad.c bfad->msix_tab[bfad->nvec].bfad = bfad;
nvec 1155 drivers/scsi/bfa/bfad.c msix_entries[bfad->nvec].entry = i;
nvec 1156 drivers/scsi/bfa/bfad.c bfad->nvec++;
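The arcmsr entries above show the classic three-step interrupt fallback: try a block of MSI-X vectors, then a single MSI, then legacy INTx. A condensed sketch; the cap of 4 is an illustrative stand-in for ARCMST_NUM_MSIX_VECTORS:

    /* Sketch of the MSI-X -> MSI -> INTx fallback chain
     * (arcmsr-style). Returns the vector count or an error. */
    static int example_request_irq(struct pci_dev *pdev)
    {
            int nvec;

            nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX);
            if (nvec > 0)
                    return nvec;

            nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
            if (nvec == 1)
                    return nvec;

            nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
            return nvec < 1 ? -ENODEV : nvec;
    }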
nvec 1169 drivers/scsi/bfa/bfad.c for (i = 0; i < bfad->nvec; i++) {
nvec 1219 drivers/scsi/bfa/bfad.c msix_entries, bfad->nvec);
nvec 1224 drivers/scsi/bfa/bfad.c bfad->pci_name, bfad->nvec, error);
nvec 1225 drivers/scsi/bfa/bfad.c bfad->nvec = 1;
nvec 1246 drivers/scsi/bfa/bfad.c for (i = 0; i < bfad->nvec; i++) {
nvec 1251 drivers/scsi/bfa/bfad.c bfa_msix_init(&bfad->bfa, bfad->nvec);
nvec 1275 drivers/scsi/bfa/bfad.c for (i = 0; i < bfad->nvec; i++)
nvec 207 drivers/scsi/bfa/bfad_drv.h int nvec;
nvec 102 drivers/soc/ti/ti_sci_inta_msi.c int ret, nvec;
nvec 111 drivers/soc/ti/ti_sci_inta_msi.c nvec = ti_sci_inta_msi_alloc_descs(dev, res);
nvec 112 drivers/soc/ti/ti_sci_inta_msi.c if (nvec <= 0)
nvec 113 drivers/soc/ti/ti_sci_inta_msi.c return nvec;
nvec 115 drivers/soc/ti/ti_sci_inta_msi.c ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
nvec 107 drivers/staging/nvec/nvec.c int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
nvec 110 drivers/staging/nvec/nvec.c return atomic_notifier_chain_register(&nvec->notifier_list, nb);
nvec 122 drivers/staging/nvec/nvec.c int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
nvec 124 drivers/staging/nvec/nvec.c return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
nvec 137 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
nvec 144 drivers/staging/nvec/nvec.c dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
nvec 165 drivers/staging/nvec/nvec.c static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
nvec 171 drivers/staging/nvec/nvec.c if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
nvec 172 drivers/staging/nvec/nvec.c dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
nvec 173 drivers/staging/nvec/nvec.c return &nvec->msg_pool[i];
nvec 177 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "could not allocate %s buffer\n",
nvec 190 drivers/staging/nvec/nvec.c void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
nvec 192 drivers/staging/nvec/nvec.c if (msg != &nvec->tx_scratch)
nvec 193 drivers/staging/nvec/nvec.c dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
nvec 235 drivers/staging/nvec/nvec.c static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
nvec 237 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
nvec 238 drivers/staging/nvec/nvec.c gpiod_get_value(nvec->gpiod), value);
nvec 239 drivers/staging/nvec/nvec.c gpiod_set_value(nvec->gpiod, value);
nvec 254 drivers/staging/nvec/nvec.c int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
nvec 260 drivers/staging/nvec/nvec.c msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
nvec 269 drivers/staging/nvec/nvec.c spin_lock_irqsave(&nvec->tx_lock, flags);
nvec 270 drivers/staging/nvec/nvec.c list_add_tail(&msg->node, &nvec->tx_data);
nvec 271 drivers/staging/nvec/nvec.c spin_unlock_irqrestore(&nvec->tx_lock, flags);
nvec 273 drivers/staging/nvec/nvec.c schedule_work(&nvec->tx_work);
nvec 296 drivers/staging/nvec/nvec.c int nvec_write_sync(struct nvec_chip *nvec,
nvec 300 drivers/staging/nvec/nvec.c mutex_lock(&nvec->sync_write_mutex);
nvec 303 drivers/staging/nvec/nvec.c nvec->sync_write_pending = (data[1] << 8) + data[0];
nvec 305 drivers/staging/nvec/nvec.c if (nvec_write_async(nvec, data, size) < 0) {
nvec 306 drivers/staging/nvec/nvec.c mutex_unlock(&nvec->sync_write_mutex);
nvec 310 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
nvec 311 drivers/staging/nvec/nvec.c nvec->sync_write_pending);
nvec 312 drivers/staging/nvec/nvec.c if (!(wait_for_completion_timeout(&nvec->sync_write,
nvec 314 drivers/staging/nvec/nvec.c dev_warn(nvec->dev,
nvec 316 drivers/staging/nvec/nvec.c mutex_unlock(&nvec->sync_write_mutex);
nvec 320 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
nvec 322 drivers/staging/nvec/nvec.c *msg = nvec->last_sync_msg;
nvec 324 drivers/staging/nvec/nvec.c mutex_unlock(&nvec->sync_write_mutex);
nvec 337 drivers/staging/nvec/nvec.c static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
nvec 341 drivers/staging/nvec/nvec.c nvec_write_async(nvec, global_events, 3);
nvec 375 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
nvec 380 drivers/staging/nvec/nvec.c spin_lock_irqsave(&nvec->tx_lock, flags);
nvec 381 drivers/staging/nvec/nvec.c while (!list_empty(&nvec->tx_data)) {
nvec 382 drivers/staging/nvec/nvec.c msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
nvec 383 drivers/staging/nvec/nvec.c spin_unlock_irqrestore(&nvec->tx_lock, flags);
nvec 384 drivers/staging/nvec/nvec.c nvec_gpio_set_value(nvec, 0);
nvec 386 drivers/staging/nvec/nvec.c &nvec->ec_transfer, msecs_to_jiffies(5000));
nvec 389 drivers/staging/nvec/nvec.c dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
nvec 390 drivers/staging/nvec/nvec.c nvec_gpio_set_value(nvec, 1);
nvec 394 drivers/staging/nvec/nvec.c spin_lock_irqsave(&nvec->tx_lock, flags);
nvec 398 drivers/staging/nvec/nvec.c nvec_msg_free(nvec, msg);
nvec 401 drivers/staging/nvec/nvec.c spin_unlock_irqrestore(&nvec->tx_lock, flags);
nvec 412 drivers/staging/nvec/nvec.c static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
nvec 415 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
nvec 424 drivers/staging/nvec/nvec.c atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
nvec 439 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
nvec 443 drivers/staging/nvec/nvec.c spin_lock_irqsave(&nvec->rx_lock, flags);
nvec 444 drivers/staging/nvec/nvec.c while (!list_empty(&nvec->rx_data)) {
nvec 445 drivers/staging/nvec/nvec.c msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
nvec 447 drivers/staging/nvec/nvec.c spin_unlock_irqrestore(&nvec->rx_lock, flags);
nvec 449 drivers/staging/nvec/nvec.c if (nvec->sync_write_pending ==
nvec 451 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "sync write completed!\n");
nvec 452 drivers/staging/nvec/nvec.c nvec->sync_write_pending = 0;
nvec 453 drivers/staging/nvec/nvec.c nvec->last_sync_msg = msg;
nvec 454 drivers/staging/nvec/nvec.c complete(&nvec->sync_write);
nvec 456 drivers/staging/nvec/nvec.c parse_msg(nvec, msg);
nvec 457 drivers/staging/nvec/nvec.c nvec_msg_free(nvec, msg);
nvec 459 drivers/staging/nvec/nvec.c spin_lock_irqsave(&nvec->rx_lock, flags);
nvec 461 drivers/staging/nvec/nvec.c spin_unlock_irqrestore(&nvec->rx_lock, flags);
nvec 470 drivers/staging/nvec/nvec.c static void nvec_tx_completed(struct nvec_chip *nvec)
nvec 473 drivers/staging/nvec/nvec.c if (nvec->tx->pos != nvec->tx->size) {
nvec 474 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "premature END_TRANS, resending\n");
nvec 475 drivers/staging/nvec/nvec.c nvec->tx->pos = 0;
nvec 476 drivers/staging/nvec/nvec.c nvec_gpio_set_value(nvec, 0);
nvec 478 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 488 drivers/staging/nvec/nvec.c static void nvec_rx_completed(struct nvec_chip *nvec)
nvec 490 drivers/staging/nvec/nvec.c if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
nvec 491 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
nvec 492 drivers/staging/nvec/nvec.c (uint)nvec_msg_size(nvec->rx),
nvec 493 drivers/staging/nvec/nvec.c (uint)nvec->rx->pos);
nvec 495 drivers/staging/nvec/nvec.c nvec_msg_free(nvec, nvec->rx);
nvec 496 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 499 drivers/staging/nvec/nvec.c if (nvec->rx->data[0] == NVEC_BAT)
nvec 500 drivers/staging/nvec/nvec.c complete(&nvec->ec_transfer);
nvec 505 drivers/staging/nvec/nvec.c spin_lock(&nvec->rx_lock);
nvec 511 drivers/staging/nvec/nvec.c list_add_tail(&nvec->rx->node, &nvec->rx_data);
nvec 513 drivers/staging/nvec/nvec.c spin_unlock(&nvec->rx_lock);
nvec 515 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 517 drivers/staging/nvec/nvec.c if (!nvec_msg_is_event(nvec->rx))
nvec 518 drivers/staging/nvec/nvec.c complete(&nvec->ec_transfer);
nvec 520 drivers/staging/nvec/nvec.c schedule_work(&nvec->rx_work);
nvec 529 drivers/staging/nvec/nvec.c static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
nvec 532 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
nvec 533 drivers/staging/nvec/nvec.c status, nvec->state);
nvec 535 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 546 drivers/staging/nvec/nvec.c static void nvec_tx_set(struct nvec_chip *nvec)
nvec 548 drivers/staging/nvec/nvec.c spin_lock(&nvec->tx_lock);
nvec 549 drivers/staging/nvec/nvec.c if (list_empty(&nvec->tx_data)) {
nvec 550 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "empty tx - sending no-op\n");
nvec 551 drivers/staging/nvec/nvec.c memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
nvec 552 drivers/staging/nvec/nvec.c nvec->tx_scratch.size = 3;
nvec 553 drivers/staging/nvec/nvec.c nvec->tx_scratch.pos = 0;
nvec 554 drivers/staging/nvec/nvec.c nvec->tx = &nvec->tx_scratch;
nvec 555 drivers/staging/nvec/nvec.c list_add_tail(&nvec->tx->node, &nvec->tx_data);
nvec 557 drivers/staging/nvec/nvec.c nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
nvec 559 drivers/staging/nvec/nvec.c nvec->tx->pos = 0;
nvec 561 drivers/staging/nvec/nvec.c spin_unlock(&nvec->tx_lock);
nvec 563 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
nvec 564 drivers/staging/nvec/nvec.c (uint)nvec->tx->size, nvec->tx->data[1]);
nvec 582 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = dev;
nvec 583 drivers/staging/nvec/nvec.c unsigned int state = nvec->state;
nvec 585 drivers/staging/nvec/nvec.c status = readl(nvec->base + I2C_SL_STATUS);
nvec 589 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
nvec 593 drivers/staging/nvec/nvec.c dev_err(nvec->dev, "Spurious IRQ\n");
nvec 599 drivers/staging/nvec/nvec.c received = readl(nvec->base + I2C_SL_RCVD);
nvec 601 drivers/staging/nvec/nvec.c writel(0, nvec->base + I2C_SL_RCVD);
nvec 605 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 607 drivers/staging/nvec/nvec.c switch (nvec->state) {
nvec 610 drivers/staging/nvec/nvec.c nvec_invalid_flags(nvec, status, false);
nvec 614 drivers/staging/nvec/nvec.c nvec_invalid_flags(nvec, status, true);
nvec 616 drivers/staging/nvec/nvec.c nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
nvec 618 drivers/staging/nvec/nvec.c if (unlikely(!nvec->rx)) {
nvec 619 drivers/staging/nvec/nvec.c nvec->state = 0;
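The staging/nvec core above exposes an asynchronous send (nvec_write_async) and a synchronous request/response path (nvec_write_sync) backed by a completion. A sketch of the synchronous usage seen in the probe entries (get_firmware_version); the function name is illustrative and the command bytes follow the listing but should be treated as placeholders:

    /* Sketch: send a two-byte EC command, wait for the reply, then
     * return the message to the pool with nvec_msg_free(). */
    static int example_query_firmware(struct nvec_chip *nvec)
    {
            const unsigned char get_firmware_version[2] = { 0x07, 0x15 };
            struct nvec_msg *msg;
            int err;

            err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
            if (err < 0)
                    return err;     /* send failed or EC timed out */

            /* ... consume msg->data here ... */
            nvec_msg_free(nvec, msg);
            return 0;
    }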
nvec 622 drivers/staging/nvec/nvec.c nvec->rx->data[0] = received;
nvec 623 drivers/staging/nvec/nvec.c nvec->rx->pos = 1;
nvec 624 drivers/staging/nvec/nvec.c nvec->state = 2;
nvec 630 drivers/staging/nvec/nvec.c if (nvec->rx->data[0] != 0x01) {
nvec 631 drivers/staging/nvec/nvec.c dev_err(nvec->dev,
nvec 633 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 636 drivers/staging/nvec/nvec.c nvec_msg_free(nvec, nvec->rx);
nvec 637 drivers/staging/nvec/nvec.c nvec->state = 3;
nvec 638 drivers/staging/nvec/nvec.c nvec_tx_set(nvec);
nvec 639 drivers/staging/nvec/nvec.c to_send = nvec->tx->data[0];
nvec 640 drivers/staging/nvec/nvec.c nvec->tx->pos = 1;
nvec 642 drivers/staging/nvec/nvec.c nvec->rx->data[1] = received;
nvec 643 drivers/staging/nvec/nvec.c nvec->rx->pos = 2;
nvec 644 drivers/staging/nvec/nvec.c nvec->state = 4;
nvec 646 drivers/staging/nvec/nvec.c nvec_invalid_flags(nvec, status, true);
nvec 651 drivers/staging/nvec/nvec.c nvec_tx_completed(nvec);
nvec 653 drivers/staging/nvec/nvec.c nvec_invalid_flags(nvec, status, true);
nvec 654 drivers/staging/nvec/nvec.c } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
nvec 655 drivers/staging/nvec/nvec.c to_send = nvec->tx->data[nvec->tx->pos++];
nvec 657 drivers/staging/nvec/nvec.c dev_err(nvec->dev,
nvec 659 drivers/staging/nvec/nvec.c nvec->tx,
nvec 660 drivers/staging/nvec/nvec.c (uint)(nvec->tx ? nvec->tx->pos : 0),
nvec 661 drivers/staging/nvec/nvec.c (uint)(nvec->tx ? nvec->tx->size : 0));
nvec 662 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 667 drivers/staging/nvec/nvec.c nvec_rx_completed(nvec);
nvec 669 drivers/staging/nvec/nvec.c nvec_invalid_flags(nvec, status, true);
nvec 670 drivers/staging/nvec/nvec.c else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
nvec 671 drivers/staging/nvec/nvec.c nvec->rx->data[nvec->rx->pos++] = received;
nvec 673 drivers/staging/nvec/nvec.c dev_err(nvec->dev,
nvec 675 drivers/staging/nvec/nvec.c nvec->rx, nvec->rx ? nvec->rx->pos : 0,
nvec 679 drivers/staging/nvec/nvec.c nvec->state = 0;
nvec 684 drivers/staging/nvec/nvec.c if (received != nvec->i2c_addr)
nvec 685 drivers/staging/nvec/nvec.c dev_err(nvec->dev,
nvec 687 drivers/staging/nvec/nvec.c received, nvec->i2c_addr);
nvec 688 drivers/staging/nvec/nvec.c nvec->state = 1;
nvec 693 drivers/staging/nvec/nvec.c writel(to_send, nvec->base + I2C_SL_RCVD);
nvec 697 drivers/staging/nvec/nvec.c nvec_gpio_set_value(nvec, 1);
nvec 699 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev,
nvec 721 drivers/staging/nvec/nvec.c static void tegra_init_i2c_slave(struct nvec_chip *nvec)
nvec 725 drivers/staging/nvec/nvec.c clk_prepare_enable(nvec->i2c_clk);
nvec 727 drivers/staging/nvec/nvec.c reset_control_assert(nvec->rst);
nvec 729 drivers/staging/nvec/nvec.c reset_control_deassert(nvec->rst);
nvec 733 drivers/staging/nvec/nvec.c writel(val, nvec->base + I2C_CNFG);
nvec 735 drivers/staging/nvec/nvec.c clk_set_rate(nvec->i2c_clk, 8 * 80000);
nvec 737 drivers/staging/nvec/nvec.c writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
nvec 738 drivers/staging/nvec/nvec.c writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
nvec 740 drivers/staging/nvec/nvec.c writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
nvec 741 drivers/staging/nvec/nvec.c writel(0, nvec->base + I2C_SL_ADDR2);
nvec 743 drivers/staging/nvec/nvec.c enable_irq(nvec->irq);
nvec 747 drivers/staging/nvec/nvec.c static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
nvec 749 drivers/staging/nvec/nvec.c disable_irq(nvec->irq);
nvec 750 drivers/staging/nvec/nvec.c writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
nvec 751 drivers/staging/nvec/nvec.c clk_disable_unprepare(nvec->i2c_clk);
nvec 768 drivers/staging/nvec/nvec.c struct nvec_chip *nvec;
nvec 780 drivers/staging/nvec/nvec.c nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
nvec 781 drivers/staging/nvec/nvec.c if (!nvec)
nvec 784 drivers/staging/nvec/nvec.c platform_set_drvdata(pdev, nvec);
nvec 785 drivers/staging/nvec/nvec.c nvec->dev = dev;
nvec 787 drivers/staging/nvec/nvec.c if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
nvec 796 drivers/staging/nvec/nvec.c nvec->irq = platform_get_irq(pdev, 0);
nvec 797 drivers/staging/nvec/nvec.c if (nvec->irq < 0)
nvec 806 drivers/staging/nvec/nvec.c nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
nvec 807 drivers/staging/nvec/nvec.c if (IS_ERR(nvec->rst)) {
nvec 809 drivers/staging/nvec/nvec.c return PTR_ERR(nvec->rst);
nvec 812 drivers/staging/nvec/nvec.c nvec->base = base;
nvec 813 drivers/staging/nvec/nvec.c nvec->i2c_clk = i2c_clk;
nvec 814 drivers/staging/nvec/nvec.c nvec->rx = &nvec->msg_pool[0];
nvec 816 drivers/staging/nvec/nvec.c ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
nvec 818 drivers/staging/nvec/nvec.c init_completion(&nvec->sync_write);
nvec 819 drivers/staging/nvec/nvec.c init_completion(&nvec->ec_transfer);
nvec 820 drivers/staging/nvec/nvec.c mutex_init(&nvec->sync_write_mutex);
nvec 821 drivers/staging/nvec/nvec.c spin_lock_init(&nvec->tx_lock);
nvec 822 drivers/staging/nvec/nvec.c spin_lock_init(&nvec->rx_lock);
nvec 823 drivers/staging/nvec/nvec.c INIT_LIST_HEAD(&nvec->rx_data);
nvec 824 drivers/staging/nvec/nvec.c INIT_LIST_HEAD(&nvec->tx_data);
nvec 825 drivers/staging/nvec/nvec.c INIT_WORK(&nvec->rx_work, nvec_dispatch);
nvec 826 drivers/staging/nvec/nvec.c INIT_WORK(&nvec->tx_work, nvec_request_master);
nvec 828 drivers/staging/nvec/nvec.c nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
nvec 829 drivers/staging/nvec/nvec.c if (IS_ERR(nvec->gpiod)) {
nvec 831 drivers/staging/nvec/nvec.c return PTR_ERR(nvec->gpiod);
nvec 834 drivers/staging/nvec/nvec.c err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
nvec 835 drivers/staging/nvec/nvec.c "nvec", nvec);
nvec 840 drivers/staging/nvec/nvec.c disable_irq(nvec->irq);
nvec 842 drivers/staging/nvec/nvec.c tegra_init_i2c_slave(nvec);
nvec 845 drivers/staging/nvec/nvec.c nvec_toggle_global_events(nvec, true);
nvec 847 drivers/staging/nvec/nvec.c nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
nvec 848 drivers/staging/nvec/nvec.c nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
nvec 850 drivers/staging/nvec/nvec.c nvec_power_handle = nvec;
nvec 854 drivers/staging/nvec/nvec.c err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
nvec 862 drivers/staging/nvec/nvec.c nvec_msg_free(nvec, msg);
nvec 871 drivers/staging/nvec/nvec.c nvec_write_async(nvec, unmute_speakers, 4);
nvec 875 drivers/staging/nvec/nvec.c nvec_write_async(nvec, enable_event, 7);
nvec 879 drivers/staging/nvec/nvec.c nvec_write_async(nvec, enable_event, 7);
nvec 886 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = platform_get_drvdata(pdev);
nvec 888 drivers/staging/nvec/nvec.c nvec_toggle_global_events(nvec, false);
nvec 889 drivers/staging/nvec/nvec.c mfd_remove_devices(nvec->dev);
nvec 890 drivers/staging/nvec/nvec.c nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
nvec 891 drivers/staging/nvec/nvec.c cancel_work_sync(&nvec->rx_work);
nvec 892 drivers/staging/nvec/nvec.c cancel_work_sync(&nvec->tx_work);
nvec 903 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = dev_get_drvdata(dev);
nvec 907 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "suspending\n");
nvec 910 drivers/staging/nvec/nvec.c nvec_toggle_global_events(nvec, false);
nvec 912 drivers/staging/nvec/nvec.c err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
nvec 914 drivers/staging/nvec/nvec.c nvec_msg_free(nvec, msg);
nvec 916 drivers/staging/nvec/nvec.c nvec_disable_i2c_slave(nvec);
nvec 923 drivers/staging/nvec/nvec.c struct nvec_chip *nvec = dev_get_drvdata(dev);
nvec 925 drivers/staging/nvec/nvec.c dev_dbg(nvec->dev, "resuming\n");
nvec 926 drivers/staging/nvec/nvec.c tegra_init_i2c_slave(nvec);
nvec 927 drivers/staging/nvec/nvec.c nvec_toggle_global_events(nvec, true);
nvec 164 drivers/staging/nvec/nvec.h int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
nvec 167 drivers/staging/nvec/nvec.h int nvec_write_sync(struct nvec_chip *nvec,
nvec 171 drivers/staging/nvec/nvec.h int nvec_register_notifier(struct nvec_chip *nvec,
nvec 177 drivers/staging/nvec/nvec.h void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
nvec 34 drivers/staging/nvec/nvec_kbd.c struct nvec_chip *nvec;
nvec 50 drivers/staging/nvec/nvec_kbd.c nvec_write_async(keys_dev.nvec, buf, sizeof(buf));
nvec 88 drivers/staging/nvec/nvec_kbd.c struct nvec_chip *nvec = keys_dev.nvec;
nvec 101 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, buf, sizeof(buf));
nvec 108 drivers/staging/nvec/nvec_kbd.c struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
nvec 145 drivers/staging/nvec/nvec_kbd.c keys_dev.nvec = nvec;
nvec 146 drivers/staging/nvec/nvec_kbd.c nvec_register_notifier(nvec, &keys_dev.notifier, 0);
nvec 149 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, enable_kbd, 2);
nvec 152 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, cnfg_wake, 4);
nvec 154 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, cnfg_wake_key_reporting, 3);
nvec 157 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, clear_leds, sizeof(clear_leds));
nvec 164 drivers/staging/nvec/nvec_kbd.c struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
nvec 168 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, uncnfg_wake_key_reporting, 3);
nvec 169 drivers/staging/nvec/nvec_kbd.c nvec_write_async(nvec, disable_kbd, 2);
nvec 170 drivers/staging/nvec/nvec_kbd.c nvec_unregister_notifier(nvec, &keys_dev.notifier);
nvec 26 drivers/staging/nvec/nvec_paz00.c struct nvec_chip *nvec;
nvec 37 drivers/staging/nvec/nvec_paz00.c nvec_write_async(led->nvec, buf, sizeof(buf));
nvec 44 drivers/staging/nvec/nvec_paz00.c struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
nvec 57 drivers/staging/nvec/nvec_paz00.c led->nvec = nvec;
nvec 26 drivers/staging/nvec/nvec_power.c struct nvec_chip *nvec;
nvec 116 drivers/staging/nvec/nvec_power.c nvec_write_async(power->nvec, buf, 2);
nvec 358 drivers/staging/nvec/nvec_power.c nvec_write_async(power->nvec, buf, 2);
nvec 367 drivers/staging/nvec/nvec_power.c nvec_write_async(power->nvec, buf, 2);
nvec 377 drivers/staging/nvec/nvec_power.c struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
nvec 385 drivers/staging/nvec/nvec_power.c power->nvec = nvec;
nvec 409 drivers/staging/nvec/nvec_power.c nvec_register_notifier(nvec, &power->notifier, NVEC_SYS);
nvec 424 drivers/staging/nvec/nvec_power.c nvec_unregister_notifier(power->nvec, &power->notifier);
nvec 44 drivers/staging/nvec/nvec_ps2.c struct nvec_chip *nvec;
nvec 53 drivers/staging/nvec/nvec_ps2.c return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
nvec 60 drivers/staging/nvec/nvec_ps2.c nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
nvec 70 drivers/staging/nvec/nvec_ps2.c return nvec_write_async(ps2_dev.nvec, buf, sizeof(buf));
nvec 103 drivers/staging/nvec/nvec_ps2.c struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
nvec 120 drivers/staging/nvec/nvec_ps2.c ps2_dev.nvec = nvec;
nvec 121 drivers/staging/nvec/nvec_ps2.c nvec_register_notifier(nvec, &ps2_dev.notifier, 0);
nvec 130 drivers/staging/nvec/nvec_ps2.c struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
nvec 134 drivers/staging/nvec/nvec_ps2.c nvec_unregister_notifier(nvec, &ps2_dev.notifier);
nvec 568 drivers/target/iscsi/iscsi_target.c static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
nvec 883 drivers/target/iscsi/iscsi_target.c static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
nvec 912 drivers/target/iscsi/iscsi_target.c if (WARN_ON_ONCE(!sg || i >= nvec))
nvec 932 drivers/target/iscsi/iscsi_target.c data_offset, orig_data_length, i, nvec);
nvec 1045 drivers/thunderbolt/nhi.c int res, irq, nvec;
nvec 1060 drivers/thunderbolt/nhi.c nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
nvec 1062 drivers/thunderbolt/nhi.c if (nvec < 0) {
nvec 1063 drivers/thunderbolt/nhi.c nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
nvec 1064 drivers/thunderbolt/nhi.c if (nvec < 0)
nvec 1065 drivers/thunderbolt/nhi.c return nvec;
nvec 247 drivers/vfio/pci/vfio_pci_intrs.c static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
nvec 256 drivers/vfio/pci/vfio_pci_intrs.c vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
nvec 261 drivers/vfio/pci/vfio_pci_intrs.c ret = pci_alloc_irq_vectors(pdev, 1, nvec, flag);
nvec 262 drivers/vfio/pci/vfio_pci_intrs.c if (ret < nvec) {
nvec 269 drivers/vfio/pci/vfio_pci_intrs.c vdev->num_ctx = nvec;
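The nvec_kbd, nvec_power and nvec_ps2 entries above all follow the same client pattern: fetch the parent chip via dev_get_drvdata(pdev->dev.parent), register a notifier for EC events, and issue commands with nvec_write_async(). A sketch of that pattern; example_notifier, example_probe and the command bytes are illustrative, only the registration calls mirror the listing:

    /* Sketch of a staging/nvec sub-driver client. */
    static int example_notifier(struct notifier_block *nb,
                                unsigned long event_type, void *data)
    {
            /* ... decode the EC event payload in data ... */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_notifier,
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
            const unsigned char enable_cmd[2] = { 0x05, 0xf4 }; /* illustrative */

            nvec_register_notifier(nvec, &example_nb, 0);
            return nvec_write_async(nvec, enable_cmd, sizeof(enable_cmd));
    }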
nvec 278 drivers/vfio/pci/vfio_pci_intrs.c vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
nvec 384 drivers/xen/events/events_base.c static int __must_check xen_allocate_irqs_dynamic(int nvec)
nvec 386 drivers/xen/events/events_base.c int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
nvec 389 drivers/xen/events/events_base.c for (i = 0; i < nvec; i++)
nvec 731 drivers/xen/events/events_base.c int pirq, int nvec, const char *name, domid_t domid)
nvec 737 drivers/xen/events/events_base.c irq = xen_allocate_irqs_dynamic(nvec);
nvec 741 drivers/xen/events/events_base.c for (i = 0; i < nvec; i++) {
nvec 757 drivers/xen/events/events_base.c while (nvec--)
nvec 758 drivers/xen/events/events_base.c __unbind_from_irq(irq + nvec);
nvec 440 fs/cifs/cifsproto.h unsigned int *nbytes, struct kvec *iov, const int nvec);
nvec 816 fs/cifs/smb2misc.c smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
nvec 851 fs/cifs/smb2misc.c for (i = 0; i < nvec; i++) {
nvec 259 fs/cifs/smb2proto.h struct kvec *iov, int nvec);
nvec 276 fs/cifs/transport.c int nvec;
nvec 282 fs/cifs/transport.c nvec = rqst->rq_nvec - 1;
nvec 285 fs/cifs/transport.c nvec = rqst->rq_nvec;
nvec 289 fs/cifs/transport.c for (i = 0; i < nvec; i++)
nvec 631 fs/dlm/lowcomms.c int nvec;
nvec 662 fs/dlm/lowcomms.c nvec = 1;
nvec 672 fs/dlm/lowcomms.c nvec = 2;
nvec 675 fs/dlm/lowcomms.c iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);
nvec 345 include/linux/interrupt.h irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
nvec 382 include/linux/interrupt.h irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
nvec 184 include/linux/msi.h struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
nvec 202 include/linux/msi.h int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
nvec 218 include/linux/msi.h int nvec, int type);
nvec 266 include/linux/msi.h struct device *dev, int nvec,
nvec 331 include/linux/msi.h int nvec);
nvec 338 include/linux/msi.h int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
nvec 344 include/linux/msi.h int nvec, msi_alloc_info_t *args);
nvec 346 include/linux/msi.h int virq, int nvec, msi_alloc_info_t *args);
nvec 349 include/linux/msi.h unsigned int nvec,
nvec 355 include/linux/msi.h #define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
nvec 356 include/linux/msi.h __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
nvec 357 include/linux/msi.h #define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
nvec 358 include/linux/msi.h __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
nvec 363 include/linux/msi.h unsigned int nvec);
nvec 1443 include/linux/pci.h struct msix_entry *entries, int nvec)
nvec 1445 include/linux/pci.h int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
nvec 1472 include/linux/pci.h struct msix_entry *entries, int nvec)
nvec 113 include/xen/events.h int pirq, int nvec, const char *name, domid_t domid);
nvec 29 kernel/irq/msi.c struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
nvec 40 kernel/irq/msi.c desc->nvec_used = nvec;
nvec 43 kernel/irq/msi.c nvec * sizeof(*desc->affinity), GFP_KERNEL);
nvec 198 kernel/irq/msi.c int nvec, msi_alloc_info_t *arg)
nvec 302 kernel/irq/msi.c int nvec, msi_alloc_info_t *arg)
nvec 310 kernel/irq/msi.c ret = ops->msi_prepare(domain, dev, nvec, arg);
nvec 316 kernel/irq/msi.c int virq, int nvec, msi_alloc_info_t *arg)
nvec 330 kernel/irq/msi.c if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
nvec 346 kernel/irq/msi.c if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
nvec 400 kernel/irq/msi.c int nvec)
nvec 410 kernel/irq/msi.c ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);