num_vfs 176 arch/powerpc/include/asm/machdep.h int (*pcibios_sriov_enable)(struct pci_dev *pdev, u16 num_vfs);
num_vfs 207 arch/powerpc/include/asm/pci-bridge.h u16 num_vfs; /* number of VFs enabled*/
num_vfs 246 arch/powerpc/kernel/pci-common.c int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
num_vfs 249 arch/powerpc/kernel/pci-common.c return ppc_md.pcibios_sriov_enable(pdev, num_vfs);
num_vfs 965 arch/powerpc/platforms/powernv/pci-ioda.c u16 num_vfs;
num_vfs 978 arch/powerpc/platforms/powernv/pci-ioda.c num_vfs = pdn->num_vfs;
num_vfs 993 arch/powerpc/platforms/powernv/pci-ioda.c res2.end = res2.start + (size * num_vfs) - 1;
num_vfs 997 arch/powerpc/platforms/powernv/pci-ioda.c i, &res2, res, num_vfs, offset);
num_vfs 1021 arch/powerpc/platforms/powernv/pci-ioda.c num_vfs, offset);
num_vfs 1301 arch/powerpc/platforms/powernv/pci-ioda.c static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
num_vfs 1316 arch/powerpc/platforms/powernv/pci-ioda.c m64_bars = num_vfs;
num_vfs 1334 arch/powerpc/platforms/powernv/pci-ioda.c static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
num_vfs 1356 arch/powerpc/platforms/powernv/pci-ioda.c m64_bars = num_vfs;
num_vfs 1435 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_vf_release_m64(pdev, num_vfs);
num_vfs 1500 arch/powerpc/platforms/powernv/pci-ioda.c u16 num_vfs, i;
num_vfs 1506 arch/powerpc/platforms/powernv/pci-ioda.c num_vfs = pdn->num_vfs;
num_vfs 1516 arch/powerpc/platforms/powernv/pci-ioda.c pnv_pci_vf_release_m64(pdev, num_vfs);
num_vfs 1520 arch/powerpc/platforms/powernv/pci-ioda.c for (i = 0; i < num_vfs; i++) {
num_vfs 1528 arch/powerpc/platforms/powernv/pci-ioda.c bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
num_vfs 1541 arch/powerpc/platforms/powernv/pci-ioda.c static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
num_vfs 1560 arch/powerpc/platforms/powernv/pci-ioda.c for (vf_index = 0; vf_index < num_vfs; vf_index++) {
num_vfs 1613 arch/powerpc/platforms/powernv/pci-ioda.c int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
num_vfs 1639 arch/powerpc/platforms/powernv/pci-ioda.c if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
num_vfs 1646 arch/powerpc/platforms/powernv/pci-ioda.c pdn->pe_num_map = kmalloc_array(num_vfs,
num_vfs 1656 arch/powerpc/platforms/powernv/pci-ioda.c for (i = 0; i < num_vfs; i++)
num_vfs 1661 arch/powerpc/platforms/powernv/pci-ioda.c for (i = 0; i < num_vfs; i++) {
num_vfs 1674 arch/powerpc/platforms/powernv/pci-ioda.c 0, num_vfs, 0);
num_vfs 1677 arch/powerpc/platforms/powernv/pci-ioda.c dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
num_vfs 1681 arch/powerpc/platforms/powernv/pci-ioda.c bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
num_vfs 1684 arch/powerpc/platforms/powernv/pci-ioda.c pdn->num_vfs = num_vfs;
num_vfs 1687 arch/powerpc/platforms/powernv/pci-ioda.c ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
num_vfs 1706 arch/powerpc/platforms/powernv/pci-ioda.c pnv_ioda_setup_vf_PE(pdev, num_vfs);
num_vfs 1712 arch/powerpc/platforms/powernv/pci-ioda.c for (i = 0; i < num_vfs; i++) {
num_vfs 1720 arch/powerpc/platforms/powernv/pci-ioda.c bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
num_vfs 1737 arch/powerpc/platforms/powernv/pci-ioda.c int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
num_vfs 1742 arch/powerpc/platforms/powernv/pci-ioda.c return pnv_pci_sriov_enable(pdev, num_vfs);
num_vfs 59 arch/powerpc/platforms/pseries/pci.c u16 num_vfs,
num_vfs 79 arch/powerpc/platforms/pseries/pci.c num_vfs * sizeof(struct pe_map_bar_entry));
num_vfs 105 arch/powerpc/platforms/pseries/pci.c int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
num_vfs 119 arch/powerpc/platforms/pseries/pci.c for (vf_index = 0; vf_index < num_vfs; vf_index++) {
num_vfs 138 arch/powerpc/platforms/pseries/pci.c rc = pseries_send_map_pe(pdev, num_vfs, vf_pe_array);
num_vfs 141 arch/powerpc/platforms/pseries/pci.c for (vf_index = 0; vf_index < num_vfs; vf_index++)
num_vfs 149 arch/powerpc/platforms/pseries/pci.c int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
num_vfs 164 arch/powerpc/platforms/pseries/pci.c if (max_config_vfs < num_vfs && num_vfs > MAX_VFS_FOR_MAP_PE) {
num_vfs 167 arch/powerpc/platforms/pseries/pci.c num_vfs, (num_vfs > MAX_VFS_FOR_MAP_PE) ?
num_vfs 173 arch/powerpc/platforms/pseries/pci.c pdn->pe_num_map = kmalloc_array(num_vfs,
num_vfs 179 arch/powerpc/platforms/pseries/pci.c rc = pseries_associate_pes(pdev, num_vfs);
num_vfs 192 arch/powerpc/platforms/pseries/pci.c int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
num_vfs 196 arch/powerpc/platforms/pseries/pci.c return pseries_pci_sriov_enable(pdev, num_vfs);
num_vfs 20 drivers/crypto/cavium/cpt/cptpf_main.c static u32 num_vfs = 4; /* Default 4 VF enabled */
num_vfs 21 drivers/crypto/cavium/cpt/cptpf_main.c module_param(num_vfs, uint, 0444);
num_vfs 22 drivers/crypto/cavium/cpt/cptpf_main.c MODULE_PARM_DESC(num_vfs, "Number of VFs to enable(1-16)");
num_vfs 501 drivers/crypto/cavium/cpt/cptpf_main.c static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
num_vfs 514 drivers/crypto/cavium/cpt/cptpf_main.c cpt->num_vf_en = num_vfs; /* User requested VFs */
num_vfs 547 drivers/crypto/cavium/cpt/cptpf_main.c if (num_vfs > 16 || num_vfs < 4) {
num_vfs 549 drivers/crypto/cavium/cpt/cptpf_main.c num_vfs);
num_vfs 550 drivers/crypto/cavium/cpt/cptpf_main.c num_vfs = 4;
num_vfs 605 drivers/crypto/cavium/cpt/cptpf_main.c err = cpt_sriov_init(cpt, num_vfs);
num_vfs 160 drivers/crypto/cavium/nitrox/nitrox_dev.h int num_vfs;
num_vfs 53 drivers/crypto/cavium/nitrox/nitrox_main.c int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
num_vfs 55 drivers/crypto/cavium/nitrox/nitrox_main.c int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 171 drivers/crypto/cavium/nitrox/nitrox_mbx.c ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
num_vfs 176 drivers/crypto/cavium/nitrox/nitrox_mbx.c for (i = 0; i < ndev->iov.num_vfs; i++) {
num_vfs 15 drivers/crypto/cavium/nitrox/nitrox_sriov.c static inline bool num_vfs_valid(int num_vfs)
num_vfs 19 drivers/crypto/cavium/nitrox/nitrox_sriov.c switch (num_vfs) {
num_vfs 31 drivers/crypto/cavium/nitrox/nitrox_sriov.c static inline enum vf_mode num_vfs_to_mode(int num_vfs)
num_vfs 35 drivers/crypto/cavium/nitrox/nitrox_sriov.c switch (num_vfs) {
num_vfs 153 drivers/crypto/cavium/nitrox/nitrox_sriov.c static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
num_vfs 158 drivers/crypto/cavium/nitrox/nitrox_sriov.c if (!num_vfs_valid(num_vfs)) {
num_vfs 159 drivers/crypto/cavium/nitrox/nitrox_sriov.c dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
num_vfs 163 drivers/crypto/cavium/nitrox/nitrox_sriov.c if (pci_num_vf(pdev) == num_vfs)
num_vfs 164 drivers/crypto/cavium/nitrox/nitrox_sriov.c return num_vfs;
num_vfs 166 drivers/crypto/cavium/nitrox/nitrox_sriov.c err = pci_enable_sriov(pdev, num_vfs);
num_vfs 171 drivers/crypto/cavium/nitrox/nitrox_sriov.c dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
num_vfs 173 drivers/crypto/cavium/nitrox/nitrox_sriov.c ndev->mode = num_vfs_to_mode(num_vfs);
num_vfs 174 drivers/crypto/cavium/nitrox/nitrox_sriov.c ndev->iov.num_vfs = num_vfs;
num_vfs 188 drivers/crypto/cavium/nitrox/nitrox_sriov.c return num_vfs;
num_vfs 194 drivers/crypto/cavium/nitrox/nitrox_sriov.c ndev->iov.num_vfs = 0;
num_vfs 216 drivers/crypto/cavium/nitrox/nitrox_sriov.c ndev->iov.num_vfs = 0;
num_vfs 228 drivers/crypto/cavium/nitrox/nitrox_sriov.c int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 230 drivers/crypto/cavium/nitrox/nitrox_sriov.c if (!num_vfs)
num_vfs 233 drivers/crypto/cavium/nitrox/nitrox_sriov.c return nitrox_sriov_enable(pdev, num_vfs);
num_vfs 169 drivers/crypto/hisilicon/zip/zip_main.c u32 num_vfs;
num_vfs 377 drivers/crypto/hisilicon/zip/zip_main.c if (val > ctrl->num_vfs)
num_vfs 384 drivers/crypto/hisilicon/zip/zip_main.c vfq_num = (qm->ctrl_qp_num - qm->qp_num) / ctrl->num_vfs;
num_vfs 385 drivers/crypto/hisilicon/zip/zip_main.c if (val == ctrl->num_vfs)
num_vfs 387 drivers/crypto/hisilicon/zip/zip_main.c qm->qp_num - (ctrl->num_vfs - 1) * vfq_num;
num_vfs 740 drivers/crypto/hisilicon/zip/zip_main.c static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, int num_vfs)
num_vfs 748 drivers/crypto/hisilicon/zip/zip_main.c if (!num_vfs)
num_vfs 752 drivers/crypto/hisilicon/zip/zip_main.c if (remain_q_num < num_vfs)
num_vfs 755 drivers/crypto/hisilicon/zip/zip_main.c q_num = remain_q_num / num_vfs;
num_vfs 756 drivers/crypto/hisilicon/zip/zip_main.c for (i = 1; i <= num_vfs; i++) {
num_vfs 757 drivers/crypto/hisilicon/zip/zip_main.c if (i == num_vfs)
num_vfs 758 drivers/crypto/hisilicon/zip/zip_main.c q_num += remain_q_num % num_vfs;
num_vfs 772 drivers/crypto/hisilicon/zip/zip_main.c u32 i, num_vfs = ctrl->num_vfs;
num_vfs 775 drivers/crypto/hisilicon/zip/zip_main.c for (i = 1; i <= num_vfs; i++) {
num_vfs 781 drivers/crypto/hisilicon/zip/zip_main.c ctrl->num_vfs = 0;
num_vfs 789 drivers/crypto/hisilicon/zip/zip_main.c int pre_existing_vfs, num_vfs, ret;
num_vfs 799 drivers/crypto/hisilicon/zip/zip_main.c num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
num_vfs 801 drivers/crypto/hisilicon/zip/zip_main.c ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
num_vfs 807 drivers/crypto/hisilicon/zip/zip_main.c hisi_zip->ctrl->num_vfs = num_vfs;
num_vfs 809 drivers/crypto/hisilicon/zip/zip_main.c ret = pci_enable_sriov(pdev, num_vfs);
num_vfs 816 drivers/crypto/hisilicon/zip/zip_main.c return num_vfs;
num_vfs 835 drivers/crypto/hisilicon/zip/zip_main.c static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 837 drivers/crypto/hisilicon/zip/zip_main.c if (num_vfs == 0)
num_vfs 840 drivers/crypto/hisilicon/zip/zip_main.c return hisi_zip_sriov_enable(pdev, num_vfs);
num_vfs 848 drivers/crypto/hisilicon/zip/zip_main.c if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
num_vfs 343 drivers/crypto/qat/qat_common/adf_pf2vf_msg.c int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
num_vfs 345 drivers/crypto/qat/qat_common/adf_pf2vf_msg.c for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
num_vfs 226 drivers/fpga/dfl-pci.c static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
num_vfs 232 drivers/fpga/dfl-pci.c if (!num_vfs) {
num_vfs 246 drivers/fpga/dfl-pci.c ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
num_vfs 250 drivers/fpga/dfl-pci.c ret = pci_enable_sriov(pcidev, num_vfs);
num_vfs 257 drivers/fpga/dfl-pci.c return num_vfs;
num_vfs 1205 drivers/fpga/dfl.c int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
num_vfs 1216 drivers/fpga/dfl.c if (cdev->released_port_num != num_vfs) {
num_vfs 173 drivers/infiniband/hw/bnxt_re/bnxt_re.h u32 num_vfs;
num_vfs 127 drivers/infiniband/hw/bnxt_re/main.c u32 num_vfs;
num_vfs 146 drivers/infiniband/hw/bnxt_re/main.c if (rdev->num_vfs) {
num_vfs 152 drivers/infiniband/hw/bnxt_re/main.c num_vfs = 100 * rdev->num_vfs;
num_vfs 153 drivers/infiniband/hw/bnxt_re/main.c vf_qps = (rdev->qplib_ctx.qpc_count * vf_pct) / num_vfs;
num_vfs 154 drivers/infiniband/hw/bnxt_re/main.c vf_srqs = (rdev->qplib_ctx.srqc_count * vf_pct) / num_vfs;
num_vfs 155 drivers/infiniband/hw/bnxt_re/main.c vf_cqs = (rdev->qplib_ctx.cq_count * vf_pct) / num_vfs;
num_vfs 166 drivers/infiniband/hw/bnxt_re/main.c vf_mrws = rdev->qplib_ctx.mrw_count * vf_pct / num_vfs;
num_vfs 169 drivers/infiniband/hw/bnxt_re/main.c BNXT_RE_RESVD_MR_FOR_PF) / rdev->num_vfs;
num_vfs 188 drivers/infiniband/hw/bnxt_re/main.c static void bnxt_re_sriov_config(void *p, int num_vfs)
num_vfs 195 drivers/infiniband/hw/bnxt_re/main.c rdev->num_vfs = num_vfs;
num_vfs 1873 drivers/infiniband/hw/i40iw/i40iw_main.c u32 num_vfs)
num_vfs 1881 drivers/infiniband/hw/i40iw/i40iw_main.c if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
num_vfs 1884 drivers/infiniband/hw/i40iw/i40iw_main.c hdl->device.max_enabled_vfs = num_vfs;
num_vfs 218 drivers/infiniband/hw/mlx4/alias_GUID.c if (slave_id >= dev->dev->persist->num_vfs + 1)
num_vfs 592 drivers/infiniband/hw/mlx4/alias_GUID.c if (!entry || entry > dev->dev->persist->num_vfs ||
num_vfs 2171 drivers/infiniband/hw/mlx4/mad.c (u16)(dev->dev->persist->num_vfs + 1));
num_vfs 2408 drivers/infiniband/hw/mlx4/main.c for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
num_vfs 775 drivers/infiniband/hw/mlx4/sysfs.c for (i = 0; i <= device->dev->persist->num_vfs; ++i)
num_vfs 790 drivers/infiniband/hw/mlx4/sysfs.c for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
num_vfs 1121 drivers/misc/genwqe/card_base.c cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
num_vfs 1122 drivers/misc/genwqe/card_base.c if (cd->num_vfs < 0)
num_vfs 1123 drivers/misc/genwqe/card_base.c cd->num_vfs = 0;
num_vfs 303 drivers/misc/genwqe/card_base.h int num_vfs;
num_vfs 182 drivers/misc/genwqe/card_debugfs.c for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
num_vfs 202 drivers/misc/genwqe/card_debugfs.c for (vf_num = 0; vf_num < cd->num_vfs; vf_num++) {
num_vfs 544 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
num_vfs 616 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
num_vfs 980 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 num_vfs;
num_vfs 1004 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h __le16 num_vfs;
num_vfs 352 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
num_vfs 357 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c for (i = 0; i < num_vfs; i++) {
num_vfs 364 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
num_vfs 373 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
num_vfs 405 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
num_vfs 414 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c bnxt_set_vf_attr(bp, num_vfs);
num_vfs 416 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
num_vfs 430 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
num_vfs 503 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
num_vfs 548 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_cp_rings /= num_vfs;
num_vfs 549 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_tx_rings /= num_vfs;
num_vfs 550 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_rx_rings /= num_vfs;
num_vfs 551 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_vnics /= num_vfs;
num_vfs 552 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_stat_ctx /= num_vfs;
num_vfs 553 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_ring_grps /= num_vfs;
num_vfs 571 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c req.max_msix = cpu_to_le16(vf_msix / num_vfs);
num_vfs 574 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c for (i = 0; i < num_vfs; i++) {
num_vfs 609 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
num_vfs 622 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
num_vfs 623 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
num_vfs 626 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c num_vfs;
num_vfs 629 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c num_vfs;
num_vfs 630 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
num_vfs 631 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
num_vfs 632 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
num_vfs 662 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c for (i = 0; i < num_vfs; i++) {
num_vfs 681 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
num_vfs 682 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
num_vfs 683 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
num_vfs 684 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_rsscos_ctxs -= num_vfs;
num_vfs 685 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
num_vfs 686 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c hw_resc->max_vnics -= vf_vnics * num_vfs;
num_vfs 692 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
num_vfs 695 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
num_vfs 697 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c return bnxt_hwrm_func_cfg(bp, num_vfs);
num_vfs 700 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
num_vfs 710 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c rc = bnxt_func_cfg(bp, *num_vfs, reset);
num_vfs 711 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c if (rc != *num_vfs) {
num_vfs 714 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c *num_vfs = 0;
num_vfs 719 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c *num_vfs = rc;
num_vfs 722 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c bnxt_ulp_sriov_cfg(bp, *num_vfs);
num_vfs 726 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
num_vfs 738 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c vfs_supported = *num_vfs;
num_vfs 781 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c if (vfs_supported != *num_vfs) {
num_vfs 783 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c *num_vfs, vfs_supported);
num_vfs 784 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c *num_vfs = vfs_supported;
num_vfs 787 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c rc = bnxt_alloc_vf_resources(bp, *num_vfs);
num_vfs 791 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
num_vfs 795 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c rc = pci_enable_sriov(bp->pdev, *num_vfs);
num_vfs 803 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
num_vfs 813 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c u16 num_vfs = pci_num_vf(bp->pdev);
num_vfs 815 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c if (!num_vfs)
num_vfs 826 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c num_vfs);
num_vfs 830 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
num_vfs 845 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 871 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c num_vfs = 0;
num_vfs 876 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c if (num_vfs && num_vfs == bp->pf.active_vfs)
num_vfs 881 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c if (!num_vfs)
num_vfs 884 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c bnxt_sriov_enable(bp, &num_vfs);
num_vfs 890 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c return num_vfs;
num_vfs 1177 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
num_vfs 1179 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c if (*num_vfs)
num_vfs 38 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
num_vfs 39 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
num_vfs 302 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
num_vfs 322 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c ops->ulp_sriov_config(ulp->handle, num_vfs);
num_vfs 96 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
num_vfs 263 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c u16 num_vfs, i;
num_vfs 268 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c num_vfs = pci_num_vf(bp->pdev);
num_vfs 269 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c for (i = 0; i < num_vfs; i++) {
num_vfs 294 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c u16 num_vfs = pci_num_vf(bp->pdev);
num_vfs 298 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c for (i = 0; i < num_vfs; i++) {
num_vfs 396 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
num_vfs 404 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
num_vfs 418 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c for (i = 0; i < num_vfs; i++) {
num_vfs 148 drivers/net/ethernet/cavium/liquidio/lio_main.c static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
num_vfs 3867 drivers/net/ethernet/cavium/liquidio/lio_main.c static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
num_vfs 3872 drivers/net/ethernet/cavium/liquidio/lio_main.c if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
num_vfs 3875 drivers/net/ethernet/cavium/liquidio/lio_main.c oct->pf_num, num_vfs);
num_vfs 3879 drivers/net/ethernet/cavium/liquidio/lio_main.c if (!num_vfs) {
num_vfs 3882 drivers/net/ethernet/cavium/liquidio/lio_main.c } else if (num_vfs > oct->sriov_info.max_vfs) {
num_vfs 3885 drivers/net/ethernet/cavium/liquidio/lio_main.c oct->sriov_info.max_vfs, num_vfs);
num_vfs 3888 drivers/net/ethernet/cavium/liquidio/lio_main.c oct->sriov_info.num_vfs_alloced = num_vfs;
num_vfs 3891 drivers/net/ethernet/cavium/liquidio/lio_main.c oct->pf_num, num_vfs);
num_vfs 494 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c int i, num_vfs;
num_vfs 502 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c num_vfs = oct->sriov_info.num_vfs_alloced;
num_vfs 504 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c oct->vf_rep_list.num_vfs = 0;
num_vfs 505 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c for (i = 0; i < num_vfs; i++) {
num_vfs 543 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c oct->vf_rep_list.num_vfs++;
num_vfs 558 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
num_vfs 570 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c oct->vf_rep_list.num_vfs = 0;
num_vfs 588 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
num_vfs 603 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c oct->vf_rep_list.num_vfs = 0;
num_vfs 418 drivers/net/ethernet/cavium/liquidio/octeon_device.h int num_vfs;
num_vfs 965 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h u8 num_vfs;
num_vfs 2826 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (vf >= adap->num_vfs)
num_vfs 2862 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (vf >= adap->num_vfs)
num_vfs 2959 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
num_vfs 2984 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (vf >= adap->num_vfs)
num_vfs 5506 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 5532 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (num_vfs != 0 && current_vfs != 0)
num_vfs 5536 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (num_vfs == current_vfs)
num_vfs 5537 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c return num_vfs;
num_vfs 5540 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (!num_vfs) {
num_vfs 5548 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->num_vfs = 0;
num_vfs 5633 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c err = pci_enable_sriov(pdev, num_vfs);
num_vfs 5635 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c pr_info("Unable to instantiate %d VFs\n", num_vfs);
num_vfs 5646 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->num_vfs = num_vfs;
num_vfs 5647 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c return num_vfs;
num_vfs 171 drivers/net/ethernet/cisco/enic/enic.h u16 num_vfs;
num_vfs 353 drivers/net/ethernet/cisco/enic/enic_main.c return vf >= 0 && vf < enic->num_vfs;
num_vfs 2798 drivers/net/ethernet/cisco/enic/enic_main.c &enic->num_vfs);
num_vfs 2799 drivers/net/ethernet/cisco/enic/enic_main.c if (enic->num_vfs) {
num_vfs 2800 drivers/net/ethernet/cisco/enic/enic_main.c err = pci_enable_sriov(pdev, enic->num_vfs);
num_vfs 2808 drivers/net/ethernet/cisco/enic/enic_main.c num_pps = enic->num_vfs;
num_vfs 43 drivers/net/ethernet/cisco/enic/enic_pp.c if (vf < 0 || vf >= enic->num_vfs) {
num_vfs 648 drivers/net/ethernet/emulex/benet/be.h u16 num_vfs; /* Number of VFs provisioned by PF */
num_vfs 694 drivers/net/ethernet/emulex/benet/be.h for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
num_vfs 4494 drivers/net/ethernet/emulex/benet/be_cmds.c port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
num_vfs 4503 drivers/net/ethernet/emulex/benet/be_cmds.c res->max_vfs = le16_to_cpu(pcie->num_vfs);
num_vfs 4591 drivers/net/ethernet/emulex/benet/be_cmds.c pcie->num_vfs = 0xFFFF;
num_vfs 4629 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_resources pool_res, u16 num_vfs,
num_vfs 4643 drivers/net/ethernet/emulex/benet/be_cmds.c desc.pcie.sriov_state = num_vfs ? 1 : 0;
num_vfs 4644 drivers/net/ethernet/emulex/benet/be_cmds.c desc.pcie.num_vfs = cpu_to_le16(num_vfs);
num_vfs 2175 drivers/net/ethernet/emulex/benet/be_cmds.h u16 num_vfs;
num_vfs 2509 drivers/net/ethernet/emulex/benet/be_cmds.h struct be_resources res, u16 num_vfs,
num_vfs 32 drivers/net/ethernet/emulex/benet/be_main.c static unsigned int num_vfs;
num_vfs 33 drivers/net/ethernet/emulex/benet/be_main.c module_param(num_vfs, uint, 0444);
num_vfs 34 drivers/net/ethernet/emulex/benet/be_main.c MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
num_vfs 1865 drivers/net/ethernet/emulex/benet/be_main.c if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
num_vfs 1905 drivers/net/ethernet/emulex/benet/be_main.c if (vf >= adapter->num_vfs)
num_vfs 1986 drivers/net/ethernet/emulex/benet/be_main.c if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
num_vfs 2022 drivers/net/ethernet/emulex/benet/be_main.c if (vf >= adapter->num_vfs)
num_vfs 2081 drivers/net/ethernet/emulex/benet/be_main.c if (vf >= adapter->num_vfs)
num_vfs 2106 drivers/net/ethernet/emulex/benet/be_main.c if (vf >= adapter->num_vfs)
num_vfs 3938 drivers/net/ethernet/emulex/benet/be_main.c adapter->num_vfs = 0;
num_vfs 4024 drivers/net/ethernet/emulex/benet/be_main.c static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
num_vfs 4033 drivers/net/ethernet/emulex/benet/be_main.c if (num_vfs) {
num_vfs 4038 drivers/net/ethernet/emulex/benet/be_main.c res.max_rss_qs / (num_vfs + 1));
num_vfs 4044 drivers/net/ethernet/emulex/benet/be_main.c if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
num_vfs 4081 drivers/net/ethernet/emulex/benet/be_main.c vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
num_vfs 4082 drivers/net/ethernet/emulex/benet/be_main.c vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
num_vfs 4088 drivers/net/ethernet/emulex/benet/be_main.c vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
num_vfs 4091 drivers/net/ethernet/emulex/benet/be_main.c vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
num_vfs 4094 drivers/net/ethernet/emulex/benet/be_main.c vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
num_vfs 4097 drivers/net/ethernet/emulex/benet/be_main.c vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
num_vfs 4191 drivers/net/ethernet/emulex/benet/be_main.c adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
num_vfs 4271 drivers/net/ethernet/emulex/benet/be_main.c status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
num_vfs 4274 drivers/net/ethernet/emulex/benet/be_main.c adapter->num_vfs = 0;
num_vfs 4316 drivers/net/ethernet/emulex/benet/be_main.c bool use_sriov = adapter->num_vfs ? 1 : 0;
num_vfs 4450 drivers/net/ethernet/emulex/benet/be_main.c adapter->num_vfs = old_vfs;
num_vfs 4897 drivers/net/ethernet/emulex/benet/be_main.c if (adapter->num_vfs)
num_vfs 6179 drivers/net/ethernet/emulex/benet/be_main.c static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 6185 drivers/net/ethernet/emulex/benet/be_main.c if (!num_vfs)
num_vfs 6188 drivers/net/ethernet/emulex/benet/be_main.c adapter->num_vfs = num_vfs;
num_vfs 6190 drivers/net/ethernet/emulex/benet/be_main.c if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
num_vfs 6205 drivers/net/ethernet/emulex/benet/be_main.c be_calculate_vf_res(adapter, adapter->num_vfs,
num_vfs 6208 drivers/net/ethernet/emulex/benet/be_main.c adapter->num_vfs, &vft_res);
num_vfs 6225 drivers/net/ethernet/emulex/benet/be_main.c if (adapter->num_vfs)
num_vfs 6229 drivers/net/ethernet/emulex/benet/be_main.c return adapter->num_vfs;
num_vfs 6264 drivers/net/ethernet/emulex/benet/be_main.c if (num_vfs > 0) {
num_vfs 47 drivers/net/ethernet/freescale/enetc/enetc_msg.c for (i = 0; i < pf->num_vfs; i++) {
num_vfs 129 drivers/net/ethernet/freescale/enetc/enetc_msg.c for (i = 0; i < pf->num_vfs; i++) {
num_vfs 159 drivers/net/ethernet/freescale/enetc/enetc_msg.c for (i = 0; i < pf->num_vfs; i++)
num_vfs 624 drivers/net/ethernet/freescale/enetc/enetc_pf.c static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 630 drivers/net/ethernet/freescale/enetc/enetc_pf.c if (!num_vfs) {
num_vfs 633 drivers/net/ethernet/freescale/enetc/enetc_pf.c pf->num_vfs = 0;
num_vfs 636 drivers/net/ethernet/freescale/enetc/enetc_pf.c pf->num_vfs = num_vfs;
num_vfs 638 drivers/net/ethernet/freescale/enetc/enetc_pf.c pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state),
num_vfs 641 drivers/net/ethernet/freescale/enetc/enetc_pf.c pf->num_vfs = 0;
num_vfs 651 drivers/net/ethernet/freescale/enetc/enetc_pf.c err = pci_enable_sriov(pdev, num_vfs);
num_vfs 658 drivers/net/ethernet/freescale/enetc/enetc_pf.c return num_vfs;
num_vfs 664 drivers/net/ethernet/freescale/enetc/enetc_pf.c pf->num_vfs = 0;
num_vfs 669 drivers/net/ethernet/freescale/enetc/enetc_pf.c #define enetc_sriov_configure(pdev, num_vfs) (void)0
num_vfs 908 drivers/net/ethernet/freescale/enetc/enetc_pf.c if (pf->num_vfs)
num_vfs 32 drivers/net/ethernet/freescale/enetc/enetc_pf.h int num_vfs; /* number of active VFs, after sriov_init */
num_vfs 1944 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 1953 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (num_vfs) {
num_vfs 1954 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ret = pci_enable_sriov(pdev, num_vfs);
num_vfs 1958 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c return num_vfs;
num_vfs 218 drivers/net/ethernet/intel/fm10k/fm10k.h unsigned int num_vfs;
num_vfs 536 drivers/net/ethernet/intel/fm10k/fm10k.h int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
num_vfs 179 drivers/net/ethernet/intel/fm10k/fm10k_iov.c i = iov_data->num_vfs;
num_vfs 226 drivers/net/ethernet/intel/fm10k/fm10k_iov.c for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
num_vfs 287 drivers/net/ethernet/intel/fm10k/fm10k_iov.c int num_vfs, i;
num_vfs 290 drivers/net/ethernet/intel/fm10k/fm10k_iov.c num_vfs = iov_data ? iov_data->num_vfs : 0;
num_vfs 297 drivers/net/ethernet/intel/fm10k/fm10k_iov.c for (i = 0; i < num_vfs; i++) {
num_vfs 332 drivers/net/ethernet/intel/fm10k/fm10k_iov.c int num_vfs, i;
num_vfs 335 drivers/net/ethernet/intel/fm10k/fm10k_iov.c num_vfs = iov_data ? iov_data->num_vfs : 0;
num_vfs 348 drivers/net/ethernet/intel/fm10k/fm10k_iov.c hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
num_vfs 362 drivers/net/ethernet/intel/fm10k/fm10k_iov.c for (i = 0; i < num_vfs; i++) {
num_vfs 395 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (vf_idx >= iov_data->num_vfs)
num_vfs 423 drivers/net/ethernet/intel/fm10k/fm10k_iov.c static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
num_vfs 440 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (!num_vfs)
num_vfs 444 drivers/net/ethernet/intel/fm10k/fm10k_iov.c size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
num_vfs 450 drivers/net/ethernet/intel/fm10k/fm10k_iov.c iov_data->num_vfs = num_vfs;
num_vfs 453 drivers/net/ethernet/intel/fm10k/fm10k_iov.c for (i = 0; i < num_vfs; i++) {
num_vfs 491 drivers/net/ethernet/intel/fm10k/fm10k_iov.c int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 499 drivers/net/ethernet/intel/fm10k/fm10k_iov.c num_vfs = current_vfs;
num_vfs 506 drivers/net/ethernet/intel/fm10k/fm10k_iov.c err = fm10k_iov_alloc_data(pdev, num_vfs);
num_vfs 511 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (num_vfs && num_vfs != current_vfs) {
num_vfs 512 drivers/net/ethernet/intel/fm10k/fm10k_iov.c err = pci_enable_sriov(pdev, num_vfs);
num_vfs 520 drivers/net/ethernet/intel/fm10k/fm10k_iov.c return num_vfs;
num_vfs 553 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (!iov_data || vf_idx >= iov_data->num_vfs)
num_vfs 578 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (!iov_data || vf_idx >= iov_data->num_vfs)
num_vfs 614 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (!iov_data || vf_idx >= iov_data->num_vfs)
num_vfs 639 drivers/net/ethernet/intel/fm10k/fm10k_iov.c if (!iov_data || vf_idx >= iov_data->num_vfs)
num_vfs 443 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (!hw->iov.num_vfs)
num_vfs 585 drivers/net/ethernet/intel/fm10k/fm10k_pf.c u16 num_vfs = hw->iov.num_vfs;
num_vfs 588 drivers/net/ethernet/intel/fm10k/fm10k_pf.c vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);
num_vfs 619 drivers/net/ethernet/intel/fm10k/fm10k_pf.c static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
num_vfs 631 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
num_vfs 635 drivers/net/ethernet/intel/fm10k/fm10k_pf.c hw->iov.num_vfs = num_vfs;
num_vfs 639 drivers/net/ethernet/intel/fm10k/fm10k_pf.c qmap_stride = (num_vfs > 8) ? 32 : 256;
num_vfs 648 drivers/net/ethernet/intel/fm10k/fm10k_pf.c for (i = 0; i < num_vfs; i++) {
num_vfs 656 drivers/net/ethernet/intel/fm10k/fm10k_pf.c for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
num_vfs 683 drivers/net/ethernet/intel/fm10k/fm10k_pf.c fm10k_vf_vector_index(hw, num_vfs - 1));
num_vfs 686 drivers/net/ethernet/intel/fm10k/fm10k_pf.c for (i = 0; i < num_vfs; i++) {
num_vfs 741 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (vf_idx >= hw->iov.num_vfs)
num_vfs 799 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (vf_idx >= hw->iov.num_vfs)
num_vfs 813 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (vf_idx == (hw->iov.num_vfs - 1))
num_vfs 837 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
num_vfs 841 drivers/net/ethernet/intel/fm10k/fm10k_pf.c qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
num_vfs 953 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (vf_idx >= hw->iov.num_vfs)
num_vfs 969 drivers/net/ethernet/intel/fm10k/fm10k_pf.c qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
num_vfs 1017 drivers/net/ethernet/intel/fm10k/fm10k_pf.c if (vf_idx == (hw->iov.num_vfs - 1))
num_vfs 627 drivers/net/ethernet/intel/fm10k/fm10k_type.h u16 num_vfs;
num_vfs 1084 drivers/net/ethernet/intel/i40e/i40e.h void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
num_vfs 218 drivers/net/ethernet/intel/i40e/i40e_client.c void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
num_vfs 234 drivers/net/ethernet/intel/i40e/i40e_client.c cdev->client->ops->vf_enable(&cdev->lan_info, cdev->client, num_vfs);
num_vfs 165 drivers/net/ethernet/intel/i40e/i40e_client.h struct i40e_client *client, u32 num_vfs);
num_vfs 1327 drivers/net/ethernet/intel/i40e/i40e_common.c u32 num_vfs;
num_vfs 1355 drivers/net/ethernet/intel/i40e/i40e_common.c num_vfs = (j - i) + 1;
num_vfs 1357 drivers/net/ethernet/intel/i40e/i40e_common.c num_vfs = 0;
num_vfs 1371 drivers/net/ethernet/intel/i40e/i40e_common.c for (i = 0; i < num_vfs; i++)
num_vfs 3253 drivers/net/ethernet/intel/i40e/i40e_common.c p->num_vfs = number;
num_vfs 9590 drivers/net/ethernet/intel/i40e/i40e_main.c pf->hw.pf_id, pf->hw.func_caps.num_vfs,
num_vfs 9605 drivers/net/ethernet/intel/i40e/i40e_main.c pf->hw.dev_caps.num_vfs);
num_vfs 9615 drivers/net/ethernet/intel/i40e/i40e_main.c + pf->hw.func_caps.num_vfs)
num_vfs 11987 drivers/net/ethernet/intel/i40e/i40e_main.c if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
num_vfs 11991 drivers/net/ethernet/intel/i40e/i40e_main.c pf->hw.func_caps.num_vfs,
num_vfs 323 drivers/net/ethernet/intel/i40e/i40e_type.h u32 num_vfs;
num_vfs 1640 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
num_vfs 1654 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c if (pre_existing_vfs && pre_existing_vfs != num_vfs)
num_vfs 1656 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
num_vfs 1659 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c if (num_vfs > pf->num_req_vfs) {
num_vfs 1661 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c num_vfs, pf->num_req_vfs);
num_vfs 1666 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
num_vfs 1667 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c err = i40e_alloc_vfs(pf, num_vfs);
num_vfs 1674 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c return num_vfs;
num_vfs 1690 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 1700 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c if (num_vfs) {
num_vfs 1705 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ret = i40e_pci_sriov_enable(pdev, num_vfs);
num_vfs 119 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
num_vfs 1401 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
num_vfs 1417 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c if (pre_existing_vfs && pre_existing_vfs != num_vfs)
num_vfs 1419 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
num_vfs 1420 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c return num_vfs;
num_vfs 1422 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c if (num_vfs > pf->num_vfs_supported) {
num_vfs 1424 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c num_vfs, pf->num_vfs_supported);
num_vfs 1428 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c dev_info(dev, "Allocating %d VFs\n", num_vfs);
num_vfs 1429 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c err = ice_alloc_vfs(pf, num_vfs);
num_vfs 1436 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c return num_vfs;
num_vfs 1446 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 1456 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c if (num_vfs)
num_vfs 1457 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c return ice_pci_sriov_ena(pf, num_vfs);
num_vfs 100 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
num_vfs 141 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h int __always_unused num_vfs)
num_vfs 183 drivers/net/ethernet/intel/igb/igb_main.c static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
num_vfs 199 drivers/net/ethernet/intel/igb/igb_main.c static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
num_vfs 3514 drivers/net/ethernet/intel/igb/igb_main.c static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
num_vfs 3523 drivers/net/ethernet/intel/igb/igb_main.c if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
num_vfs 3527 drivers/net/ethernet/intel/igb/igb_main.c if (!num_vfs)
num_vfs 3535 drivers/net/ethernet/intel/igb/igb_main.c adapter->vfs_allocated_count = num_vfs;
num_vfs 8957 drivers/net/ethernet/intel/igb/igb_main.c static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
num_vfs 8959 drivers/net/ethernet/intel/igb/igb_main.c int err = igb_enable_sriov(dev, num_vfs);
num_vfs 8966 drivers/net/ethernet/intel/igb/igb_main.c return num_vfs;
num_vfs 8973 drivers/net/ethernet/intel/igb/igb_main.c static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
num_vfs 8976 drivers/net/ethernet/intel/igb/igb_main.c if (num_vfs == 0)
num_vfs 8979 drivers/net/ethernet/intel/igb/igb_main.c return igb_pci_enable_sriov(dev, num_vfs);
num_vfs 739 drivers/net/ethernet/intel/ixgbe/ixgbe.h unsigned int num_vfs;
num_vfs 650 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c for (vf = 0; vf < adapter->num_vfs; vf++) {
num_vfs 695 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c for (vf = 0; vf < adapter->num_vfs; vf++) {
num_vfs 2078 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c for (i = 0; i < adapter->num_vfs; i++) {
num_vfs 2700 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ((vf > adapter->num_vfs) ||
num_vfs 2894 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c unsigned int pf_pool = adapter->num_vfs;
num_vfs 697 drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c if (adapter->num_vfs &&
num_vfs 2443 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->num_vfs > 32) {
num_vfs 2444 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
num_vfs 3695 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
num_vfs 4251 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0; i < adapter->num_vfs; i++) {
num_vfs 4351 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->num_vfs)
num_vfs 5262 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ((vf > adapter->num_vfs) ||
num_vfs 6120 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (adapter->num_vfs) {
num_vfs 6125 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0 ; i < adapter->num_vfs; i++)
num_vfs 7565 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!adapter->num_vfs)
num_vfs 7572 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0; i < adapter->num_vfs; i++) {
num_vfs 7631 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (vf = 0; vf < adapter->num_vfs; ++vf) {
num_vfs 7650 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c adapter->num_vfs == 0)
num_vfs 9281 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c unsigned int num_vfs = adapter->num_vfs, vf;
num_vfs 9286 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (vf = 0; vf < num_vfs; ++vf) {
num_vfs 9970 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c num_pools = adapter->num_vfs + adapter->num_rx_pools;
num_vfs 9987 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (!adapter->num_vfs)
num_vfs 9994 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c num_pools = adapter->num_vfs + adapter->num_rx_pools;
num_vfs 10092 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u16 used_pools = adapter->num_vfs + adapter->num_rx_pools;
num_vfs 11139 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
num_vfs 11140 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0; i < adapter->num_vfs; i++)
num_vfs 11305 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c adapter->num_vfs == 0)
num_vfs 25 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c unsigned int num_vfs)
num_vfs 32 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
num_vfs 51 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c unsigned int num_vfs)
num_vfs 66 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
num_vfs 71 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c adapter->num_vfs = num_vfs;
num_vfs 73 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c ixgbe_alloc_vf_macvlans(adapter, num_vfs);
num_vfs 74 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c adapter->ring_feature[RING_F_VMDQ].offset = num_vfs;
num_vfs 81 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
num_vfs 84 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c } else if (num_vfs < 32) {
num_vfs 96 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (i = 0; i < num_vfs; i++) {
num_vfs 114 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c e_info(probe, "SR-IOV enabled with %d VFs\n", num_vfs);
num_vfs 142 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 156 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c unsigned int num_vfs;
num_vfs 169 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c num_vfs = pre_existing_vfs;
num_vfs 181 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c num_vfs = min_t(unsigned int, max_vfs, IXGBE_MAX_VFS_DRV_LIMIT);
num_vfs 183 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c err = pci_enable_sriov(adapter->pdev, num_vfs);
num_vfs 190 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (!__ixgbe_enable_sriov(adapter, num_vfs)) {
num_vfs 206 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c unsigned int num_vfs = adapter->num_vfs, vf;
num_vfs 210 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c adapter->num_vfs = 0;
num_vfs 213 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (vf = 0; vf < num_vfs; ++vf) {
num_vfs 266 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
num_vfs 274 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (pre_existing_vfs && pre_existing_vfs != num_vfs)
num_vfs 276 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
num_vfs 277 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c return num_vfs;
num_vfs 300 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (num_vfs > (limit - num_rx_pools)) {
num_vfs 306 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c err = __ixgbe_enable_sriov(adapter, num_vfs);
num_vfs 310 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (i = 0; i < num_vfs; i++)
num_vfs 316 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c err = pci_enable_sriov(dev, num_vfs);
num_vfs 323 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c return num_vfs;
num_vfs 350 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
num_vfs 352 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (num_vfs == 0)
num_vfs 355 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c return ixgbe_pci_sriov_enable(dev, num_vfs);
num_vfs 413 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (i = 0; i < adapter->num_vfs; i++) {
num_vfs 1312 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (vf = 0; vf < adapter->num_vfs; vf++) {
num_vfs 1356 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (i = 0 ; i < adapter->num_vfs; i++) {
num_vfs 1369 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 1476 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
num_vfs 1581 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c for (i = 0; i < adapter->num_vfs; i++) {
num_vfs 1596 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 1630 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 1673 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 1685 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 1707 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c if (vf >= adapter->num_vfs)
num_vfs 39 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
num_vfs 1953 drivers/net/ethernet/mellanox/mlx4/cmd.c int num_vfs;
num_vfs 1966 drivers/net/ethernet/mellanox/mlx4/cmd.c num_vfs = (available_vpp /
num_vfs 1971 drivers/net/ethernet/mellanox/mlx4/cmd.c vpp_param[i] = num_vfs;
num_vfs 1987 drivers/net/ethernet/mellanox/mlx4/cmd.c port_qos->num_of_qos_vfs = num_vfs;
num_vfs 2730 drivers/net/ethernet/mellanox/mlx4/cmd.c if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
num_vfs 2732 drivers/net/ethernet/mellanox/mlx4/cmd.c vf, dev->persist->num_vfs);
num_vfs 2741 drivers/net/ethernet/mellanox/mlx4/cmd.c if (slave < 1 || slave > dev->persist->num_vfs) {
num_vfs 2832 drivers/net/ethernet/mellanox/mlx4/cmd.c for (i = 0; i < dev->persist->num_vfs + 1; i++) {
num_vfs 2852 drivers/net/ethernet/mellanox/mlx4/cmd.c for (i = 0; i < dev->persist->num_vfs + 1; i++) {
num_vfs 167 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i <= dev->persist->num_vfs; i++) {
num_vfs 228 drivers/net/ethernet/mellanox/mlx4/eq.c if (slave < 0 || slave > dev->persist->num_vfs ||
num_vfs 278 drivers/net/ethernet/mellanox/mlx4/eq.c if (dev->persist->num_vfs < slave)
num_vfs 297 drivers/net/ethernet/mellanox/mlx4/eq.c if (dev->persist->num_vfs < slave)
num_vfs 352 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i < dev->persist->num_vfs + 1; i++)
num_vfs 614 drivers/net/ethernet/mellanox/mlx4/eq.c for (i = 0; i < dev->persist->num_vfs + 1;
num_vfs 658 drivers/net/ethernet/mellanox/mlx4/eq.c i < dev->persist->num_vfs + 1;
num_vfs 84 drivers/net/ethernet/mellanox/mlx4/main.c static uint8_t num_vfs[3] = {0, 0, 0};
num_vfs 86 drivers/net/ethernet/mellanox/mlx4/main.c module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
num_vfs 87 drivers/net/ethernet/mellanox/mlx4/main.c MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
num_vfs 550 drivers/net/ethernet/mellanox/mlx4/main.c if (enable_4k_uar || !dev->persist->num_vfs)
num_vfs 1382 drivers/net/ethernet/mellanox/mlx4/main.c dev->persist->num_vfs + 1);
num_vfs 1385 drivers/net/ethernet/mellanox/mlx4/main.c if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
num_vfs 1393 drivers/net/ethernet/mellanox/mlx4/main.c nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
num_vfs 1394 drivers/net/ethernet/mellanox/mlx4/main.c bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
num_vfs 2183 drivers/net/ethernet/mellanox/mlx4/main.c (dev->persist->num_vfs + 1))) &&
num_vfs 2350 drivers/net/ethernet/mellanox/mlx4/main.c if (enable_4k_uar || !dev->persist->num_vfs) {
num_vfs 3258 drivers/net/ethernet/mellanox/mlx4/main.c dev->persist->num_vfs = total_vfs;
num_vfs 3265 drivers/net/ethernet/mellanox/mlx4/main.c dev->persist->num_vfs = 0;
num_vfs 3384 drivers/net/ethernet/mellanox/mlx4/main.c dev->persist->num_vfs = total_vfs;
num_vfs 3636 drivers/net/ethernet/mellanox/mlx4/main.c if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
num_vfs 3699 drivers/net/ethernet/mellanox/mlx4/main.c if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
num_vfs 3736 drivers/net/ethernet/mellanox/mlx4/main.c nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
num_vfs 3946 drivers/net/ethernet/mellanox/mlx4/main.c if (persist->num_vfs)
num_vfs 4202 drivers/net/ethernet/mellanox/mlx4/main.c total_vfs = dev->persist->num_vfs;
num_vfs 4330 drivers/net/ethernet/mellanox/mlx4/main.c total_vfs = dev->persist->num_vfs;
num_vfs 4395 drivers/net/ethernet/mellanox/mlx4/main.c total_vfs = dev->persist->num_vfs;
num_vfs 1120 drivers/net/ethernet/mellanox/mlx4/port.c dev->persist->num_vfs + 1);
num_vfs 1122 drivers/net/ethernet/mellanox/mlx4/port.c vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
num_vfs 1157 drivers/net/ethernet/mellanox/mlx4/port.c dev->persist->num_vfs + 1);
num_vfs 1160 drivers/net/ethernet/mellanox/mlx4/port.c vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
num_vfs 1211 drivers/net/ethernet/mellanox/mlx4/port.c if (slave < 0 || slave > dev->persist->num_vfs)
num_vfs 1864 drivers/net/ethernet/mellanox/mlx4/port.c unsigned num_vfs;
num_vfs 1871 drivers/net/ethernet/mellanox/mlx4/port.c num_vfs = bitmap_weight(slaves_pport.slaves,
num_vfs 1872 drivers/net/ethernet/mellanox/mlx4/port.c dev->persist->num_vfs + 1) - 1;
num_vfs 1888 drivers/net/ethernet/mellanox/mlx4/port.c else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
num_vfs 1889 drivers/net/ethernet/mellanox/mlx4/port.c (vf_gids / num_vfs + 1))
num_vfs 1891 drivers/net/ethernet/mellanox/mlx4/port.c (vf_gids / num_vfs + 1)) + 1;
num_vfs 1895 drivers/net/ethernet/mellanox/mlx4/port.c ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
num_vfs 1896 drivers/net/ethernet/mellanox/mlx4/port.c (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
num_vfs 1916 drivers/net/ethernet/mellanox/mlx4/port.c dev->persist->num_vfs + 1);
num_vfs 1946 drivers/net/ethernet/mellanox/mlx4/port.c dev->persist->num_vfs + 1);
num_vfs 318 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (slave > dev->persist->num_vfs)
num_vfs 324 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (dev->persist->num_vfs + 1) + slave] :
num_vfs 363 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (dev->persist->num_vfs + 1) + slave] += count;
num_vfs 387 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (slave > dev->persist->num_vfs)
num_vfs 394 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (dev->persist->num_vfs + 1) + slave] :
num_vfs 410 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (dev->persist->num_vfs + 1) + slave] -= count;
num_vfs 429 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (2 * (dev->persist->num_vfs + 1));
num_vfs 528 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c res_alloc->quota = kmalloc_array(dev->persist->num_vfs + 1,
num_vfs 531 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c res_alloc->guaranteed = kmalloc_array(dev->persist->num_vfs + 1,
num_vfs 537 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c (dev->persist->num_vfs + 1),
num_vfs 541 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c kcalloc(dev->persist->num_vfs + 1,
num_vfs 552 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c for (t = 0; t < dev->persist->num_vfs + 1; t++) {
num_vfs 3352 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (slave < 0 || slave > dev->persist->num_vfs ||
num_vfs 1858 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
num_vfs 1911 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw->esw_funcs.num_vfs, esw->enabled_vports);
num_vfs 1935 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw->esw_funcs.num_vfs, esw->enabled_vports);
num_vfs 2608 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs)
num_vfs 2615 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw->esw_funcs.num_vfs = num_vfs;
num_vfs 2623 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
num_vfs 210 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h u16 num_vfs;
num_vfs 593 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
num_vfs 614 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
num_vfs 358 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
num_vfs 1373 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
num_vfs 1471 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c __unload_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
num_vfs 1567 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c err = __load_reps_vf_vport(esw, esw->esw_funcs.num_vfs, rep_type);
num_vfs 2036 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c int num_vfs = esw->esw_funcs.num_vfs;
num_vfs 2043 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
num_vfs 2097 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
num_vfs 2101 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c if (esw->esw_funcs.num_vfs > 0) {
num_vfs 2102 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
num_vfs 2110 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c esw->esw_funcs.num_vfs = new_num_vfs;
num_vfs 2389 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
num_vfs 2449 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
num_vfs 133 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
num_vfs 71 drivers/net/ethernet/mellanox/mlx5/core/sriov.c static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
num_vfs 80 drivers/net/ethernet/mellanox/mlx5/core/sriov.c mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
num_vfs 89 drivers/net/ethernet/mellanox/mlx5/core/sriov.c for (vf = 0; vf < num_vfs; vf++) {
num_vfs 114 drivers/net/ethernet/mellanox/mlx5/core/sriov.c int num_vfs = pci_num_vf(dev->pdev);
num_vfs 118 drivers/net/ethernet/mellanox/mlx5/core/sriov.c for (vf = num_vfs - 1; vf >= 0; vf--) {
num_vfs 136 drivers/net/ethernet/mellanox/mlx5/core/sriov.c static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
num_vfs 141 drivers/net/ethernet/mellanox/mlx5/core/sriov.c err = mlx5_device_enable_sriov(dev, num_vfs);
num_vfs 147 drivers/net/ethernet/mellanox/mlx5/core/sriov.c err = pci_enable_sriov(pdev, num_vfs);
num_vfs 163 drivers/net/ethernet/mellanox/mlx5/core/sriov.c int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
num_vfs 169 drivers/net/ethernet/mellanox/mlx5/core/sriov.c mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
num_vfs 171 drivers/net/ethernet/mellanox/mlx5/core/sriov.c if (num_vfs)
num_vfs 172 drivers/net/ethernet/mellanox/mlx5/core/sriov.c err = mlx5_sriov_enable(pdev, num_vfs);
num_vfs 177 drivers/net/ethernet/mellanox/mlx5/core/sriov.c sriov->num_vfs = num_vfs;
num_vfs 178 drivers/net/ethernet/mellanox/mlx5/core/sriov.c return err ? err : num_vfs;
err : num_vfs; num_vfs 233 drivers/net/ethernet/mellanox/mlx5/core/sriov.c sriov->num_vfs = pci_num_vf(pdev); num_vfs 4317 drivers/net/ethernet/neterion/vxge/vxge-main.c u32 num_vfs = 0; num_vfs 4471 drivers/net/ethernet/neterion/vxge/vxge-main.c num_vfs = vxge_get_num_vfs(function_mode) - 1; num_vfs 4476 drivers/net/ethernet/neterion/vxge/vxge-main.c ret = pci_enable_sriov(pdev, num_vfs); num_vfs 472 drivers/net/ethernet/netronome/nfp/flower/main.c static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs) num_vfs 481 drivers/net/ethernet/netronome/nfp/flower/main.c NFP_REPR_TYPE_VF, num_vfs); num_vfs 623 drivers/net/ethernet/netronome/nfp/flower/main.c if (app->pf->num_vfs) num_vfs 648 drivers/net/ethernet/netronome/nfp/flower/main.c if (app->pf->num_vfs) { num_vfs 652 drivers/net/ethernet/netronome/nfp/flower/main.c app->pf->num_vfs); num_vfs 141 drivers/net/ethernet/netronome/nfp/nfp_app.h int (*sriov_enable)(struct nfp_app *app, int num_vfs); num_vfs 388 drivers/net/ethernet/netronome/nfp/nfp_app.h static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs) num_vfs 392 drivers/net/ethernet/netronome/nfp/nfp_app.h return app->type->sriov_enable(app, num_vfs); num_vfs 223 drivers/net/ethernet/netronome/nfp/nfp_main.c static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) num_vfs 229 drivers/net/ethernet/netronome/nfp/nfp_main.c if (num_vfs > pf->limit_vfs) { num_vfs 235 drivers/net/ethernet/netronome/nfp/nfp_main.c err = pci_enable_sriov(pdev, num_vfs); num_vfs 243 drivers/net/ethernet/netronome/nfp/nfp_main.c err = nfp_app_sriov_enable(pf->app, num_vfs); num_vfs 251 drivers/net/ethernet/netronome/nfp/nfp_main.c pf->num_vfs = num_vfs; num_vfs 253 drivers/net/ethernet/netronome/nfp/nfp_main.c dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs); num_vfs 256 drivers/net/ethernet/netronome/nfp/nfp_main.c return num_vfs; num_vfs 285 drivers/net/ethernet/netronome/nfp/nfp_main.c pf->num_vfs = 0; num_vfs 295 drivers/net/ethernet/netronome/nfp/nfp_main.c static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) num_vfs 300 drivers/net/ethernet/netronome/nfp/nfp_main.c if (num_vfs == 0) num_vfs 303 drivers/net/ethernet/netronome/nfp/nfp_main.c return nfp_pcie_sriov_enable(pdev, num_vfs); num_vfs 771 drivers/net/ethernet/netronome/nfp/nfp_main.c pf->num_vfs = pci_num_vf(pdev); num_vfs 772 drivers/net/ethernet/netronome/nfp/nfp_main.c if (pf->num_vfs > pf->limit_vfs) { num_vfs 775 drivers/net/ethernet/netronome/nfp/nfp_main.c pf->num_vfs, pf->limit_vfs); num_vfs 111 drivers/net/ethernet/netronome/nfp/nfp_main.h unsigned int num_vfs; num_vfs 411 drivers/net/ethernet/netronome/nfp/nfp_net_main.c if (pf->num_vfs) { num_vfs 412 drivers/net/ethernet/netronome/nfp/nfp_net_main.c err = nfp_app_sriov_enable(pf->app, pf->num_vfs); num_vfs 428 drivers/net/ethernet/netronome/nfp/nfp_net_main.c if (pf->num_vfs) num_vfs 31 drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c if (vf < 0 || vf >= app->pf->num_vfs) { num_vfs 22 drivers/net/ethernet/netronome/nfp/nic/main.c static int nfp_nic_sriov_enable(struct nfp_app *app, int num_vfs) num_vfs 1802 drivers/net/ethernet/qlogic/qed/qed_debug.c dev_data->num_vfs = MAX_NUM_VFS_K2; num_vfs 1808 drivers/net/ethernet/qlogic/qed/qed_debug.c dev_data->num_vfs = MAX_NUM_VFS_BB; num_vfs 2832 drivers/net/ethernet/qlogic/qed/qed_debug.c split_count = dev_data->num_vfs; num_vfs 1492 drivers/net/ethernet/qlogic/qed/qed_dev.c u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); num_vfs 1499 
drivers/net/ethernet/qlogic/qed/qed_dev.c if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) num_vfs 1503 drivers/net/ethernet/qlogic/qed/qed_dev.c num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; num_vfs 1903 drivers/net/ethernet/qlogic/qed/qed_dev.c u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); num_vfs 1909 drivers/net/ethernet/qlogic/qed/qed_dev.c qm_info->num_vf_pqs = num_vfs; num_vfs 1910 drivers/net/ethernet/qlogic/qed/qed_dev.c for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) num_vfs 1129 drivers/net/ethernet/qlogic/qed/qed_hsi.h u8 num_vfs; num_vfs 2577 drivers/net/ethernet/qlogic/qed/qed_hsi.h u8 num_vfs; num_vfs 428 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c p_ramrod->num_vfs = (u8) p_iov->total_vfs; num_vfs 375 drivers/net/ethernet/qlogic/qed/qed_sriov.c pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); num_vfs 376 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (iov->num_vfs) { num_vfs 380 drivers/net/ethernet/qlogic/qed/qed_sriov.c iov->num_vfs = 0; num_vfs 410 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (iov->num_vfs > NUM_OF_VFS(cdev) || num_vfs 418 drivers/net/ethernet/qlogic/qed/qed_sriov.c iov->num_vfs); num_vfs 420 drivers/net/ethernet/qlogic/qed/qed_sriov.c iov->num_vfs = 0; num_vfs 486 drivers/net/ethernet/qlogic/qed/qed_sriov.c u16 num_vfs = 0; num_vfs 488 drivers/net/ethernet/qlogic/qed/qed_sriov.c num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; num_vfs 491 drivers/net/ethernet/qlogic/qed/qed_sriov.c "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); num_vfs 494 drivers/net/ethernet/qlogic/qed/qed_sriov.c p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; num_vfs 504 drivers/net/ethernet/qlogic/qed/qed_sriov.c p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; num_vfs 514 drivers/net/ethernet/qlogic/qed/qed_sriov.c num_vfs; num_vfs 1099 drivers/net/ethernet/qlogic/qed/qed_sriov.c p_hwfn->cdev->p_iov_info->num_vfs++; num_vfs 1153 drivers/net/ethernet/qlogic/qed/qed_sriov.c p_hwfn->cdev->p_iov_info->num_vfs--; num_vfs 4450 drivers/net/ethernet/qlogic/qed/qed_sriov.c if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) num_vfs 119 drivers/net/ethernet/qlogic/qed/qed_sriov.h u16 num_vfs; /* number of vfs that have been started */ num_vfs 177 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h u8 num_vfs; num_vfs 141 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) num_vfs 158 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c sriov->num_vfs = num_vfs; num_vfs 160 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info), num_vfs 190 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c for (i = 0; i < num_vfs; i++) { num_vfs 273 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c for (i = 0; i < sriov->num_vfs; i++) { num_vfs 282 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c for (i = 0; i < sriov->num_vfs; i++) num_vfs 688 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) { num_vfs 2169 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c for (i = 0; i < sriov->num_vfs; i++) { num_vfs 2182 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c for (i = 0; i < sriov->num_vfs; i++) { num_vfs 76 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c u32 num_vfs, max, temp; num_vfs 82 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c num_vfs = sriov->num_vfs; num_vfs 83 
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c max = num_vfs + 1; num_vfs 96 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs; num_vfs 98 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c temp = res->num_tx_mac_filters - num_macs * num_vfs; num_vfs 100 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC; num_vfs 104 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c info->max_tx_ques = res->num_tx_queues - sriov->num_vfs; num_vfs 163 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c total_fn = sriov->num_vfs + 1; num_vfs 413 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c for (i = 0; i < sriov->num_vfs; i++) num_vfs 567 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs) num_vfs 574 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c err = pci_enable_sriov(adapter->pdev, num_vfs); num_vfs 582 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c int num_vfs) num_vfs 589 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c err = qlcnic_sriov_init(adapter, num_vfs); num_vfs 617 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs) num_vfs 632 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c err = __qlcnic_pci_sriov_enable(adapter, num_vfs); num_vfs 640 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c err = qlcnic_sriov_pf_enable(adapter, num_vfs); num_vfs 646 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c return num_vfs; num_vfs 666 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) num_vfs 674 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (num_vfs == 0) num_vfs 677 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c err = qlcnic_pci_sriov_enable(adapter, num_vfs); num_vfs 1766 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c u16 num_vfs = sriov->num_vfs; num_vfs 1769 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c for (i = 0; i < num_vfs; i++) { num_vfs 1805 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c int i, num_vfs; num_vfs 1812 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c num_vfs = sriov->num_vfs; num_vfs 1814 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (!is_valid_ether_addr(mac) || vf >= num_vfs) num_vfs 1822 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c for (i = 0; i < num_vfs; i++) { num_vfs 1861 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (vf >= sriov->num_vfs) num_vfs 1924 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (vf >= sriov->num_vfs || qos > 7) num_vfs 2002 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (vf >= sriov->num_vfs) num_vfs 2033 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (vf >= sriov->num_vfs) num_vfs 381 drivers/net/ethernet/sfc/ef10_sriov.c static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs) num_vfs 386 drivers/net/ethernet/sfc/ef10_sriov.c efx->vf_count = num_vfs; num_vfs 392 drivers/net/ethernet/sfc/ef10_sriov.c rc = pci_enable_sriov(dev, num_vfs); num_vfs 427 drivers/net/ethernet/sfc/ef10_sriov.c int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs) num_vfs 429 drivers/net/ethernet/sfc/ef10_sriov.c if (num_vfs == 0) num_vfs 432 drivers/net/ethernet/sfc/ef10_sriov.c return efx_ef10_pci_sriov_enable(efx, num_vfs); num_vfs 36 drivers/net/ethernet/sfc/ef10_sriov.h int 
efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs); num_vfs 3640 drivers/net/ethernet/sfc/efx.c static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs) num_vfs 3646 drivers/net/ethernet/sfc/efx.c rc = efx->type->sriov_configure(efx, num_vfs); num_vfs 3650 drivers/net/ethernet/sfc/efx.c return num_vfs; num_vfs 1387 drivers/net/ethernet/sfc/net_driver.h int (*sriov_configure)(struct efx_nic *efx, int num_vfs); num_vfs 1682 drivers/net/ethernet/sfc/siena_sriov.c int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs) num_vfs 41 drivers/net/ethernet/sfc/siena_sriov.h int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs); num_vfs 27 drivers/net/netdevsim/bus.c unsigned int num_vfs) num_vfs 29 drivers/net/netdevsim/bus.c nsim_bus_dev->vfconfigs = kcalloc(num_vfs, num_vfs 34 drivers/net/netdevsim/bus.c nsim_bus_dev->num_vfs = num_vfs; num_vfs 43 drivers/net/netdevsim/bus.c nsim_bus_dev->num_vfs = 0; num_vfs 51 drivers/net/netdevsim/bus.c unsigned int num_vfs; num_vfs 54 drivers/net/netdevsim/bus.c ret = kstrtouint(buf, 0, &num_vfs); num_vfs 59 drivers/net/netdevsim/bus.c if (nsim_bus_dev->num_vfs == num_vfs) num_vfs 61 drivers/net/netdevsim/bus.c if (nsim_bus_dev->num_vfs && num_vfs) { num_vfs 66 drivers/net/netdevsim/bus.c if (num_vfs) { num_vfs 67 drivers/net/netdevsim/bus.c ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs); num_vfs 87 drivers/net/netdevsim/bus.c return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs); num_vfs 257 drivers/net/netdevsim/bus.c return nsim_bus_dev->num_vfs; num_vfs 87 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac)) num_vfs 100 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7) num_vfs 115 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs) num_vfs 129 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs) num_vfs 141 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs) num_vfs 153 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs) num_vfs 166 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs) num_vfs 189 drivers/net/netdevsim/netdev.c if (vf >= nsim_bus_dev->num_vfs) num_vfs 218 drivers/net/netdevsim/netdevsim.h unsigned int num_vfs; num_vfs 273 drivers/pci/iov.c u16 num_vfs; num_vfs 275 drivers/pci/iov.c ret = kstrtou16(buf, 0, &num_vfs); num_vfs 279 drivers/pci/iov.c if (num_vfs > pci_sriov_get_totalvfs(pdev)) num_vfs 284 drivers/pci/iov.c if (num_vfs == pdev->sriov->num_VFs) num_vfs 294 drivers/pci/iov.c if (num_vfs == 0) { num_vfs 303 drivers/pci/iov.c pdev->sriov->num_VFs, num_vfs); num_vfs 308 drivers/pci/iov.c ret = pdev->driver->sriov_configure(pdev, num_vfs); num_vfs 312 drivers/pci/iov.c if (ret != num_vfs) num_vfs 314 drivers/pci/iov.c num_vfs, ret); num_vfs 409 drivers/pci/iov.c int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) num_vfs 419 drivers/pci/iov.c static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs) num_vfs 427 drivers/pci/iov.c for (i = 0; i < num_vfs; i++) { num_vfs 592 drivers/virtio/virtio_pci_common.c static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs) num_vfs 607 drivers/virtio/virtio_pci_common.c if (num_vfs == 0) { num_vfs 612 drivers/virtio/virtio_pci_common.c ret = pci_enable_sriov(pci_dev, num_vfs); num_vfs 616 drivers/virtio/virtio_pci_common.c return num_vfs; num_vfs 871 include/linux/mlx4/device.h int num_vfs; num_vfs 469 include/linux/mlx5/driver.h int num_vfs; num_vfs 836 include/linux/pci.h int 
(*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */ num_vfs 2059 include/linux/pci.h int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); num_vfs 910 net/core/rtnetlink.c int num_vfs = dev_num_vf(dev->dev.parent); num_vfs 912 net/core/rtnetlink.c size += num_vfs * num_vfs 1324 net/core/rtnetlink.c int i, num_vfs; num_vfs 1329 net/core/rtnetlink.c num_vfs = dev_num_vf(dev->dev.parent); num_vfs 1330 net/core/rtnetlink.c if (nla_put_u32(skb, IFLA_NUM_VF, num_vfs)) num_vfs 1340 net/core/rtnetlink.c for (i = 0; i < num_vfs; i++) {
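
Most of the driver occurrences above feed the same contract: the PF driver fills in the ->sriov_configure member of struct pci_driver (include/linux/pci.h, line 836 above), and the PCI core invokes it from sriov_numvfs_store() in drivers/pci/iov.c (line 308 above) when userspace writes the requested VF count to the device's sriov_numvfs sysfs attribute. What follows is a minimal sketch of that pattern only, not code from any of the listed drivers; the "foo" names are hypothetical, while pci_enable_sriov(), pci_disable_sriov(), pci_vfs_assigned() and the return-value convention (number of VFs enabled, or a negative errno) come from the listings and the PCI core.

#include <linux/module.h>
#include <linux/pci.h>

static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	int err;

	if (num_vfs == 0) {
		/* Refuse to tear down VFs that are still assigned to a guest. */
		if (pci_vfs_assigned(pdev))
			return -EBUSY;
		pci_disable_sriov(pdev);
		return 0;
	}

	err = pci_enable_sriov(pdev, num_vfs);
	if (err)
		return err;

	/* Success is reported as the number of VFs now enabled. */
	return num_vfs;
}

static struct pci_driver foo_pci_driver = {
	.name            = "foo",
	/* .id_table, .probe and .remove are omitted in this sketch. */
	.sriov_configure = foo_sriov_configure,
};

A write such as "echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs" reaches this callback through sriov_numvfs_store(); as the drivers/pci/iov.c entries at lines 308-314 above show, the core also warns when the callback's return value does not match the requested count.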