/linux-4.4.14/drivers/infiniband/hw/mlx4/

cm.c
  id_map_find_by_sl_id():
    146  struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
  id_map_ent_timeout():
    173  struct mlx4_ib_sriov *sriov = &dev->sriov;  (local)
    174  struct rb_root *sl_id_map = &sriov->sl_id_map;
    177  spin_lock(&sriov->id_map_lock);
    178  db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
    184  idr_remove(&sriov->pv_id_table, pv_id);
    188  spin_unlock(&sriov->id_map_lock);
  id_map_find_del():
    194  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    195  struct rb_root *sl_id_map = &sriov->sl_id_map;
    198  spin_lock(&sriov->id_map_lock);
    199  ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
    205  idr_remove(&sriov->pv_id_table, pv_cm_id);
    207  spin_unlock(&sriov->id_map_lock);
  sl_id_map_add():
    212  struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
  id_map_alloc():
    247  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    262  spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
    264  ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
    268  list_add_tail(&ent->list, &sriov->cm_list);
    271  spin_unlock(&sriov->id_map_lock);
  id_map_get():
    287  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    289  spin_lock(&sriov->id_map_lock);
    295  ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
    296  spin_unlock(&sriov->id_map_lock);
  schedule_delayed():
    303  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    306  spin_lock(&sriov->id_map_lock);
    307  spin_lock_irqsave(&sriov->going_down_lock, flags);
    309  if (!sriov->is_going_down) {
    313  spin_unlock_irqrestore(&sriov->going_down_lock, flags);
    314  spin_unlock(&sriov->id_map_lock);
  mlx4_ib_cm_paravirt_init():
    405  spin_lock_init(&dev->sriov.id_map_lock);
    406  INIT_LIST_HEAD(&dev->sriov.cm_list);
    407  dev->sriov.sl_id_map = RB_ROOT;
    408  idr_init(&dev->sriov.pv_id_table);
  mlx4_ib_cm_paravirt_clean():
    415  struct mlx4_ib_sriov *sriov = &dev->sriov;  (local)
    416  struct rb_root *sl_id_map = &sriov->sl_id_map;
    423  spin_lock(&sriov->id_map_lock);
    424  list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
    431  spin_unlock(&sriov->id_map_lock);
    437  spin_lock(&sriov->id_map_lock);
    445  idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
    447  list_splice_init(&dev->sriov.cm_list, &lh);
    461  idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
    465  list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
    471  spin_unlock(&sriov->id_map_lock);

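Read together, the cm.c hits describe one mechanism: paravirtual CM ids live in an IDR (pv_id_table) guarded by the id_map_lock spinlock, are allocated cyclically in id_map_alloc(), and are looked up or removed under the same lock in id_map_get(), id_map_find_del() and the cleanup paths. Below is a minimal sketch of that allocate/find/remove pattern; the demo_* names and the reduced structure are assumptions of this example, not the driver's real layout:

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    struct demo_id_map {
            spinlock_t lock;                /* stands in for sriov->id_map_lock */
            struct idr pv_id_table;
    };

    static int demo_id_alloc(struct demo_id_map *map, void *ent)
    {
            int id;

            /* Pre-fill the IDR's per-cpu cache so the GFP_NOWAIT
             * allocation under the spinlock is unlikely to fail. */
            idr_preload(GFP_KERNEL);
            spin_lock(&map->lock);
            /* Cyclic allocation: freed ids are not reused right away,
             * which makes stale ids from slow peers easy to reject. */
            id = idr_alloc_cyclic(&map->pv_id_table, ent, 0, 0, GFP_NOWAIT);
            spin_unlock(&map->lock);
            idr_preload_end();

            return id;      /* >= 0 on success, negative errno on failure */
    }

    static void *demo_id_find_del(struct demo_id_map *map, int id)
    {
            void *ent;

            spin_lock(&map->lock);
            ent = idr_find(&map->pv_id_table, id);
            if (ent)
                    idr_remove(&map->pv_id_table, id);
            spin_unlock(&map->lock);

            return ent;
    }
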
alias_GUID.c
  mlx4_ib_update_cache_on_guid_change():
    86   guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
    102  memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
  get_cached_alias_guid():
    117  return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
  mlx4_ib_slave_alias_guid_event():
    136  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    137  if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
    141  curr_guid = *(__be64 *)&dev->sriov.
    154  *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
    157  dev->sriov.alias_guid.ports_guid[port_index].
    160  dev->sriov.alias_guid.ports_guid[port_index].
    164  dev->sriov.alias_guid.ports_guid[port_index].
    166  dev->sriov.alias_guid.ports_guid[port_index].
    171  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
  mlx4_ib_notify_slaves_on_guid_change():
    204  rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
    206  guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
    236  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    248  spin_unlock_irqrestore(&dev->sriov.
    253  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
  aliasguid_query_handler():
    307  rec = &dev->sriov.alias_guid.ports_guid[port_index].
    327  rec = &dev->sriov.alias_guid.ports_guid[port_index].
    330  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    424  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
    434  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    435  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    436  if (!dev->sriov.is_going_down) {
    438  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
    439  &dev->sriov.alias_guid.ports_guid[port_index].
    448  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    449  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  invalidate_guid_record():
    458  dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
    464  *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
    477  dev->sriov.alias_guid.ports_guid[port - 1].
    479  if (dev->sriov.alias_guid.ports_guid[port - 1].
    481  dev->sriov.alias_guid.ports_guid[port - 1].
  set_guid_rec():
    500  &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
    539  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    541  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    544  ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
    554  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    557  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    566  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    567  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    569  if (!dev->sriov.is_going_down) {
    570  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
    571  &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
    574  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    575  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  mlx4_ib_guid_port_init():
    595  *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
  mlx4_ib_invalidate_all_guid_record():
    612  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    613  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    615  if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
    618  dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
    624  if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
    630  cancel_delayed_work(&dev->sriov.alias_guid.
    632  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
    633  &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
    636  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    637  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  set_required_record():
    650  &dev->sriov.alias_guid.ports_guid[port].
  get_low_record_time_index():
    699  rec = dev->sriov.alias_guid.ports_guid[port].
  get_next_record_to_update():
    729  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    739  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
  alias_guid_work():
    755  struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
  mlx4_ib_init_alias_guid_work():
    782  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    783  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    784  if (!dev->sriov.is_going_down) {
    789  cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
    791  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
    792  &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
    794  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    795  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  mlx4_ib_destroy_alias_guid_service():
    801  struct mlx4_ib_sriov *sriov = &dev->sriov;  (local)
    808  cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
    809  det = &sriov->alias_guid.ports_guid[i];
    810  spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
    818  spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
    822  spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
    824  spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
    827  flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
    828  destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
    830  ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
    831  kfree(dev->sriov.alias_guid.sa_client);
  mlx4_ib_init_alias_guid_service():
    843  dev->sriov.alias_guid.sa_client =
    844  kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
    845  if (!dev->sriov.alias_guid.sa_client)
    848  ib_sa_register_client(dev->sriov.alias_guid.sa_client);
    850  spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
    860  memset(&dev->sriov.alias_guid.ports_guid[i], 0,
    862  dev->sriov.alias_guid.ports_guid[i].state_flags |=
    866  memset(dev->sriov.alias_guid.ports_guid[i].
    868  sizeof(dev->sriov.alias_guid.ports_guid[i].
    871  INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
    879  dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
    880  dev->sriov.alias_guid.ports_guid[i].port = i;
    883  dev->sriov.alias_guid.ports_guid[i].wq =
    885  if (!dev->sriov.alias_guid.ports_guid[i].wq) {
    889  INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
    896  destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
    897  dev->sriov.alias_guid.ports_guid[i].wq = NULL;
    901  ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
    902  kfree(dev->sriov.alias_guid.sa_client);
    903  dev->sriov.alias_guid.sa_client = NULL;

mad.c
  smp_snoop():
    276  if (!dev->sriov.is_going_down)
    289  !dev->sriov.is_going_down) {
  mlx4_ib_find_real_gid():
    407  if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
  mlx4_ib_send_to_slave():
    478  tun_ctx = dev->sriov.demux[port-1].tun[slave];
  handle_lid_change_event():
    969  if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
  handle_client_rereg_event():
    980  if (!dev->sriov.is_going_down) {
    981  mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
  handle_port_mgmt_change_event():
    1090  if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
    1098  else if (!dev->sriov.is_going_down) {
  mlx4_ib_tunnel_comp_handler():
    1129  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    1130  if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
    1132  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  mlx4_ib_send_to_wire():
    1204  sqp_ctx = dev->sriov.sqps[port-1];
  free_pv_object():
    1792  if (dev->sriov.demux[port - 1].tun[slave]) {
    1793  kfree(dev->sriov.demux[port - 1].tun[slave]);
    1794  dev->sriov.demux[port - 1].tun[slave] = NULL;
  create_pv_resources():
    1868  ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
  mlx4_ib_tunnels_update():
    1939  clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
    1943  dev->sriov.sqps[port - 1], 1);
    1946  dev->sriov.demux[port - 1].tun[slave], 1);
    1952  dev->sriov.demux[port - 1].tun[slave]);
    1957  dev->sriov.sqps[port - 1]);
  mlx4_ib_init_sriov():
    2108  dev->sriov.is_going_down = 0;
    2109  spin_lock_init(&dev->sriov.going_down_lock);
    2144  dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
    2146  &dev->sriov.sqps[i]);
    2149  err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
    2161  mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
  mlx4_ib_close_sriov():
    2182  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    2183  dev->sriov.is_going_down = 1;
    2184  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
    2187  flush_workqueue(dev->sriov.demux[i].ud_wq);
    2188  mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
    2189  kfree(dev->sriov.sqps[i]);
    2190  dev->sriov.sqps[i] = NULL;
    2191  mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);

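The mlx4_ib_close_sriov() hits (lines 2182-2191) are one half of a handshake with the many is_going_down tests above: the flag is set under going_down_lock, so any worker that checks it under the same lock either sees the flag or has already queued its work, and a subsequent flush_workqueue() drains whatever got in. A simplified sketch of both halves, with demo_* names assumed for illustration:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct demo_sriov {
            spinlock_t going_down_lock;
            int is_going_down;
            struct workqueue_struct *wq;
            struct work_struct work;
    };

    static void demo_maybe_queue(struct demo_sriov *s)
    {
            unsigned long flags;

            spin_lock_irqsave(&s->going_down_lock, flags);
            if (!s->is_going_down)          /* queue only while still up */
                    queue_work(s->wq, &s->work);
            spin_unlock_irqrestore(&s->going_down_lock, flags);
    }

    static void demo_close(struct demo_sriov *s)
    {
            unsigned long flags;

            spin_lock_irqsave(&s->going_down_lock, flags);
            s->is_going_down = 1;           /* no new work past this point */
            spin_unlock_irqrestore(&s->going_down_lock, flags);

            flush_workqueue(s->wq);         /* drain work queued before the flag */
    }
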
sysfs.c
  store_admin_alias_guid():
    87   spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
    89   *(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
    95   mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
    102  mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
    105  spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);

mcg.c
  mlx4_ib_mcg_demux_handler():
    891  struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
  mlx4_ib_mcg_multiplex_handler():
    940  struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];

main.c
  do_slave_init():
    2639  spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
    2640  if (!ibdev->sriov.is_going_down) {
    2642  queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
    2643  spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
    2645  spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);

mlx4_ib.h
    557  struct mlx4_ib_sriov sriov;  (member in struct:mlx4_ib_dev)

qp.c
  build_mlx_header():
    2340  to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
    2343  to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].

/linux-4.4.14/drivers/net/ethernet/qlogic/qlcnic/

qlcnic_sriov_common.c
  qlcnic_sriov_init():
    138  struct qlcnic_sriov *sriov;  (local)
    148  sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
    149  if (!sriov)
    152  adapter->ahw->sriov = sriov;
    153  sriov->num_vfs = num_vfs;
    154  bc = &sriov->bc;
    155  sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
    157  if (!sriov->vf_info) {
    183  vf = &sriov->vf_info[i];
    202  sriov->vf_info[i].vp = vp;
    223  kfree(sriov->vf_info);
    226  kfree(adapter->ahw->sriov);
  __qlcnic_sriov_cleanup():
    254  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    255  struct qlcnic_back_channel *bc = &sriov->bc;
    265  for (i = 0; i < sriov->num_vfs; i++) {
    266  vf = &sriov->vf_info[i];
    274  for (i = 0; i < sriov->num_vfs; i++)
    275  kfree(sriov->vf_info[i].vp);
    277  kfree(sriov->vf_info);
    278  kfree(adapter->ahw->sriov);
  qlcnic_sriov_set_guest_vlan_mode():
    427  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    431  if (sriov->allowed_vlans)
    434  sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
    435  sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
    437  sriov->num_allowed_vlans);
    441  if (!sriov->any_vlan)
    444  num_vlans = sriov->num_allowed_vlans;
    445  sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
    446  if (!sriov->allowed_vlans)
    451  sriov->allowed_vlans[i] = vlans[i];
  qlcnic_sriov_get_vf_acl():
    458  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    472  sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
    473  switch (sriov->vlan_mode) {
  qlcnic_sriov_func_to_index():
    674  struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
    680  for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
  qlcnic_sriov_schedule_bc_cmd():
    839  static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,  (argument)
    847  queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
  qlcnic_sriov_send_bc_cmd():
    1009  vf = &adapter->ahw->sriov->vf_info[index];
  qlcnic_sriov_process_bc_cmd():
    1076  qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
  __qlcnic_sriov_add_act_list():
    1112  int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,  (argument)
    1121  qlcnic_sriov_schedule_bc_cmd(sriov, vf,
  qlcnic_sriov_add_act_list():
    1126  static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,  (argument)
    1134  __qlcnic_sriov_add_act_list(sriov, vf, trans);
  qlcnic_sriov_handle_pending_trans():
    1140  static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,  (argument)
    1178  if (qlcnic_sriov_add_act_list(sriov, vf, trans))
  qlcnic_sriov_handle_bc_cmd():
    1184  static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,  (argument)
    1204  qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
    1246  if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
  qlcnic_sriov_handle_msg_event():
    1258  static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,  (argument)
    1271  qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
  qlcnic_sriov_handle_flr_event():
    1279  static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,  (argument)
    1285  qlcnic_sriov_pf_handle_flr(sriov, vf);
  qlcnic_sriov_handle_bc_event():
    1294  struct qlcnic_sriov *sriov;  (local)
    1298  sriov = adapter->ahw->sriov;
    1305  vf = &sriov->vf_info[index];
    1312  qlcnic_sriov_handle_flr_event(sriov, vf);
    1317  qlcnic_sriov_handle_msg_event(sriov, vf);
  __qlcnic_sriov_issue_cmd():
    1375  u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
  qlcnic_sriov_channel_cfg_cmd():
    1463  struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
  qlcnic_vf_add_mc_list():
    1495  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1500  vf = &adapter->ahw->sriov->vf_info[0];
    1506  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_async_issue_cmd():
    1647  struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
  qlcnic_sriov_check_vlan_id():
    1932  static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,  (argument)
    1942  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_validate_num_vlans():
    1953  static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,  (argument)
    1960  if (vf->num_vlan >= sriov->num_allowed_vlans)
  qlcnic_sriov_validate_vlan_cfg():
    1970  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1976  vf = &adapter->ahw->sriov->vf_info[0];
    1978  if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
    1985  if (qlcnic_sriov_validate_num_vlans(sriov, vf))
    1988  if (sriov->any_vlan) {
    1989  for (i = 0; i < sriov->num_allowed_vlans; i++) {
    1990  if (sriov->allowed_vlans[i] == vid)
    1998  if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
  qlcnic_sriov_vlan_operation():
    2009  struct qlcnic_sriov *sriov;  (local)
    2011  sriov = adapter->ahw->sriov;
    2020  qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
    2023  qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
  qlcnic_sriov_cfg_vf_guest_vlan():
    2036  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2046  vf = &adapter->ahw->sriov->vf_info[0];
    2058  qlcnic_sriov_cleanup_async_list(&sriov->bc);
  qlcnic_sriov_alloc_vlans():
    2150  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2154  for (i = 0; i < sriov->num_vfs; i++) {
    2155  vf = &sriov->vf_info[i];
    2156  vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
  qlcnic_sriov_free_vlans():
    2163  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2167  for (i = 0; i < sriov->num_vfs; i++) {
    2168  vf = &sriov->vf_info[i];
  qlcnic_sriov_add_vlan_id():
    2174  void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,  (argument)
    2179  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_del_vlan_id():
    2188  void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,  (argument)
    2193  for (i = 0; i < sriov->num_allowed_vlans; i++) {

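qlcnic_sriov_init() above sizes its per-VF table with kzalloc(sizeof(struct qlcnic_vf_info) * num_vfs, ...) and unwinds the allocations on failure. A sketch of the same allocate-or-unwind shape using the overflow-checked kcalloc() variant; the demo_* types are placeholders for this example only:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_vf_info { u16 pci_func; void *vp; };

    struct demo_sriov {
            u16 num_vfs;
            struct demo_vf_info *vf_info;
    };

    static struct demo_sriov *demo_sriov_init(u16 num_vfs)
    {
            struct demo_sriov *sriov;

            sriov = kzalloc(sizeof(*sriov), GFP_KERNEL);
            if (!sriov)
                    return NULL;

            sriov->num_vfs = num_vfs;
            /* kcalloc() is the zeroing, multiply-overflow-checked form of
             * the kzalloc(sizeof(...) * num_vfs) call in the listing. */
            sriov->vf_info = kcalloc(num_vfs, sizeof(*sriov->vf_info),
                                     GFP_KERNEL);
            if (!sriov->vf_info) {
                    kfree(sriov);           /* unwind in reverse order */
                    return NULL;
            }

            return sriov;
    }
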
qlcnic_sriov_pf.c
  qlcnic_sriov_pf_cal_res_limit():
    71   struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    72   struct qlcnic_resources *res = &sriov->ff_max;
    73   u16 num_macs = sriov->num_allowed_vlans + 1;
    82   num_vfs = sriov->num_vfs;
    104  info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
    109  vp = sriov->vf_info[id].vp;
  qlcnic_sriov_pf_set_ff_max_res():
    140  struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
  qlcnic_sriov_set_vf_max_vlan():
    159  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    163  total_fn = sriov->num_vfs + 1;
    166  sriov->num_allowed_vlans = temp - 1;
    169  sriov->num_allowed_vlans = 1;
    172  sriov->num_allowed_vlans);
  qlcnic_sriov_pf_reset_vport_handle():
    236  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    241  sriov->vp_handle = 0;
    246  vp = sriov->vf_info[index].vp;
  qlcnic_sriov_pf_set_vport_handle():
    254  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    259  sriov->vp_handle = vport_handle;
    264  vp = sriov->vf_info[index].vp;
  qlcnic_sriov_pf_get_vport_handle():
    272  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    277  return sriov->vp_handle;
    281  vf_info = &sriov->vf_info[index];
  qlcnic_sriov_pf_cfg_eswitch():
    399  "Failed to enable sriov eswitch%d\n", err);
  qlcnic_sriov_pf_del_flr_queue():
    409  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    410  struct qlcnic_back_channel *bc = &sriov->bc;
    413  for (i = 0; i < sriov->num_vfs; i++)
    414  cancel_work_sync(&sriov->vf_info[i].flr_work);
  qlcnic_sriov_pf_create_flr_queue():
    421  struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
  qlcnic_sriov_set_vf_acl():
    694  vp = adapter->ahw->sriov->vf_info[id].vp;
  qlcnic_sriov_pf_channel_cfg_cmd():
    745  struct qlcnic_sriov *sriov;  (local)
    751  sriov = adapter->ahw->sriov;
    763  size = size * sriov->num_allowed_vlans;
  qlcnic_83xx_cfg_default_mac_vlan():
    858  struct qlcnic_sriov *sriov;  (local)
    862  sriov = adapter->ahw->sriov;
    866  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_pf_get_acl_cmd():
    1344  struct qlcnic_sriov *sriov;  (local)
    1347  sriov = adapter->ahw->sriov;
    1361  cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
  qlcnic_sriov_pf_del_guest_vlan():
    1375  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1383  qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
    1388  qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
  qlcnic_sriov_pf_add_guest_vlan():
    1400  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1410  qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
    1430  qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
  qlcnic_sriov_add_act_list_irqsave():
    1650  static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,  (argument)
    1659  __qlcnic_sriov_add_act_list(sriov, vf, trans);
  __qlcnic_sriov_process_flr():
    1682  qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
  qlcnic_sriov_schedule_flr():
    1698  static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,  (argument)
    1706  queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
  qlcnic_sriov_handle_soft_flr():
    1713  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1719  qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
  qlcnic_sriov_pf_handle_flr():
    1740  void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,  (argument)
    1759  sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
    1761  qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
  qlcnic_sriov_pf_reset():
    1768  struct qlcnic_sriov *sriov = ahw->sriov;  (local)
    1770  u16 num_vfs = sriov->num_vfs;
    1774  vf = &sriov->vf_info[i];
  qlcnic_sriov_set_vf_mac():
    1808  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1816  num_vfs = sriov->num_vfs;
    1827  vf_info = &sriov->vf_info[i];
    1836  vf_info = &sriov->vf_info[vf];
  qlcnic_sriov_set_vf_tx_rate():
    1856  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1865  if (vf >= sriov->num_vfs)
    1868  vf_info = &sriov->vf_info[vf];
  qlcnic_sriov_set_vf_vlan():
    1921  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1928  if (vf >= sriov->num_vfs || qos > 7)
    1938  vf_info = &sriov->vf_info[vf];
    1948  sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
    1960  qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
  qlcnic_sriov_get_vf_config():
    1997  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2003  if (vf >= sriov->num_vfs)
    2006  vp = sriov->vf_info[vf].vp;
  qlcnic_sriov_set_vf_spoofchk():
    2027  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2034  if (vf >= sriov->num_vfs)
    2037  vf_info = &sriov->vf_info[vf];

qlcnic_sriov.h
  qlcnic_sriov_pf_handle_flr():
    266  static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,  (argument)

qlcnic.h
    530  struct qlcnic_sriov *sriov;  (member in struct:qlcnic_hardware_context)

qlcnic_main.c
  __qlcnic_down():
    1960  qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);

/linux-4.4.14/drivers/pci/

iov.c
  pci_iov_virtfn_bus():
    26   return dev->bus->number + ((dev->devfn + dev->sriov->offset +
    27   dev->sriov->stride * vf_id) >> 8);
  pci_iov_virtfn_devfn():
    34   return (dev->devfn + dev->sriov->offset +
    35   dev->sriov->stride * vf_id) & 0xff;
  pci_iov_set_numvfs():
    46   struct pci_sriov *iov = dev->sriov;
  compute_max_vf_buses():
    62   struct pci_sriov *iov = dev->sriov;
  pci_iov_resource_size():
    113  return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
  virtfn_add():
    124  struct pci_sriov *iov = dev->sriov;
    127  mutex_lock(&iov->dev->sriov->lock);
    162  mutex_unlock(&iov->dev->sriov->lock);
    181  mutex_lock(&iov->dev->sriov->lock);
    186  mutex_unlock(&iov->dev->sriov->lock);
  virtfn_remove():
    195  struct pci_sriov *iov = dev->sriov;
    218  mutex_lock(&iov->dev->sriov->lock);
    221  mutex_unlock(&iov->dev->sriov->lock);
  sriov_enable():
    246  struct pci_sriov *iov = dev->sriov;
  sriov_disable():
    356  struct pci_sriov *iov = dev->sriov;
  sriov_init():
    473  dev->sriov = iov;
    482  dev->sriov = NULL;
  sriov_release():
    496  BUG_ON(dev->sriov->num_VFs);
    498  if (dev != dev->sriov->dev)
    499  pci_dev_put(dev->sriov->dev);
    501  mutex_destroy(&dev->sriov->lock);
    503  kfree(dev->sriov);
    504  dev->sriov = NULL;
  sriov_restore_state():
    511  struct pci_sriov *iov = dev->sriov;
  pci_iov_resource_bar():
    571  return dev->sriov->pos + PCI_SRIOV_BAR +
  pci_iov_bus_range():
    621  if (dev->sriov->max_VF_buses > max)
    622  max = dev->sriov->max_VF_buses;
  pci_num_vf():
    672  return dev->sriov->num_VFs;
  pci_vfs_assigned():
    697  pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
  pci_sriov_set_totalvfs():
    734  if (numvfs > dev->sriov->total_VFs)
    738  if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
    741  dev->sriov->driver_max_VFs = numvfs;
  pci_sriov_get_totalvfs():
    760  if (dev->sriov->driver_max_VFs)
    761  return dev->sriov->driver_max_VFs;
    763  return dev->sriov->total_VFs;

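The first four iov.c hits are the entire SR-IOV routing-ID rule: VF N of a PF sits at the PF's routing ID plus the capability's VF offset plus N times the VF stride, and anything that carries past devfn 0xff lands on a higher bus number. The same arithmetic restated as a stand-alone sketch (the function names are this example's, not the kernel's):

    #include <stdint.h>

    /* Mirrors pci_iov_virtfn_bus() at iov.c lines 26-27. */
    static uint8_t demo_vf_bus(uint8_t pf_bus, uint8_t pf_devfn,
                               uint16_t offset, uint16_t stride, int vf_id)
    {
            return pf_bus + ((pf_devfn + offset + stride * vf_id) >> 8);
    }

    /* Mirrors pci_iov_virtfn_devfn() at iov.c lines 34-35. */
    static uint8_t demo_vf_devfn(uint8_t pf_devfn,
                                 uint16_t offset, uint16_t stride, int vf_id)
    {
            return (pf_devfn + offset + stride * vf_id) & 0xff;
    }

For instance, with offset 0x80 and stride 2, a PF at 01:00.0 places VF 0 at 01:10.0 (devfn 0x80 is device 16, function 0) and VF 1 at 01:10.2.
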
pci-sysfs.c
  sriov_numvfs_show():
    454  return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
  sriov_numvfs_store():
    479  if (num_vfs == pdev->sriov->num_VFs)
    497  if (pdev->sriov->num_VFs) {
    499  pdev->sriov->num_VFs, num_vfs);

pci.c
  pci_enable_device_flags():
    1345  /* only skip sriov related */

/linux-4.4.14/drivers/net/ethernet/sfc/

sriov.c
    12   #include "sriov.h"

siena_sriov.c
  efx_siena_sriov_vfs_init():
    1246  unsigned index, devfn, sriov, buftbl_base;  (local)
    1251  sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
    1252  if (!sriov)
    1255  pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
    1256  pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

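efx_siena_sriov_vfs_init() (lines 1251-1256) reads the VF offset and stride directly from the device's SR-IOV extended capability instead of using the PCI core's cached values. The isolated pattern, as a sketch with error handling trimmed and a demo_ name assumed:

    #include <linux/pci.h>

    static int demo_read_vf_geometry(struct pci_dev *pci_dev,
                                     u16 *offset, u16 *stride)
    {
            int sriov;

            /* Walk the extended capability list for the SR-IOV block. */
            sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
            if (!sriov)
                    return -ENOENT;         /* no SR-IOV capability */

            pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, offset);
            pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, stride);

            return 0;
    }
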
efx.c
    29   #include "sriov.h"

farch.c
    23   #include "sriov.h"

mcdi_pcol.h
    6549  /* Max number of VFs before sriov stride and offset may need to be changed. */
    6572  /* Max number of VFs before sriov stride and offset may need to be changed. */

/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_sriov.c
  bnx2x_vf_bus():
    1044  struct bnx2x_sriov *iov = &bp->vfdb->sriov;
  bnx2x_vf_devfn():
    1053  struct bnx2x_sriov *iov = &bp->vfdb->sriov;
  bnx2x_vf_set_bars():
    1062  struct bnx2x_sriov *iov = &bp->vfdb->sriov;
  bnx2x_sriov_pci_cfg_info():
    1129  DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
  bnx2x_iov_init_one():
    1184  /* verify sriov capability is present in configuration space */
    1231  /* get the sriov info - Linux already collected all the pertinent
    1232   * information, however the sriov structure is for the private use
    1236  iov = &(bp->vfdb->sriov);
  bnx2x_iov_remove_one():
    1312  for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
    1315  bp->vfdb->sriov.first_vf_in_pf +
    1318  bp->vfdb->sriov.first_vf_in_pf + vf_idx);
  bnx2x_iov_alloc_mem():
    1362  tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
  bnx2x_iov_nic_init():
    1548  DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
  for_each_vf():
    1557  int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
  bnx2x_vf_acquire():
    2004  int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
  bnx2x_enable_sriov():
    2444  first_vf = bp->vfdb->sriov.first_vf_in_pf;
    2504  /* enable sriov. This will probe all the VFs, and consequentially cause
    2507  DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
    2519  DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
  bnx2x_vf_op_prep():
    2558  BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");

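Several bnx2x_sriov.c hits derive per-VF connection-id (CID) ranges from first_vf_in_pf; the multiplier on lines 1557 and 2004 is cut off in this listing, so the sketch below assumes it is a per-VF block size along the lines of BNX2X_CIDS_PER_VF from the bnx2x_sriov.h hit at line 536:

    /* All PFs carve their VFs' CIDs out of one shared space; this PF's
     * VFs begin first_vf_in_pf blocks into it.  The demo_ name and the
     * cids_per_vf parameter are assumptions of this example. */
    static int demo_base_vf_cid(int first_vf_in_pf, int vf_idx, int cids_per_vf)
    {
            return (first_vf_in_pf + vf_idx) * cids_per_vf;
    }
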
bnx2x_sriov.h
    214  #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
    236  #define GET_NUM_VFS_PER_PF(bp) ((bp)->vfdb ? (bp)->vfdb->sriov.total \
    328  struct bnx2x_sriov sriov;  (member in struct:bnx2x_vfdb)
  bnx2x_vf_headroom():
    536  return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;

bnx2x_vfpf.c
  bnx2x_vf_mbx_schedule():
    2179  if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >

bnx2x_main.c
  bnx2x_drv_info_ether_stat():
    3403  ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
  bnx2x_set_qm_cid_count():
    13503  /* must be called after sriov-enable */

/linux-4.4.14/drivers/net/ethernet/brocade/bna/

bfa_defs.h
    219  char cap_sriov;  /* capability sriov */

/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/

eq.c
  mlx5_start_eqs():
    482  /* TODO: sriov max_vf + */ 1,

/linux-4.4.14/drivers/scsi/bfa/

bfa_defs.h
    615  char cap_sriov;  /*!< capability sriov */
    776  u8 sriov;  (member in struct:bfa_ablk_cfg_pf_s)

/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/

bnxt_sriov.c
  bnxt_vf_ndo_prep():
    29   netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");

/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/

ixgbe_sriov.c
  ixgbe_enable_sriov():
    167  e_err(probe, "Failed to enable PCI sriov: %d\n", err);
  ixgbe_pci_sriov_enable():
    289  e_dev_warn("Failed to enable PCI sriov: %d\n", err);

ixgbe_lib.c
    196  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov

/linux-4.4.14/drivers/net/hyperv/

hyperv_net.h
    496  u64 sriov:1;  (member in struct:nvsp_2_vsc_capability::__anon7922::__anon7923)

netvsc.c
  negotiate_nvsp_ver():
    464  init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

/linux-4.4.14/include/linux/mlx4/

device.h
    86   /* base qkey for use in sriov tunnel-qp/proxy-qp communication.

/linux-4.4.14/arch/sparc/kernel/

pci.c
  pcibios_add_device():
    1002  /* Add sriov arch specific initialization here.

/linux-4.4.14/drivers/net/ethernet/cisco/enic/

enic_main.c
  enic_set_vf_mac():
    896  * For sriov vf's set the mac in hw

/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_main.c
  init_one():
    4691  goto sriov;
    4949  sriov:

/linux-4.4.14/include/linux/

pci.h
    378  struct pci_sriov *sriov;  /* SR-IOV capability related */  (member in union:pci_dev::__anon13067)

/linux-4.4.14/arch/powerpc/platforms/powernv/

pci-ioda.c
  pnv_pci_sriov_disable():
    1361  iov = pdev->sriov;

/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/

main.c
  _mlx4_dev_port():
    269  * to non-sriov values

/linux-4.4.14/drivers/scsi/lpfc/

lpfc_init.c
  lpfc_sli_probe_sriov_nr_virtfn():
    4930  "2806 Failed to enable sriov on this device "
    4935  "2807 Successful enable sriov on this device "