Full-text hits for "sriov" in linux-4.1.27, grouped by directory, file, and enclosing function. Numbers are source line numbers within each file; truncated snippets are statement wraps in the original source.

/linux-4.1.27/drivers/infiniband/hw/mlx4/

cm.c
  id_map_find_by_sl_id()
    146  struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
  id_map_ent_timeout()
    173  struct mlx4_ib_sriov *sriov = &dev->sriov;  (local)
    174  struct rb_root *sl_id_map = &sriov->sl_id_map;
    177  spin_lock(&sriov->id_map_lock);
    178  db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
    184  idr_remove(&sriov->pv_id_table, pv_id);
    188  spin_unlock(&sriov->id_map_lock);
  id_map_find_del()
    194  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    195  struct rb_root *sl_id_map = &sriov->sl_id_map;
    198  spin_lock(&sriov->id_map_lock);
    199  ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_cm_id);
    205  idr_remove(&sriov->pv_id_table, pv_cm_id);
    207  spin_unlock(&sriov->id_map_lock);
  sl_id_map_add()
    212  struct rb_root *sl_id_map = &to_mdev(ibdev)->sriov.sl_id_map;
  id_map_alloc()
    247  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    262  spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
    264  ret = idr_alloc_cyclic(&sriov->pv_id_table, ent, 0, 0, GFP_NOWAIT);
    268  list_add_tail(&ent->list, &sriov->cm_list);
    271  spin_unlock(&sriov->id_map_lock);
  id_map_get()
    287  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    289  spin_lock(&sriov->id_map_lock);
    295  ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, *pv_cm_id);
    296  spin_unlock(&sriov->id_map_lock);
  schedule_delayed()
    303  struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;  (local)
    306  spin_lock(&sriov->id_map_lock);
    307  spin_lock_irqsave(&sriov->going_down_lock, flags);
    309  if (!sriov->is_going_down) {
    313  spin_unlock_irqrestore(&sriov->going_down_lock, flags);
    314  spin_unlock(&sriov->id_map_lock);
  mlx4_ib_cm_paravirt_init()
    405  spin_lock_init(&dev->sriov.id_map_lock);
    406  INIT_LIST_HEAD(&dev->sriov.cm_list);
    407  dev->sriov.sl_id_map = RB_ROOT;
    408  idr_init(&dev->sriov.pv_id_table);
  mlx4_ib_cm_paravirt_clean()
    415  struct mlx4_ib_sriov *sriov = &dev->sriov;  (local)
    416  struct rb_root *sl_id_map = &sriov->sl_id_map;
    423  spin_lock(&sriov->id_map_lock);
    424  list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
    431  spin_unlock(&sriov->id_map_lock);
    437  spin_lock(&sriov->id_map_lock);
    445  idr_remove(&sriov->pv_id_table, (int) ent->pv_cm_id);
    447  list_splice_init(&dev->sriov.cm_list, &lh);
    461  idr_remove(&sriov->pv_id_table, (int) map->pv_cm_id);
    465  list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) {
    471  spin_unlock(&sriov->id_map_lock);

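The hits above outline mlx4's paravirtualized CM ID map: an IDR keyed by the paravirtual CM ID plus an rb-tree (sl_id_map) keyed by the slave-side ID, both serialized by id_map_lock. A minimal sketch of the IDR half of that pattern; struct id_map_entry here is a stand-in with only the fields the sketch needs, not the driver's real layout:

    #include <linux/gfp.h>
    #include <linux/idr.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct id_map_entry {
    	int pv_cm_id;
    	struct list_head list;
    };

    struct cm_id_map {
    	spinlock_t id_map_lock;
    	struct idr pv_id_table;
    	struct list_head cm_list;
    };

    /* allocate a cyclic PV id for a new entry, as id_map_alloc() does */
    static int id_map_insert(struct cm_id_map *m, struct id_map_entry *ent)
    {
    	int ret;

    	spin_lock(&m->id_map_lock);
    	/* GFP_NOWAIT: allocation must not sleep under the spinlock */
    	ret = idr_alloc_cyclic(&m->pv_id_table, ent, 0, 0, GFP_NOWAIT);
    	if (ret >= 0) {
    		ent->pv_cm_id = ret;
    		list_add_tail(&ent->list, &m->cm_list);
    	}
    	spin_unlock(&m->id_map_lock);
    	return ret;
    }

    /* look up and unlink an entry, as id_map_find_del() does */
    static struct id_map_entry *id_map_del(struct cm_id_map *m, int pv_cm_id)
    {
    	struct id_map_entry *ent;

    	spin_lock(&m->id_map_lock);
    	ent = idr_find(&m->pv_id_table, pv_cm_id);
    	if (ent) {
    		idr_remove(&m->pv_id_table, pv_cm_id);
    		list_del(&ent->list);
    	}
    	spin_unlock(&m->id_map_lock);
    	return ent;
    }

The real id_map_find_del() additionally unlinks the entry from the sl_id_map rb-tree inside the same critical section, so both indexes stay consistent.
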
alias_GUID.c
  mlx4_ib_update_cache_on_guid_change()
    86   guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
    102  memcpy(&dev->sriov.demux[port_index].guid_cache[slave_id],
  get_cached_alias_guid()
    117  return *(__be64 *)&dev->sriov.demux[port - 1].guid_cache[index];
  mlx4_ib_slave_alias_guid_event()
    136  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    137  if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
    141  curr_guid = *(__be64 *)&dev->sriov.
    154  *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
    157  dev->sriov.alias_guid.ports_guid[port_index].
    160  dev->sriov.alias_guid.ports_guid[port_index].
    164  dev->sriov.alias_guid.ports_guid[port_index].
    166  dev->sriov.alias_guid.ports_guid[port_index].
    171  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
  mlx4_ib_notify_slaves_on_guid_change()
    204  rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
    206  guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
    231  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    243  spin_unlock_irqrestore(&dev->sriov.
    248  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
  aliasguid_query_handler()
    302  rec = &dev->sriov.alias_guid.ports_guid[port_index].
    322  rec = &dev->sriov.alias_guid.ports_guid[port_index].
    325  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    419  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
    429  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    430  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    431  if (!dev->sriov.is_going_down) {
    433  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
    434  &dev->sriov.alias_guid.ports_guid[port_index].
    443  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    444  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  invalidate_guid_record()
    453  dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
    459  *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
    472  dev->sriov.alias_guid.ports_guid[port - 1].
    474  if (dev->sriov.alias_guid.ports_guid[port - 1].
    476  dev->sriov.alias_guid.ports_guid[port - 1].
  set_guid_rec()
    495  &dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
    534  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    536  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    539  ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
    549  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    552  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    561  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    562  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    564  if (!dev->sriov.is_going_down) {
    565  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
    566  &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
    569  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    570  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  mlx4_ib_guid_port_init()
    590  *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
  mlx4_ib_invalidate_all_guid_record()
    607  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    608  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    610  if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
    613  dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
    619  if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down) {
    625  cancel_delayed_work(&dev->sriov.alias_guid.
    627  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
    628  &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
    631  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    632  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  set_required_record()
    645  &dev->sriov.alias_guid.ports_guid[port].
  get_low_record_time_index()
    694  rec = dev->sriov.alias_guid.ports_guid[port].
  get_next_record_to_update()
    724  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
    734  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
  alias_guid_work()
    750  struct mlx4_ib_dev *dev = container_of(ib_sriov, struct mlx4_ib_dev, sriov);
  mlx4_ib_init_alias_guid_work()
    777  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    778  spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
    779  if (!dev->sriov.is_going_down) {
    784  cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
    786  queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
    787  &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
    789  spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1);
    790  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  mlx4_ib_destroy_alias_guid_service()
    796  struct mlx4_ib_sriov *sriov = &dev->sriov;  (local)
    803  cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
    804  det = &sriov->alias_guid.ports_guid[i];
    805  spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
    813  spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
    817  spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
    819  spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
    822  flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
    823  destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
    825  ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
    826  kfree(dev->sriov.alias_guid.sa_client);
  mlx4_ib_init_alias_guid_service()
    838  dev->sriov.alias_guid.sa_client =
    839  kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL);
    840  if (!dev->sriov.alias_guid.sa_client)
    843  ib_sa_register_client(dev->sriov.alias_guid.sa_client);
    845  spin_lock_init(&dev->sriov.alias_guid.ag_work_lock);
    855  memset(&dev->sriov.alias_guid.ports_guid[i], 0,
    857  dev->sriov.alias_guid.ports_guid[i].state_flags |=
    861  memset(dev->sriov.alias_guid.ports_guid[i].
    863  sizeof(dev->sriov.alias_guid.ports_guid[i].
    866  INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
    874  dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
    875  dev->sriov.alias_guid.ports_guid[i].port = i;
    878  dev->sriov.alias_guid.ports_guid[i].wq =
    880  if (!dev->sriov.alias_guid.ports_guid[i].wq) {
    884  INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work,
    891  destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
    892  dev->sriov.alias_guid.ports_guid[i].wq = NULL;
    896  ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
    897  kfree(dev->sriov.alias_guid.sa_client);
    898  dev->sriov.alias_guid.sa_client = NULL;

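Nearly every writer in alias_GUID.c follows the same discipline: take going_down_lock, then ag_work_lock, and only (re)queue the per-port delayed work while is_going_down is clear (see the hits at 429-444, 561-570, 607-632, and 777-790). A condensed sketch of that re-arm pattern, using abbreviated stand-in structs rather than the driver's real mlx4_ib_sriov layout:

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct ag_port {
    	struct workqueue_struct *wq;
    	struct delayed_work alias_guid_work;
    };

    struct ag_state {
    	spinlock_t going_down_lock;
    	spinlock_t ag_work_lock;
    	int is_going_down;
    	struct ag_port ports_guid[2];	/* mlx4 has at most two ports */
    };

    static void rearm_guid_work(struct ag_state *s, int port)
    {
    	unsigned long flags, flags1;

    	spin_lock_irqsave(&s->going_down_lock, flags);
    	spin_lock_irqsave(&s->ag_work_lock, flags1);
    	if (!s->is_going_down) {
    		/* cancel any pending run, then queue an immediate one */
    		cancel_delayed_work(&s->ports_guid[port - 1].alias_guid_work);
    		queue_delayed_work(s->ports_guid[port - 1].wq,
    				   &s->ports_guid[port - 1].alias_guid_work, 0);
    	}
    	spin_unlock_irqrestore(&s->ag_work_lock, flags1);
    	spin_unlock_irqrestore(&s->going_down_lock, flags);
    }

The consistent lock order (going_down_lock before ag_work_lock) is what lets the teardown path flip is_going_down once and know no new work can slip in behind it.
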
mad.c
  smp_snoop()
    283  if (!dev->sriov.is_going_down)
    296  !dev->sriov.is_going_down) {
  mlx4_ib_find_real_gid()
    413  if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
  mlx4_ib_send_to_slave()
    483  tun_ctx = dev->sriov.demux[port-1].tun[slave];
  handle_lid_change_event()
    951  if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
  handle_client_rereg_event()
    962  if (!dev->sriov.is_going_down) {
    963  mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
  handle_port_mgmt_change_event()
    1072  if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
    1080  else if (!dev->sriov.is_going_down) {
  mlx4_ib_tunnel_comp_handler()
    1111  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    1112  if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
    1114  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
  mlx4_ib_send_to_wire()
    1185  sqp_ctx = dev->sriov.sqps[port-1];
  free_pv_object()
    1766  if (dev->sriov.demux[port - 1].tun[slave]) {
    1767  kfree(dev->sriov.demux[port - 1].tun[slave]);
    1768  dev->sriov.demux[port - 1].tun[slave] = NULL;
  create_pv_resources()
    1847  ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
  mlx4_ib_tunnels_update()
    1924  clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
    1928  dev->sriov.sqps[port - 1], 1);
    1931  dev->sriov.demux[port - 1].tun[slave], 1);
    1937  dev->sriov.demux[port - 1].tun[slave]);
    1942  dev->sriov.sqps[port - 1]);
  mlx4_ib_init_sriov()
    2095  dev->sriov.is_going_down = 0;
    2096  spin_lock_init(&dev->sriov.going_down_lock);
    2131  dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
    2133  &dev->sriov.sqps[i]);
    2136  err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
    2148  mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
  mlx4_ib_close_sriov()
    2169  spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
    2170  dev->sriov.is_going_down = 1;
    2171  spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
    2174  flush_workqueue(dev->sriov.demux[i].ud_wq);
    2175  mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
    2176  kfree(dev->sriov.sqps[i]);
    2177  dev->sriov.sqps[i] = NULL;
    2178  mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);

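The mlx4_ib_tunnel_comp_handler() hits (1111-1114) show the same teardown guard applied to a completion handler: work is queued only while the device is alive and the demux context is active. A sketch with stand-in types (the real driver hangs these fields off mlx4_ib_dev and mlx4_ib_demux_pv_ctx):

    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    enum { DEMUX_PV_STATE_ACTIVE = 1 };	/* stand-in for the driver's enum */

    struct pv_ctx {
    	int state;
    	struct workqueue_struct *wq;
    	struct work_struct work;
    };

    struct sriov_state {
    	spinlock_t going_down_lock;
    	int is_going_down;
    };

    /* queue the bottom half only while not tearing down */
    static void tunnel_comp_handler(struct sriov_state *s, struct pv_ctx *ctx)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&s->going_down_lock, flags);
    	if (!s->is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
    		queue_work(ctx->wq, &ctx->work);
    	spin_unlock_irqrestore(&s->going_down_lock, flags);
    }

This pairs with mlx4_ib_close_sriov() above, which sets is_going_down under the same lock and then flushes the workqueues.
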
sysfs.c
  store_admin_alias_guid()
    87   spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
    89   *(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
    95   mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
    102  mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
    105  spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);

mcg.c
  mlx4_ib_mcg_demux_handler()
    887  struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
  mlx4_ib_mcg_multiplex_handler()
    936  struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];

main.c
  do_slave_init()
    2616  spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
    2617  if (!ibdev->sriov.is_going_down)
    2618  queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
    2619  spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);

mlx4_ib.h
  519  struct mlx4_ib_sriov sriov;  (member of struct mlx4_ib_dev)

qp.c
  build_mlx_header()
    2228  to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
    2231  to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].

/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/

qlcnic_sriov_common.c
  qlcnic_sriov_init()
    137  struct qlcnic_sriov *sriov;  (local)
    147  sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
    148  if (!sriov)
    151  adapter->ahw->sriov = sriov;
    152  sriov->num_vfs = num_vfs;
    153  bc = &sriov->bc;
    154  sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
    156  if (!sriov->vf_info) {
    182  vf = &sriov->vf_info[i];
    201  sriov->vf_info[i].vp = vp;
    222  kfree(sriov->vf_info);
    225  kfree(adapter->ahw->sriov);
  __qlcnic_sriov_cleanup()
    253  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    254  struct qlcnic_back_channel *bc = &sriov->bc;
    264  for (i = 0; i < sriov->num_vfs; i++) {
    265  vf = &sriov->vf_info[i];
    273  for (i = 0; i < sriov->num_vfs; i++)
    274  kfree(sriov->vf_info[i].vp);
    276  kfree(sriov->vf_info);
    277  kfree(adapter->ahw->sriov);
  qlcnic_sriov_set_guest_vlan_mode()
    426  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    430  if (sriov->allowed_vlans)
    433  sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
    434  sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
    436  sriov->num_allowed_vlans);
    440  if (!sriov->any_vlan)
    443  num_vlans = sriov->num_allowed_vlans;
    444  sriov->allowed_vlans = kzalloc(sizeof(u16) * num_vlans, GFP_KERNEL);
    445  if (!sriov->allowed_vlans)
    450  sriov->allowed_vlans[i] = vlans[i];
  qlcnic_sriov_get_vf_acl()
    457  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    471  sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
    472  switch (sriov->vlan_mode) {
  qlcnic_sriov_func_to_index()
    673  struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
    679  for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
  qlcnic_sriov_schedule_bc_cmd()
    840  static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,  (argument)
    848  queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
  qlcnic_sriov_send_bc_cmd()
    1010  vf = &adapter->ahw->sriov->vf_info[index];
  qlcnic_sriov_process_bc_cmd()
    1077  qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
  __qlcnic_sriov_add_act_list()
    1113  int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,  (argument)
    1122  qlcnic_sriov_schedule_bc_cmd(sriov, vf,
  qlcnic_sriov_add_act_list()
    1127  static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,  (argument)
    1135  __qlcnic_sriov_add_act_list(sriov, vf, trans);
  qlcnic_sriov_handle_pending_trans()
    1141  static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,  (argument)
    1179  if (qlcnic_sriov_add_act_list(sriov, vf, trans))
  qlcnic_sriov_handle_bc_cmd()
    1185  static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,  (argument)
    1205  qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
    1247  if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
  qlcnic_sriov_handle_msg_event()
    1259  static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,  (argument)
    1272  qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
  qlcnic_sriov_handle_flr_event()
    1280  static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,  (argument)
    1286  qlcnic_sriov_pf_handle_flr(sriov, vf);
  qlcnic_sriov_handle_bc_event()
    1295  struct qlcnic_sriov *sriov;  (local)
    1299  sriov = adapter->ahw->sriov;
    1306  vf = &sriov->vf_info[index];
    1313  qlcnic_sriov_handle_flr_event(sriov, vf);
    1318  qlcnic_sriov_handle_msg_event(sriov, vf);
  __qlcnic_sriov_issue_cmd()
    1376  u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
  qlcnic_sriov_channel_cfg_cmd()
    1464  struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
  qlcnic_vf_add_mc_list()
    1496  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1501  vf = &adapter->ahw->sriov->vf_info[0];
    1507  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_async_issue_cmd()
    1648  struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
  qlcnic_sriov_check_vlan_id()
    1933  static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,  (argument)
    1943  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_validate_num_vlans()
    1954  static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,  (argument)
    1961  if (vf->num_vlan >= sriov->num_allowed_vlans)
  qlcnic_sriov_validate_vlan_cfg()
    1971  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1977  vf = &adapter->ahw->sriov->vf_info[0];
    1979  if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
    1986  if (qlcnic_sriov_validate_num_vlans(sriov, vf))
    1989  if (sriov->any_vlan) {
    1990  for (i = 0; i < sriov->num_allowed_vlans; i++) {
    1991  if (sriov->allowed_vlans[i] == vid)
    1999  if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
  qlcnic_sriov_vlan_operation()
    2010  struct qlcnic_sriov *sriov;  (local)
    2012  sriov = adapter->ahw->sriov;
    2021  qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
    2024  qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
  qlcnic_sriov_cfg_vf_guest_vlan()
    2037  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2047  vf = &adapter->ahw->sriov->vf_info[0];
    2059  qlcnic_sriov_cleanup_async_list(&sriov->bc);
  qlcnic_sriov_alloc_vlans()
    2151  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2155  for (i = 0; i < sriov->num_vfs; i++) {
    2156  vf = &sriov->vf_info[i];
    2157  vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
  qlcnic_sriov_free_vlans()
    2164  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2168  for (i = 0; i < sriov->num_vfs; i++) {
    2169  vf = &sriov->vf_info[i];
  qlcnic_sriov_add_vlan_id()
    2175  void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,  (argument)
    2180  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_del_vlan_id()
    2189  void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,  (argument)
    2194  for (i = 0; i < sriov->num_allowed_vlans; i++) {

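qlcnic_sriov_init() (hits 137-225) allocates one qlcnic_sriov context plus a flat vf_info array sized by num_vfs, and unwinds both on failure. A sketch of that shape with stand-in struct definitions; note that kcalloc() is the overflow-safe spelling of the kzalloc(n * size) call at line 154:

    #include <linux/slab.h>

    struct vf_info_stub { int pci_func; };	/* stand-in for qlcnic_vf_info */

    struct sriov_stub {
    	int num_vfs;
    	struct vf_info_stub *vf_info;
    };

    static struct sriov_stub *sriov_alloc(int num_vfs)
    {
    	struct sriov_stub *sriov;

    	sriov = kzalloc(sizeof(*sriov), GFP_KERNEL);
    	if (!sriov)
    		return NULL;

    	sriov->num_vfs = num_vfs;
    	sriov->vf_info = kcalloc(num_vfs, sizeof(*sriov->vf_info),
    				 GFP_KERNEL);
    	if (!sriov->vf_info) {
    		kfree(sriov);	/* unwind the partial allocation */
    		return NULL;
    	}
    	return sriov;
    }
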
qlcnic_sriov_pf.c
  qlcnic_sriov_pf_cal_res_limit()
    70   struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    71   struct qlcnic_resources *res = &sriov->ff_max;
    72   u16 num_macs = sriov->num_allowed_vlans + 1;
    81   num_vfs = sriov->num_vfs;
    103  info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
    108  vp = sriov->vf_info[id].vp;
  qlcnic_sriov_pf_set_ff_max_res()
    139  struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
  qlcnic_sriov_set_vf_max_vlan()
    158  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    162  total_fn = sriov->num_vfs + 1;
    165  sriov->num_allowed_vlans = temp - 1;
    168  sriov->num_allowed_vlans = 1;
    171  sriov->num_allowed_vlans);
  qlcnic_sriov_pf_reset_vport_handle()
    235  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    240  sriov->vp_handle = 0;
    245  vp = sriov->vf_info[index].vp;
  qlcnic_sriov_pf_set_vport_handle()
    253  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    258  sriov->vp_handle = vport_handle;
    263  vp = sriov->vf_info[index].vp;
  qlcnic_sriov_pf_get_vport_handle()
    271  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    276  return sriov->vp_handle;
    280  vf_info = &sriov->vf_info[index];
  qlcnic_sriov_pf_cfg_eswitch()
    398  "Failed to enable sriov eswitch%d\n", err);
  qlcnic_sriov_pf_del_flr_queue()
    408  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    409  struct qlcnic_back_channel *bc = &sriov->bc;
    412  for (i = 0; i < sriov->num_vfs; i++)
    413  cancel_work_sync(&sriov->vf_info[i].flr_work);
  qlcnic_sriov_pf_create_flr_queue()
    420  struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
  qlcnic_sriov_set_vf_acl()
    693  vp = adapter->ahw->sriov->vf_info[id].vp;
  qlcnic_sriov_pf_channel_cfg_cmd()
    744  struct qlcnic_sriov *sriov;  (local)
    750  sriov = adapter->ahw->sriov;
    762  size = size * sriov->num_allowed_vlans;
  qlcnic_83xx_cfg_default_mac_vlan()
    857  struct qlcnic_sriov *sriov;  (local)
    861  sriov = adapter->ahw->sriov;
    865  for (i = 0; i < sriov->num_allowed_vlans; i++) {
  qlcnic_sriov_pf_get_acl_cmd()
    1343  struct qlcnic_sriov *sriov;  (local)
    1346  sriov = adapter->ahw->sriov;
    1360  cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
  qlcnic_sriov_pf_del_guest_vlan()
    1374  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1382  qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
    1387  qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
  qlcnic_sriov_pf_add_guest_vlan()
    1399  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1409  qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
    1429  qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
  qlcnic_sriov_add_act_list_irqsave()
    1649  static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,  (argument)
    1658  __qlcnic_sriov_add_act_list(sriov, vf, trans);
  __qlcnic_sriov_process_flr()
    1681  qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
  qlcnic_sriov_schedule_flr()
    1697  static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,  (argument)
    1705  queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
  qlcnic_sriov_handle_soft_flr()
    1712  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1718  qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
  qlcnic_sriov_pf_handle_flr()
    1739  void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,  (argument)
    1758  sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
    1760  qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
  qlcnic_sriov_pf_reset()
    1767  struct qlcnic_sriov *sriov = ahw->sriov;  (local)
    1769  u16 num_vfs = sriov->num_vfs;
    1773  vf = &sriov->vf_info[i];
  qlcnic_sriov_set_vf_mac()
    1807  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1815  num_vfs = sriov->num_vfs;
    1826  vf_info = &sriov->vf_info[i];
    1835  vf_info = &sriov->vf_info[vf];
  qlcnic_sriov_set_vf_tx_rate()
    1855  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1864  if (vf >= sriov->num_vfs)
    1867  vf_info = &sriov->vf_info[vf];
  qlcnic_sriov_set_vf_vlan()
    1920  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    1927  if (vf >= sriov->num_vfs || qos > 7)
    1937  vf_info = &sriov->vf_info[vf];
    1947  sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
    1959  qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
  qlcnic_sriov_get_vf_config()
    1996  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2002  if (vf >= sriov->num_vfs)
    2005  vp = sriov->vf_info[vf].vp;
  qlcnic_sriov_set_vf_spoofchk()
    2026  struct qlcnic_sriov *sriov = adapter->ahw->sriov;  (local)
    2033  if (vf >= sriov->num_vfs)
    2036  vf_info = &sriov->vf_info[vf];

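The add/del VLAN hits in qlcnic_sriov_common.c (2175-2194) together with the kcalloc at 2157 suggest each VF keeps a fixed array of sriov->num_allowed_vlans slots, with zero marking a free slot. A hedged sketch of that bookkeeping; the slot convention is inferred from the loop bounds above, not confirmed from the full source:

    #include <linux/types.h>

    struct vf_stub {
    	u16 *sriov_vlans;	/* num_allowed_vlans entries, 0 == free */
    	int num_vlan;
    };

    struct sriov_stub {
    	u16 num_allowed_vlans;
    };

    static void vf_add_vlan(struct sriov_stub *sriov, struct vf_stub *vf,
    			u16 vlan_id)
    {
    	int i;

    	for (i = 0; i < sriov->num_allowed_vlans; i++) {
    		if (!vf->sriov_vlans[i]) {	/* first free slot */
    			vf->sriov_vlans[i] = vlan_id;
    			vf->num_vlan++;
    			return;
    		}
    	}
    }

    static void vf_del_vlan(struct sriov_stub *sriov, struct vf_stub *vf,
    			u16 vlan_id)
    {
    	int i;

    	for (i = 0; i < sriov->num_allowed_vlans; i++) {
    		if (vf->sriov_vlans[i] == vlan_id) {
    			vf->sriov_vlans[i] = 0;	/* mark slot free */
    			vf->num_vlan--;
    			return;
    		}
    	}
    }
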
qlcnic_sriov.h
  265  static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,  (argument)

qlcnic.h
  532  struct qlcnic_sriov *sriov;  (member of struct qlcnic_hardware_context)

qlcnic_main.c
  __qlcnic_down()
    1950  qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);

/linux-4.1.27/drivers/pci/

iov.c
  pci_iov_virtfn_bus()
    26   return dev->bus->number + ((dev->devfn + dev->sriov->offset +
    27   dev->sriov->stride * vf_id) >> 8);
  pci_iov_virtfn_devfn()
    34   return (dev->devfn + dev->sriov->offset +
    35   dev->sriov->stride * vf_id) & 0xff;
  pci_iov_set_numvfs()
    46   struct pci_sriov *iov = dev->sriov;
  virtfn_max_buses()
    62   struct pci_sriov *iov = dev->sriov;
  pci_iov_resource_size()
    108  return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
  virtfn_add()
    119  struct pci_sriov *iov = dev->sriov;
    122  mutex_lock(&iov->dev->sriov->lock);
    157  mutex_unlock(&iov->dev->sriov->lock);
    176  mutex_lock(&iov->dev->sriov->lock);
    181  mutex_unlock(&iov->dev->sriov->lock);
  virtfn_remove()
    190  struct pci_sriov *iov = dev->sriov;
    213  mutex_lock(&iov->dev->sriov->lock);
    216  mutex_unlock(&iov->dev->sriov->lock);
  sriov_enable()
    236  struct pci_sriov *iov = dev->sriov;
  sriov_disable()
    358  struct pci_sriov *iov = dev->sriov;
  sriov_init()
    475  dev->sriov = iov;
  sriov_release()
    493  BUG_ON(dev->sriov->num_VFs);
    495  if (dev != dev->sriov->dev)
    496  pci_dev_put(dev->sriov->dev);
    498  mutex_destroy(&dev->sriov->lock);
    500  kfree(dev->sriov);
    501  dev->sriov = NULL;
  sriov_restore_state()
    508  struct pci_sriov *iov = dev->sriov;
  pci_iov_resource_bar()
    568  return dev->sriov->pos + PCI_SRIOV_BAR +
  pci_iov_bus_range()
    618  if (dev->sriov->max_VF_buses > max)
    619  max = dev->sriov->max_VF_buses;
  pci_num_vf()
    669  return dev->sriov->num_VFs;
  pci_vfs_assigned()
    694  pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
  pci_sriov_set_totalvfs()
    731  if (numvfs > dev->sriov->total_VFs)
    735  if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
    738  dev->sriov->driver_max_VFs = numvfs;
  pci_sriov_get_totalvfs()
    757  if (dev->sriov->driver_max_VFs)
    758  return dev->sriov->driver_max_VFs;
    760  return dev->sriov->total_VFs;

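pci_iov_virtfn_bus()/pci_iov_virtfn_devfn() (hits 26-35) implement the SR-IOV routing-ID rule: a VF's RID is the PF's RID plus First VF Offset plus VF Stride times the VF index. A standalone illustration of the same arithmetic (the geometry values in main() are made up):

    #include <stdio.h>

    static unsigned vf_bus(unsigned pf_bus, unsigned pf_devfn,
    		       unsigned offset, unsigned stride, unsigned vf_id)
    {
    	/* a carry out of the 8-bit devfn field lands on a higher bus */
    	return pf_bus + ((pf_devfn + offset + stride * vf_id) >> 8);
    }

    static unsigned vf_devfn(unsigned pf_devfn, unsigned offset,
    			 unsigned stride, unsigned vf_id)
    {
    	return (pf_devfn + offset + stride * vf_id) & 0xff;
    }

    int main(void)
    {
    	/* made-up geometry: PF at 03:00.0, First VF Offset 128, VF Stride 2 */
    	unsigned bus = vf_bus(0x03, 0x00, 128, 2, 4);
    	unsigned devfn = vf_devfn(0x00, 128, 2, 4);

    	printf("VF 4 -> %02x:%02x.%u\n", bus, devfn >> 3, devfn & 7);
    	return 0;	/* prints "VF 4 -> 03:11.0" */
    }

This carry into the bus number is why virtfn_max_buses() and pci_iov_bus_range() exist: with a large offset or stride, VFs can spill onto buses behind the PF's bridge.
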
ats.c
  pci_enable_ats()
    70   mutex_lock(&pdev->sriov->lock);
    78   mutex_unlock(&pdev->sriov->lock);
  pci_disable_ats()
    119  mutex_lock(&pdev->sriov->lock);
    123  mutex_unlock(&pdev->sriov->lock);

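pci_enable_ats()/pci_disable_ats() take the PF's sriov->lock (hits 70-123) so that a VF toggling ATS is serialized against its PF's SR-IOV/ATS state. A sketch of that shape only; struct pci_sriov is private to drivers/pci/, so code like this compiles only inside that directory, and the elided body is where the ATS control register would actually be programmed:

    #include <linux/pci.h>
    #include "pci.h"	/* drivers/pci internal header: struct pci_sriov */

    static int vf_ats_enable_locked(struct pci_dev *vf)
    {
    	struct pci_dev *pf = vf->physfn;

    	mutex_lock(&pf->sriov->lock);
    	/* ... enable ATS on the VF, keeping it consistent with the
    	 * PF's ATS state (e.g. the translation-unit size) ... */
    	mutex_unlock(&pf->sriov->lock);
    	return 0;
    }
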
pci-sysfs.c
  sriov_numvfs_show()
    451  return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
  sriov_numvfs_store()
    476  if (num_vfs == pdev->sriov->num_VFs)
    494  if (pdev->sriov->num_VFs) {
    496  pdev->sriov->num_VFs, num_vfs);

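The sriov_numvfs store shown above ultimately calls the bound PF driver's ->sriov_configure() hook. A minimal driver-side counterpart using only public PCI core APIs; the my_* names are placeholders, not a real driver:

    #include <linux/module.h>
    #include <linux/pci.h>

    static int my_sriov_configure(struct pci_dev *pdev, int num_vfs)
    {
    	if (num_vfs == 0) {
    		/* refuse to tear down VFs a guest still owns */
    		if (pci_vfs_assigned(pdev))
    			return -EBUSY;
    		pci_disable_sriov(pdev);
    		return 0;
    	}

    	/* convention: return the number of VFs enabled on success */
    	return pci_enable_sriov(pdev, num_vfs) ? : num_vfs;
    }

    static struct pci_driver my_driver = {
    	.name		 = "my_pf_driver",
    	.sriov_configure = my_sriov_configure,
    };

Writing 0 then a new count to /sys/bus/pci/devices/DEVICE/sriov_numvfs is the standard way to resize the VF set; the core rejects a direct change between two nonzero values.
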
pci.c
  pci_enable_device_flags()
    1300  /* only skip sriov related */

/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/

bnx2x_sriov.c
  bnx2x_vf_bus()
    1094  struct bnx2x_sriov *iov = &bp->vfdb->sriov;
  bnx2x_vf_devfn()
    1103  struct bnx2x_sriov *iov = &bp->vfdb->sriov;
  bnx2x_vf_set_bars()
    1112  struct bnx2x_sriov *iov = &bp->vfdb->sriov;
  bnx2x_sriov_pci_cfg_info()
    1179  DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
  bnx2x_iov_init_one()
    1234  /* verify sriov capability is present in configuration space */
    1281  /* get the sriov info - Linux already collected all the pertinent
    1282  * information, however the sriov structure is for the private use
    1286  iov = &(bp->vfdb->sriov);
  bnx2x_iov_remove_one()
    1359  for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
    1362  bp->vfdb->sriov.first_vf_in_pf +
    1365  bp->vfdb->sriov.first_vf_in_pf + vf_idx);
  bnx2x_iov_alloc_mem()
    1409  tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
  bnx2x_iov_nic_init()
    1595  DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
  for_each_vf()
    1604  int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
  bnx2x_vf_acquire()
    2047  int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
  bnx2x_enable_sriov()
    2506  first_vf = bp->vfdb->sriov.first_vf_in_pf;
    2566  /* enable sriov. This will probe all the VFs, and consequentially cause
    2569  DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
    2581  DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
  bnx2x_vf_op_prep()
    2620  BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");

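bnx2x numbers VFs device-wide: hits 1604 and 2047 compute a base connection ID as (first_vf_in_pf + vf index) times a per-VF CID count, and bnx2x_vf_headroom() in bnx2x_sriov.h scales the same constant. A sketch of that arithmetic; the constant's value here is illustrative, not the driver's real BNX2X_CIDS_PER_VF:

    #define CIDS_PER_VF 4	/* stand-in for BNX2X_CIDS_PER_VF */

    /* each VF owns a fixed, contiguous window of connection IDs */
    static int vf_base_cid(int first_vf_in_pf, int vf_idx)
    {
    	return (first_vf_in_pf + vf_idx) * CIDS_PER_VF;
    }

Because the index is device-wide rather than per-PF, bnx2x_iov_alloc_mem() (hit 1409) has to size its tables from first_vf_in_pf + the PF's own VF count.
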
bnx2x_sriov.h
  212  #define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
  316  struct bnx2x_sriov sriov;  (member of struct bnx2x_vfdb)
  bnx2x_vf_headroom()
    524  return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;

bnx2x_vfpf.c
  bnx2x_vf_mbx_schedule()
    2059  if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >

bnx2x.h
  1855  /* used only in sriov */

bnx2x_main.c
  bnx2x_drv_info_ether_stat()
    3407  ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
  bnx2x_set_qm_cid_count()
    13084  /* must be called after sriov-enable */

/linux-4.1.27/drivers/net/ethernet/brocade/bna/

bfa_defs.h
  222  char cap_sriov;  /* capability sriov */

/linux-4.1.27/drivers/net/ethernet/sfc/

siena_sriov.c
  efx_siena_sriov_vfs_init()
    1238  unsigned index, devfn, sriov, buftbl_base;  (local)
    1243  sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
    1244  if (!sriov)
    1247  pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, &offset);
    1248  pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, &stride);

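efx_siena_sriov_vfs_init() (hits 1238-1248) reads VF offset and stride straight from the SR-IOV extended capability rather than relying on values cached elsewhere. The same lookup in isolation, using only standard PCI core helpers and register constants:

    #include <linux/pci.h>

    static int read_vf_geometry(struct pci_dev *pci_dev,
    			    u16 *offset, u16 *stride)
    {
    	/* locate the SR-IOV extended capability in config space */
    	int sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);

    	if (!sriov)
    		return -ENOENT;	/* device has no SR-IOV capability */

    	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_OFFSET, offset);
    	pci_read_config_word(pci_dev, sriov + PCI_SRIOV_VF_STRIDE, stride);
    	return 0;
    }

Together with the routing-ID arithmetic shown under drivers/pci/iov.c above, these two registers are all a driver needs to locate each of its VFs in config space.
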
mcdi_pcol.h
  4827  /* Max number of VFs before sriov stride and offset may need to be changed. */
  4848  /* Max number of VFs before sriov stride and offset may need to be changed. */

/linux-4.1.27/drivers/scsi/bfa/

bfa_defs.h
  615  char cap_sriov;  /*!< capability sriov */
  776  u8 sriov;  (member of struct bfa_ablk_cfg_pf_s)

/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/

ixgbe_sriov.c
  ixgbe_enable_sriov()
    161  e_err(probe, "Failed to enable PCI sriov: %d\n", err);
  ixgbe_pci_sriov_enable()
    283  e_dev_warn("Failed to enable PCI sriov: %d\n", err);

ixgbe_lib.c
  196  * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov

/linux-4.1.27/drivers/net/hyperv/

hyperv_net.h
  494  u64 sriov:1;  (bitfield member of an anonymous struct/union inside struct nvsp_2_vsc_capability)

/linux-4.1.27/include/linux/mlx4/

device.h
  84  /* base qkey for use in sriov tunnel-qp/proxy-qp communication.

/linux-4.1.27/drivers/net/ethernet/cisco/enic/

enic_main.c
  enic_set_vf_mac()
    826  * For sriov vf's set the mac in hw

/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_main.c
  init_one()
    4472  goto sriov;
    4691  sriov:  (label)

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/

main.c
  _mlx4_dev_port()
    269  * to non-sriov values

/linux-4.1.27/include/linux/

pci.h
  378  struct pci_sriov *sriov;  /* SR-IOV capability related */  (member of an anonymous union in struct pci_dev)

/linux-4.1.27/arch/powerpc/platforms/powernv/

pci-ioda.c
  pnv_pci_sriov_disable()
    1395  iov = pdev->sriov;

/linux-4.1.27/drivers/scsi/lpfc/

lpfc_init.c
  lpfc_sli_probe_sriov_nr_virtfn()
    4910  "2806 Failed to enable sriov on this device "
    4915  "2807 Successful enable sriov on this device "