Lines Matching refs:mdev
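The references below are an identifier cross-reference for mdev in the mlx4 Ethernet netdev code (the mlx4_en_* functions); every hit reaches the shared Ethernet device state through the per-port private data's back-pointer. As a reading aid, here is a minimal sketch of that relationship in C, listing only the members these lines actually dereference; the field types are inferred from usage, and the real definitions in the driver's mlx4_en.h contain many more members.

/* Hedged sketch, not the driver's actual definitions. */
struct mlx4_en_dev {                            /* shared, per-PCI-function state */
        struct mlx4_dev         *dev;           /* low-level HCA handle: mdev->dev->caps, firmware commands */
        struct workqueue_struct *workqueue;     /* rx_mode/watchdog/stats/vxlan work is queued here */
        struct mutex            state_lock;     /* serializes device_up/port state transitions */
        bool                    device_up;
        struct net_device       *pndev[MLX4_MAX_PORTS + 1];    /* per-port netdevs */
        struct net_device       *upper[MLX4_MAX_PORTS + 1];    /* bonding masters, see mlx4_en_netdev_event() */
        u8                      mac_removed[MLX4_MAX_PORTS + 1];
        struct mlx4_en_profile  profile;        /* num_tx_rings_p_up, per-port pause/ppp settings */
        bool                    LSO_support;
};

struct mlx4_en_priv {                           /* per-port state behind netdev_priv() */
        struct mlx4_en_dev      *mdev;          /* back-pointer used by every reference below */
        int                     port;
        bool                    port_up;
        /* rings, work items, counters, ... */
};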
196 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); in mlx4_en_filter_work()
201 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); in mlx4_en_filter_work()
267 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); in mlx4_en_filter_free()
349 queue_work(priv->mdev->workqueue, &filter->work); in mlx4_en_filter_rfs()
415 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_vlan_rx_add_vid() local
424 mutex_lock(&mdev->state_lock); in mlx4_en_vlan_rx_add_vid()
425 if (mdev->device_up && priv->port_up) { in mlx4_en_vlan_rx_add_vid()
426 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); in mlx4_en_vlan_rx_add_vid()
430 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) in mlx4_en_vlan_rx_add_vid()
432 mutex_unlock(&mdev->state_lock); in mlx4_en_vlan_rx_add_vid()
441 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_vlan_rx_kill_vid() local
449 mutex_lock(&mdev->state_lock); in mlx4_en_vlan_rx_kill_vid()
450 mlx4_unregister_vlan(mdev->dev, priv->port, vid); in mlx4_en_vlan_rx_kill_vid()
452 if (mdev->device_up && priv->port_up) { in mlx4_en_vlan_rx_kill_vid()
453 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); in mlx4_en_vlan_rx_kill_vid()
457 mutex_unlock(&mdev->state_lock); in mlx4_en_vlan_rx_kill_vid()
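The mlx4_en_vlan_rx_add_vid()/mlx4_en_vlan_rx_kill_vid() references above share one pattern: take mdev->state_lock, push the updated VLAN filter to firmware only while both the device and the port are up, then register or unregister the VLAN with the core driver. Below is a hedged reconstruction of the add path from just these call sites; the driver's active_vlans bookkeeping and exact error handling are elided.

/* Sketch reconstructed from the cross-referenced calls, not the verbatim driver code. */
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int idx, err = 0;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up && priv->port_up) {
                /* Refresh the hardware VLAN filter for this port. */
                err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
                if (err)
                        en_err(priv, "Failed configuring VLAN filter\n");
        }
        /* Reserve a VLAN index for steering; treated as best effort here. */
        if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
                en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
        mutex_unlock(&mdev->state_lock);

        return err;
}

The kill path is the mirror image: mlx4_unregister_vlan() first, then the same SET_VLAN_FLTR refresh under the same lock.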
478 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || in mlx4_en_tunnel_steer_add()
479 priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) in mlx4_en_tunnel_steer_add()
482 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, in mlx4_en_tunnel_steer_add()
496 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_uc_steer_add() local
497 struct mlx4_dev *dev = mdev->dev; in mlx4_en_uc_steer_add()
548 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_uc_steer_release() local
549 struct mlx4_dev *dev = mdev->dev; in mlx4_en_uc_steer_release()
574 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_get_qp() local
575 struct mlx4_dev *dev = mdev->dev; in mlx4_en_get_qp()
610 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_put_qp() local
611 struct mlx4_dev *dev = mdev->dev; in mlx4_en_put_qp()
630 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_replace_mac() local
631 struct mlx4_dev *dev = mdev->dev; in mlx4_en_replace_mac()
663 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); in mlx4_en_replace_mac()
700 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_set_mac() local
708 mutex_lock(&mdev->state_lock); in mlx4_en_set_mac()
713 mutex_unlock(&mdev->state_lock); in mlx4_en_set_mac()
801 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); in mlx4_en_set_rx_mode()
805 struct mlx4_en_dev *mdev) in mlx4_en_set_promisc_mode() argument
815 switch (mdev->dev->caps.steering_mode) { in mlx4_en_set_promisc_mode()
817 err = mlx4_flow_steer_promisc_add(mdev->dev, in mlx4_en_set_promisc_mode()
827 err = mlx4_unicast_promisc_add(mdev->dev, in mlx4_en_set_promisc_mode()
837 err = mlx4_multicast_promisc_add(mdev->dev, in mlx4_en_set_promisc_mode()
847 err = mlx4_SET_PORT_qpn_calc(mdev->dev, in mlx4_en_set_promisc_mode()
857 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_set_promisc_mode()
865 struct mlx4_en_dev *mdev) in mlx4_en_clear_promisc_mode() argument
874 switch (mdev->dev->caps.steering_mode) { in mlx4_en_clear_promisc_mode()
876 err = mlx4_flow_steer_promisc_remove(mdev->dev, in mlx4_en_clear_promisc_mode()
885 err = mlx4_unicast_promisc_remove(mdev->dev, in mlx4_en_clear_promisc_mode()
892 err = mlx4_multicast_promisc_remove(mdev->dev, in mlx4_en_clear_promisc_mode()
902 err = mlx4_SET_PORT_qpn_calc(mdev->dev, in mlx4_en_clear_promisc_mode()
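mlx4_en_set_promisc_mode() and mlx4_en_clear_promisc_mode() branch on the steering mode advertised in mdev->dev->caps: device-managed flow steering uses the mlx4_flow_steer_promisc_* API, B0 steering uses the unicast/multicast promisc helpers, and A0 falls back to reprogramming the port's default QP. A condensed sketch of the enable side follows; the MLX4_FS_ALL_DEFAULT constant and the simplified error handling are assumptions, and the priv->flags bookkeeping of the real helper is left out.

/* Steering-mode dispatch as suggested by the calls above (sketch only). */
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                     struct mlx4_en_dev *mdev)
{
        int err = 0;

        switch (mdev->dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port,
                                                  priv->base_qpn,
                                                  MLX4_FS_ALL_DEFAULT);
                break;
        case MLX4_STEERING_MODE_B0:
                err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
                                               priv->port);
                if (!err)
                        err = mlx4_multicast_promisc_add(mdev->dev,
                                                         priv->base_qpn,
                                                         priv->port);
                break;
        case MLX4_STEERING_MODE_A0:
                /* No per-flow promisc support: switch the port's default QP instead. */
                err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
                                             priv->base_qpn, 1);
                break;
        }
        if (err)
                en_err(priv, "Failed enabling promiscuous mode\n");

        /* The real helper also disables the multicast filter via
         * mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, ...), as one hit above shows. */
}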
913 struct mlx4_en_dev *mdev) in mlx4_en_do_multicast() argument
922 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_do_multicast()
929 switch (mdev->dev->caps.steering_mode) { in mlx4_en_do_multicast()
931 err = mlx4_flow_steer_promisc_add(mdev->dev, in mlx4_en_do_multicast()
938 err = mlx4_multicast_promisc_add(mdev->dev, in mlx4_en_do_multicast()
953 switch (mdev->dev->caps.steering_mode) { in mlx4_en_do_multicast()
955 err = mlx4_flow_steer_promisc_remove(mdev->dev, in mlx4_en_do_multicast()
961 err = mlx4_multicast_promisc_remove(mdev->dev, in mlx4_en_do_multicast()
974 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_do_multicast()
980 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, in mlx4_en_do_multicast()
990 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, in mlx4_en_do_multicast()
993 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, in mlx4_en_do_multicast()
1004 err = mlx4_multicast_detach(mdev->dev, in mlx4_en_do_multicast()
1013 err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id); in mlx4_en_do_multicast()
1026 err = mlx4_multicast_attach(mdev->dev, in mlx4_en_do_multicast()
1046 struct mlx4_en_dev *mdev) in mlx4_en_do_uc_filter() argument
1086 mlx4_unregister_mac(mdev->dev, priv->port, mac); in mlx4_en_do_uc_filter()
1127 err = mlx4_register_mac(mdev->dev, priv->port, mac); in mlx4_en_do_uc_filter()
1141 mlx4_unregister_mac(mdev->dev, priv->port, mac); in mlx4_en_do_uc_filter()
1169 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_do_set_rx_mode() local
1172 mutex_lock(&mdev->state_lock); in mlx4_en_do_set_rx_mode()
1173 if (!mdev->device_up) { in mlx4_en_do_set_rx_mode()
1183 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { in mlx4_en_do_set_rx_mode()
1193 mlx4_en_do_uc_filter(priv, dev, mdev); in mlx4_en_do_set_rx_mode()
1198 mlx4_en_set_promisc_mode(priv, mdev); in mlx4_en_do_set_rx_mode()
1204 mlx4_en_clear_promisc_mode(priv, mdev); in mlx4_en_do_set_rx_mode()
1206 mlx4_en_do_multicast(priv, dev, mdev); in mlx4_en_do_set_rx_mode()
1208 mutex_unlock(&mdev->state_lock); in mlx4_en_do_set_rx_mode()
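The rx-mode path is asynchronous: ndo_set_rx_mode cannot sleep (it runs with the address-list lock held), so mlx4_en_set_rx_mode() only queues priv->rx_mode_task on mdev->workqueue, and the worker mlx4_en_do_set_rx_mode() rebuilds the whole filter state under mdev->state_lock. Below is a skeleton of that worker inferred from the hits above; the IFF_PROMISC test and the link-state handling around mlx4_en_QUERY_PORT() are simplifications of what the driver really checks.

/* Deferred rx-mode worker (sketch). */
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 rx_mode_task);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct net_device *dev = priv->dev;

        mutex_lock(&mdev->state_lock);
        if (!mdev->device_up || !priv->port_up)
                goto out;

        /* mlx4_en_QUERY_PORT(mdev, priv->port) is also polled here to notice
         * a link that came up while the carrier was still reported down. */

        mlx4_en_do_uc_filter(priv, dev, mdev);          /* unicast MAC list */

        if (dev->flags & IFF_PROMISC)
                mlx4_en_set_promisc_mode(priv, mdev);
        else
                mlx4_en_clear_promisc_mode(priv, mdev);

        mlx4_en_do_multicast(priv, dev, mdev);          /* MC list and broadcast */
out:
        mutex_unlock(&mdev->state_lock);
}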
1257 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); in mlx4_en_set_rss_steer_rules()
1282 mlx4_unregister_mac(priv->mdev->dev, priv->port, mac); in mlx4_en_delete_rss_steer_rules()
1289 mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); in mlx4_en_delete_rss_steer_rules()
1297 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_tx_timeout() local
1313 queue_work(mdev->workqueue, &priv->watchdog_task); in mlx4_en_tx_timeout()
1441 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_do_get_stats() local
1444 mutex_lock(&mdev->state_lock); in mlx4_en_do_get_stats()
1445 if (mdev->device_up) { in mlx4_en_do_get_stats()
1447 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); in mlx4_en_do_get_stats()
1454 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); in mlx4_en_do_get_stats()
1456 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { in mlx4_en_do_get_stats()
1458 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; in mlx4_en_do_get_stats()
1460 mutex_unlock(&mdev->state_lock); in mlx4_en_do_get_stats()
1471 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_service_task() local
1473 mutex_lock(&mdev->state_lock); in mlx4_en_service_task()
1474 if (mdev->device_up) { in mlx4_en_service_task()
1475 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) in mlx4_en_service_task()
1476 mlx4_en_ptp_overflow_check(mdev); in mlx4_en_service_task()
1479 queue_delayed_work(mdev->workqueue, &priv->service_task, in mlx4_en_service_task()
1482 mutex_unlock(&mdev->state_lock); in mlx4_en_service_task()
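mlx4_en_do_get_stats() and mlx4_en_service_task() are self-rearming delayed work: each run takes mdev->state_lock, does its job only while mdev->device_up, and re-queues itself on mdev->workqueue. A minimal sketch of that shape using the stats task; the mac_removed recovery step visible in the hits above and other periodic housekeeping are omitted, and STATS_DELAY is the driver's existing interval macro.

/* Self-rearming stats worker, reduced to the lock/check/requeue pattern (sketch). */
static void mlx4_en_do_get_stats(struct work_struct *work)
{
        struct delayed_work *delay = to_delayed_work(work);
        struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
                                                 stats_task);
        struct mlx4_en_dev *mdev = priv->mdev;

        mutex_lock(&mdev->state_lock);
        if (mdev->device_up) {
                if (priv->port_up &&
                    mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0))  /* 0 = read, don't reset */
                        en_dbg(HW, priv, "Could not update stats\n");

                /* Re-arm while the device stays up. */
                queue_delayed_work(mdev->workqueue, &priv->stats_task,
                                   STATS_DELAY);
        }
        mutex_unlock(&mdev->state_lock);
}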
1489 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_linkstate() local
1492 mutex_lock(&mdev->state_lock); in mlx4_en_linkstate()
1505 mutex_unlock(&mdev->state_lock); in mlx4_en_linkstate()
1511 int numa_node = priv->mdev->dev->numa_node; in mlx4_en_init_affinity_hint()
1529 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_start_port() local
1606 mdev->mac_removed[priv->port] = 0; in mlx4_en_start_port()
1609 mlx4_get_default_counter_index(mdev->dev, priv->port); in mlx4_en_start_port()
1660 err = mlx4_SET_PORT_general(mdev->dev, priv->port, in mlx4_en_start_port()
1672 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); in mlx4_en_start_port()
1678 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { in mlx4_en_start_port()
1679 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); in mlx4_en_start_port()
1689 err = mlx4_INIT_PORT(mdev->dev, priv->port); in mlx4_en_start_port()
1696 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 && in mlx4_en_start_port()
1698 mlx4_warn(mdev, "Failed setting steering rules\n"); in mlx4_en_start_port()
1703 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, in mlx4_en_start_port()
1706 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); in mlx4_en_start_port()
1712 queue_work(mdev->workqueue, &priv->rx_mode_task); in mlx4_en_start_port()
1715 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) in mlx4_en_start_port()
1749 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_stop_port() local
1761 mlx4_CLOSE_PORT(mdev->dev, priv->port); in mlx4_en_stop_port()
1774 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); in mlx4_en_stop_port()
1777 if (mdev->dev->caps.steering_mode == in mlx4_en_stop_port()
1781 mlx4_flow_steer_promisc_remove(mdev->dev, in mlx4_en_stop_port()
1784 mlx4_flow_steer_promisc_remove(mdev->dev, in mlx4_en_stop_port()
1791 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, in mlx4_en_stop_port()
1796 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, in mlx4_en_stop_port()
1805 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, in mlx4_en_stop_port()
1810 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, in mlx4_en_stop_port()
1813 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); in mlx4_en_stop_port()
1822 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); in mlx4_en_stop_port()
1825 if (mdev->dev->caps.steering_mode == in mlx4_en_stop_port()
1830 mlx4_flow_detach(mdev->dev, flow->id); in mlx4_en_stop_port()
1847 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) in mlx4_en_stop_port()
1855 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN)) in mlx4_en_stop_port()
1856 mdev->mac_removed[priv->port] = 1; in mlx4_en_stop_port()
1881 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_restart() local
1886 mutex_lock(&mdev->state_lock); in mlx4_en_restart()
1892 mutex_unlock(&mdev->state_lock); in mlx4_en_restart()
1898 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_clear_stats() local
1901 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) in mlx4_en_clear_stats()
1933 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_open() local
1936 mutex_lock(&mdev->state_lock); in mlx4_en_open()
1938 if (!mdev->device_up) { in mlx4_en_open()
1952 mutex_unlock(&mdev->state_lock); in mlx4_en_open()
1960 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_close() local
1964 mutex_lock(&mdev->state_lock); in mlx4_en_close()
1969 mutex_unlock(&mdev->state_lock); in mlx4_en_close()
2031 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port); in mlx4_en_alloc_resources()
2059 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_destroy_netdev() local
2068 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); in mlx4_en_destroy_netdev()
2073 flush_workqueue(mdev->workqueue); in mlx4_en_destroy_netdev()
2075 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) in mlx4_en_destroy_netdev()
2076 mlx4_en_remove_timestamp(mdev); in mlx4_en_destroy_netdev()
2079 mutex_lock(&mdev->state_lock); in mlx4_en_destroy_netdev()
2080 mdev->pndev[priv->port] = NULL; in mlx4_en_destroy_netdev()
2081 mdev->upper[priv->port] = NULL; in mlx4_en_destroy_netdev()
2082 mutex_unlock(&mdev->state_lock); in mlx4_en_destroy_netdev()
2095 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_change_mtu() local
2108 mutex_lock(&mdev->state_lock); in mlx4_en_change_mtu()
2109 if (!mdev->device_up) { in mlx4_en_change_mtu()
2119 queue_work(mdev->workqueue, &priv->watchdog_task); in mlx4_en_change_mtu()
2122 mutex_unlock(&mdev->state_lock); in mlx4_en_change_mtu()
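mlx4_en_change_mtu() shows the same defer-to-workqueue idiom: when the interface is running on an up device, the new MTU is applied by scheduling the watchdog/restart task (the same priv->watchdog_task queued from mlx4_en_tx_timeout() above) rather than by reconfiguring the rings inline. A hedged sketch; the lower MTU bound check of the real function is reduced to a comment.

/* MTU change defers the port restart to the watchdog task (sketch). */
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;

        if (new_mtu > priv->max_mtu)    /* the driver also enforces a minimum MTU */
                return -EINVAL;

        dev->mtu = new_mtu;

        if (netif_running(dev)) {
                mutex_lock(&mdev->state_lock);
                if (!mdev->device_up) {
                        /* The port picks up the new MTU when the device comes back up. */
                        en_dbg(DRV, priv, "change MTU called with card down\n");
                } else {
                        /* Tear down and restart the port asynchronously with the new MTU. */
                        queue_work(mdev->workqueue, &priv->watchdog_task);
                }
                mutex_unlock(&mdev->state_lock);
        }
        return 0;
}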
2130 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_hwtstamp_set() local
2141 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)) in mlx4_en_hwtstamp_set()
2210 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_fix_features() local
2217 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) in mlx4_en_fix_features()
2243 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev, in mlx4_en_set_features()
2282 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_set_vf_mac() local
2288 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); in mlx4_en_set_vf_mac()
2294 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_set_vf_vlan() local
2296 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos); in mlx4_en_set_vf_vlan()
2303 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_set_vf_rate() local
2305 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate, in mlx4_en_set_vf_rate()
2312 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_set_vf_spoofchk() local
2314 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting); in mlx4_en_set_vf_spoofchk()
2320 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_get_vf_config() local
2322 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf); in mlx4_en_get_vf_config()
2328 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_set_vf_link_state() local
2330 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state); in mlx4_en_set_vf_link_state()
2337 struct mlx4_en_dev *mdev = en_priv->mdev; in mlx4_en_get_vf_stats() local
2339 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats); in mlx4_en_get_vf_stats()
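The SR-IOV ndo callbacks above (set_vf_mac/vlan/rate/spoofchk/link_state, get_vf_config/stats) are thin wrappers: each fetches en_priv->mdev and delegates to the matching mlx4 core call on mdev->dev, adding only the port number. One representative wrapper, reconstructed from the listed call:

/* Thin ndo wrapper: all the work happens in the mlx4 core on mdev->dev. */
static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
        struct mlx4_en_priv *en_priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = en_priv->mdev;

        return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}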
2347 struct mlx4_dev *mdev = priv->mdev->dev; in mlx4_en_get_phys_port_id() local
2349 u64 phys_port_id = mdev->caps.phys_port_id[priv->port]; in mlx4_en_get_phys_port_id()
2369 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); in mlx4_en_add_vxlan_offloads()
2373 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, in mlx4_en_add_vxlan_offloads()
2395 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, in mlx4_en_del_vxlan_offloads()
2409 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) in mlx4_en_add_vxlan_port()
2423 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); in mlx4_en_add_vxlan_port()
2432 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) in mlx4_en_del_vxlan_port()
2444 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); in mlx4_en_del_vxlan_port()
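The VXLAN offload hooks are deferred as well: the ndo add/del callbacks only validate mdev->dev->caps.tunnel_offload_mode and queue priv->vxlan_add_task or priv->vxlan_del_task on mdev->workqueue; the worker then programs the firmware with mlx4_config_vxlan_port() and mlx4_SET_PORT_VXLAN(). A sketch of the add-side worker, assuming priv->vxlan_port holds the UDP port stashed by the ndo callback and eliding whatever follow-up the real worker does on success.

/* Deferred VXLAN offload enable (sketch). */
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
                                                 vxlan_add_task);
        int ret;

        /* Tell firmware which UDP destination port carries VXLAN. */
        ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
        if (ret)
                goto out;

        /* Steer tunnelled traffic by outer MAC on this port. */
        ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
                                  VXLAN_STEER_BY_OUTER_MAC, 1);
out:
        if (ret) {
                priv->vxlan_port = 0;
                en_err(priv, "failed setting tunnel configuration\n");
        }
}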
2463 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT)) in mlx4_en_set_tx_maxrate()
2478 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT, in mlx4_en_set_tx_maxrate()
2569 struct mlx4_dev *dev = bond->priv->mdev->dev; in mlx4_en_bond_work()
2609 queue_work(priv->mdev->workqueue, &bond->work); in mlx4_en_queue_bond_work()
2618 struct mlx4_en_dev *mdev; in mlx4_en_netdev_event() local
2629 mdev = container_of(this, struct mlx4_en_dev, nb); in mlx4_en_netdev_event()
2630 dev = mdev->dev; in mlx4_en_netdev_event()
2637 if (!port && (mdev->pndev[i] == ndev)) in mlx4_en_netdev_event()
2639 mdev->upper[i] = mdev->pndev[i] ? in mlx4_en_netdev_event()
2640 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; in mlx4_en_netdev_event()
2642 if (!mdev->upper[i]) in mlx4_en_netdev_event()
2647 if (mdev->upper[i] != mdev->upper[i-1]) in mlx4_en_netdev_event()
2811 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, in mlx4_en_init_netdev() argument
2827 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev); in mlx4_en_init_netdev()
2836 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev); in mlx4_en_init_netdev()
2853 priv->mdev = mdev; in mlx4_en_init_netdev()
2854 priv->ddev = &mdev->pdev->dev; in mlx4_en_init_netdev()
2862 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; in mlx4_en_init_netdev()
2880 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; in mlx4_en_init_netdev()
2881 priv->cqe_size = mdev->dev->caps.cqe_size; in mlx4_en_init_netdev()
2885 if (!mlx4_is_slave(priv->mdev->dev)) { in mlx4_en_init_netdev()
2886 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { in mlx4_en_init_netdev()
2899 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; in mlx4_en_init_netdev()
2901 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] & in mlx4_en_init_netdev()
2907 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); in mlx4_en_init_netdev()
2913 } else if (mlx4_is_slave(priv->mdev->dev) && in mlx4_en_init_netdev()
2914 (priv->mdev->dev->port_random_macs & 1 << priv->port)) { in mlx4_en_init_netdev()
2936 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, in mlx4_en_init_netdev()
2947 if (mlx4_is_master(priv->mdev->dev)) in mlx4_en_init_netdev()
2961 if (mdev->LSO_support) in mlx4_en_init_netdev()
2973 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) { in mlx4_en_init_netdev()
2979 if (mlx4_is_slave(mdev->dev)) { in mlx4_en_init_netdev()
2982 err = get_phv_bit(mdev->dev, port, &phv); in mlx4_en_init_netdev()
2988 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN && in mlx4_en_init_netdev()
2989 !(mdev->dev->caps.flags2 & in mlx4_en_init_netdev()
2994 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) in mlx4_en_init_netdev()
2997 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS) in mlx4_en_init_netdev()
3000 if (mdev->dev->caps.steering_mode == in mlx4_en_init_netdev()
3002 mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) in mlx4_en_init_netdev()
3005 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) in mlx4_en_init_netdev()
3009 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) { in mlx4_en_init_netdev()
3011 } else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) { in mlx4_en_init_netdev()
3019 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { in mlx4_en_init_netdev()
3024 mdev->pndev[port] = dev; in mlx4_en_init_netdev()
3025 mdev->upper[port] = NULL; in mlx4_en_init_netdev()
3037 err = mlx4_SET_PORT_general(mdev->dev, priv->port, in mlx4_en_init_netdev()
3047 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { in mlx4_en_init_netdev()
3048 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1); in mlx4_en_init_netdev()
3058 err = mlx4_INIT_PORT(mdev->dev, priv->port); in mlx4_en_init_netdev()
3063 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); in mlx4_en_init_netdev()
3066 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) in mlx4_en_init_netdev()
3067 mlx4_en_init_timestamp(mdev); in mlx4_en_init_netdev()
3069 queue_delayed_work(mdev->workqueue, &priv->service_task, in mlx4_en_init_netdev()
3072 mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap, in mlx4_en_init_netdev()
3073 mdev->profile.prof[priv->port].rx_ppp, in mlx4_en_init_netdev()
3074 mdev->profile.prof[priv->port].rx_pause, in mlx4_en_init_netdev()
3075 mdev->profile.prof[priv->port].tx_ppp, in mlx4_en_init_netdev()
3076 mdev->profile.prof[priv->port].tx_pause); in mlx4_en_init_netdev()
3098 struct mlx4_en_dev *mdev = priv->mdev; in mlx4_en_reset_config() local
3115 mutex_lock(&mdev->state_lock); in mlx4_en_reset_config()
3173 mutex_unlock(&mdev->state_lock); in mlx4_en_reset_config()