Lines matching refs: adapter — each hit below shows its source line number, the matched line, and the enclosing function in drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c (Intel ixgbevf virtual-function driver).
99 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter) in ixgbevf_service_event_schedule() argument
101 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_service_event_schedule()
102 !test_bit(__IXGBEVF_REMOVING, &adapter->state) && in ixgbevf_service_event_schedule()
103 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) in ixgbevf_service_event_schedule()
104 schedule_work(&adapter->service_task); in ixgbevf_service_event_schedule()
107 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter) in ixgbevf_service_event_complete() argument
109 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); in ixgbevf_service_event_complete()
113 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_service_event_complete()
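The ixgbevf_service_event_schedule()/ixgbevf_service_event_complete() hits above show the scheduling guard: the service task is queued only while the adapter is neither DOWN nor REMOVING and the SERVICE_SCHED bit was not already set, and the completion path asserts and clears that bit. A minimal standalone sketch of that test-and-set handshake (simplified userspace types and hypothetical helper names, not the driver's own bitops):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical state bits, mirroring the idea of __IXGBEVF_DOWN etc. */
    enum { STATE_DOWN, STATE_REMOVING, STATE_SERVICE_SCHED };

    static atomic_ulong state;                    /* stand-in for adapter->state */

    static bool test_bit_(int nr, atomic_ulong *addr)
    {
        return atomic_load(addr) & (1UL << nr);
    }

    static bool test_and_set_bit_(int nr, atomic_ulong *addr)
    {
        return atomic_fetch_or(addr, 1UL << nr) & (1UL << nr);
    }

    static void clear_bit_(int nr, atomic_ulong *addr)
    {
        atomic_fetch_and(addr, ~(1UL << nr));
    }

    /* Queue the service task only once, and only while the device is usable. */
    static void service_event_schedule(void)
    {
        if (!test_bit_(STATE_DOWN, &state) &&
            !test_bit_(STATE_REMOVING, &state) &&
            !test_and_set_bit_(STATE_SERVICE_SCHED, &state))
            printf("service task queued\n");      /* schedule_work() in the driver */
    }

    /* The task clears SERVICE_SCHED when it finishes, re-arming the guard. */
    static void service_event_complete(void)
    {
        assert(test_bit_(STATE_SERVICE_SCHED, &state));
        clear_bit_(STATE_SERVICE_SCHED, &state);
    }

    int main(void)
    {
        service_event_schedule();   /* queues the task */
        service_event_schedule();   /* no-op: SERVICE_SCHED already set */
        service_event_complete();
        return 0;
    }

The atomic fetch-or makes scheduling idempotent: a second call while the task is still pending does nothing.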
117 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
119 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
123 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_remove_adapter() local
128 dev_err(&adapter->pdev->dev, "Adapter removed\n"); in ixgbevf_remove_adapter()
129 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_remove_adapter()
130 ixgbevf_service_event_schedule(adapter); in ixgbevf_remove_adapter()
172 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, in ixgbevf_set_ivar() argument
176 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_ivar()
225 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); in ixgbevf_get_tx_pending() local
226 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_get_tx_pending()
265 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) in ixgbevf_tx_timeout_reset() argument
268 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_tx_timeout_reset()
269 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; in ixgbevf_tx_timeout_reset()
270 ixgbevf_service_event_schedule(adapter); in ixgbevf_tx_timeout_reset()
280 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_tx_timeout() local
282 ixgbevf_tx_timeout_reset(adapter); in ixgbevf_tx_timeout()
293 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_tx_irq() local
300 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_clean_tx_irq()
389 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_clean_tx_irq()
414 ixgbevf_tx_timeout_reset(adapter); in ixgbevf_clean_tx_irq()
429 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_clean_tx_irq()
903 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, in ixgbevf_irq_enable_queues() argument
906 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable_queues()
1009 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_poll() local
1046 if (adapter->rx_itr_setting & 1) in ixgbevf_poll()
1048 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_poll()
1049 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) in ixgbevf_poll()
1050 ixgbevf_irq_enable_queues(adapter, in ixgbevf_poll()
1062 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_write_eitr() local
1063 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_eitr()
1081 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_busy_poll_recv() local
1085 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_busy_poll_recv()
1116 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) in ixgbevf_configure_msix() argument
1121 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_configure_msix()
1122 adapter->eims_enable_mask = 0; in ixgbevf_configure_msix()
1130 q_vector = adapter->q_vector[v_idx]; in ixgbevf_configure_msix()
1133 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1136 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1140 if (adapter->tx_itr_setting == 1) in ixgbevf_configure_msix()
1143 q_vector->itr = adapter->tx_itr_setting; in ixgbevf_configure_msix()
1146 if (adapter->rx_itr_setting == 1) in ixgbevf_configure_msix()
1149 q_vector->itr = adapter->rx_itr_setting; in ixgbevf_configure_msix()
1153 adapter->eims_enable_mask |= 1 << v_idx; in ixgbevf_configure_msix()
1158 ixgbevf_set_ivar(adapter, -1, 1, v_idx); in ixgbevf_configure_msix()
1160 adapter->eims_other = 1 << v_idx; in ixgbevf_configure_msix()
1161 adapter->eims_enable_mask |= adapter->eims_other; in ixgbevf_configure_msix()
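The ixgbevf_configure_msix() hits build adapter->eims_enable_mask by OR-ing one bit per queue vector and then adding the extra "other" (mailbox/link) vector as eims_other. A tiny sketch of the mask arithmetic, with an assumed vector count:

    #include <stdio.h>

    int main(void)
    {
        unsigned int q_vectors = 2;               /* assumed number of queue vectors */
        unsigned int eims_enable_mask = 0, eims_other;
        unsigned int v_idx;

        for (v_idx = 0; v_idx < q_vectors; v_idx++)
            eims_enable_mask |= 1u << v_idx;      /* one bit per queue vector */

        eims_other = 1u << v_idx;                 /* next bit: mailbox/other vector */
        eims_enable_mask |= eims_other;

        printf("eims_enable_mask = 0x%x, eims_other = 0x%x\n",
               eims_enable_mask, eims_other);     /* 0x7 and 0x4 for two queue vectors */
        return 0;
    }

For two queue vectors the result is a mask of 0x7, with bit 2 reserved for the non-queue interrupt.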
1268 struct ixgbevf_adapter *adapter = data; in ixgbevf_msix_other() local
1269 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_msix_other()
1273 ixgbevf_service_event_schedule(adapter); in ixgbevf_msix_other()
1275 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); in ixgbevf_msix_other()
1326 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) in ixgbevf_map_rings_to_vectors() argument
1331 int rxr_remaining = adapter->num_rx_queues; in ixgbevf_map_rings_to_vectors()
1332 int txr_remaining = adapter->num_tx_queues; in ixgbevf_map_rings_to_vectors()
1337 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_map_rings_to_vectors()
1342 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { in ixgbevf_map_rings_to_vectors()
1344 map_vector_to_rxq(adapter, v_start, rxr_idx); in ixgbevf_map_rings_to_vectors()
1347 map_vector_to_txq(adapter, v_start, txr_idx); in ixgbevf_map_rings_to_vectors()
1359 map_vector_to_rxq(adapter, i, rxr_idx); in ixgbevf_map_rings_to_vectors()
1367 map_vector_to_txq(adapter, i, txr_idx); in ixgbevf_map_rings_to_vectors()
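ixgbevf_map_rings_to_vectors() spreads the remaining Rx and Tx rings over the queue vectors: a one-to-one mapping when the counts match, otherwise a round-robin split using ceiling division. A standalone sketch of the spreading arithmetic (assumed counts, plain printf standing in for the driver's map_vector_to_rxq()/map_vector_to_txq() helpers):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int q_vectors = 3, num_rx = 4, num_tx = 4;   /* assumed example counts */
        int rxr_remaining = num_rx, txr_remaining = num_tx;
        int rxr_idx = 0, txr_idx = 0, i, j;

        for (i = 0; i < q_vectors; i++) {
            /* give this vector its fair share of the remaining Rx rings */
            int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);

            for (j = 0; j < rqpv; j++, rxr_remaining--, rxr_idx++)
                printf("vector %d <- rx ring %d\n", i, rxr_idx);
        }
        for (i = 0; i < q_vectors; i++) {
            int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);

            for (j = 0; j < tqpv; j++, txr_remaining--, txr_idx++)
                printf("vector %d <- tx ring %d\n", i, txr_idx);
        }
        return 0;
    }

With 3 vectors and 4 rings, the first vector takes ceil(4/3) = 2 rings and the remaining vectors take one each.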
1384 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) in ixgbevf_request_msix_irqs() argument
1386 struct net_device *netdev = adapter->netdev; in ixgbevf_request_msix_irqs()
1387 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_request_msix_irqs()
1392 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; in ixgbevf_request_msix_irqs()
1393 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbevf_request_msix_irqs()
1412 hw_dbg(&adapter->hw, in ixgbevf_request_msix_irqs()
1419 err = request_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1420 &ixgbevf_msix_other, 0, netdev->name, adapter); in ixgbevf_request_msix_irqs()
1422 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", in ixgbevf_request_msix_irqs()
1432 free_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1433 adapter->q_vector[vector]); in ixgbevf_request_msix_irqs()
1445 adapter->num_msix_vectors = 0; in ixgbevf_request_msix_irqs()
1449 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) in ixgbevf_reset_q_vectors() argument
1451 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_reset_q_vectors()
1454 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; in ixgbevf_reset_q_vectors()
1470 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) in ixgbevf_request_irq() argument
1474 err = ixgbevf_request_msix_irqs(adapter); in ixgbevf_request_irq()
1477 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); in ixgbevf_request_irq()
1482 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) in ixgbevf_free_irq() argument
1486 q_vectors = adapter->num_msix_vectors; in ixgbevf_free_irq()
1489 free_irq(adapter->msix_entries[i].vector, adapter); in ixgbevf_free_irq()
1494 if (!adapter->q_vector[i]->rx.ring && in ixgbevf_free_irq()
1495 !adapter->q_vector[i]->tx.ring) in ixgbevf_free_irq()
1498 free_irq(adapter->msix_entries[i].vector, in ixgbevf_free_irq()
1499 adapter->q_vector[i]); in ixgbevf_free_irq()
1502 ixgbevf_reset_q_vectors(adapter); in ixgbevf_free_irq()
1509 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) in ixgbevf_irq_disable() argument
1511 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_disable()
1520 for (i = 0; i < adapter->num_msix_vectors; i++) in ixgbevf_irq_disable()
1521 synchronize_irq(adapter->msix_entries[i].vector); in ixgbevf_irq_disable()
1528 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) in ixgbevf_irq_enable() argument
1530 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable()
1532 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1533 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1534 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1544 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, in ixgbevf_configure_tx_ring() argument
1547 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_tx_ring()
1574 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); in ixgbevf_configure_tx_ring()
1609 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) in ixgbevf_configure_tx() argument
1614 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_configure_tx()
1615 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbevf_configure_tx()
1620 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) in ixgbevf_configure_srrctl() argument
1622 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_srrctl()
1634 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) in ixgbevf_setup_psrtype() argument
1636 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_psrtype()
1643 if (adapter->num_rx_queues > 1) in ixgbevf_setup_psrtype()
1650 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, in ixgbevf_disable_rx_queue() argument
1653 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_disable_rx_queue()
1677 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, in ixgbevf_rx_desc_queue_enable() argument
1680 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_rx_desc_queue_enable()
1697 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter) in ixgbevf_setup_vfmrqc() argument
1699 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_vfmrqc()
1701 u16 rss_i = adapter->num_rx_queues; in ixgbevf_setup_vfmrqc()
1705 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); in ixgbevf_setup_vfmrqc()
1707 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]); in ixgbevf_setup_vfmrqc()
1713 adapter->rss_indir_tbl[i] = j; in ixgbevf_setup_vfmrqc()
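The ixgbevf_setup_vfmrqc() hits fill the RSS key (netdev_rss_key_fill, written to VFRSSRK) and an indirection table whose entries cycle through the Rx queue indices. A sketch of just the table fill, where the table size (64 entries) and the queue count are assumed illustrative values:

    #include <stdio.h>

    #define VFRETA_SIZE 64                       /* assumed indirection table size */

    int main(void)
    {
        unsigned int rss_i = 2;                  /* assumed number of Rx queues */
        unsigned char rss_indir_tbl[VFRETA_SIZE];
        unsigned int i, j = 0;

        /* spread the table entries evenly across the rss_i Rx queues */
        for (i = 0; i < VFRETA_SIZE; i++, j++) {
            if (j == rss_i)
                j = 0;
            rss_indir_tbl[i] = j;
        }

        for (i = 0; i < 8; i++)
            printf("reta[%u] = %u\n", i, rss_indir_tbl[i]);
        return 0;
    }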
1733 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, in ixgbevf_configure_rx_ring() argument
1736 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx_ring()
1743 ixgbevf_disable_rx_queue(adapter, ring); in ixgbevf_configure_rx_ring()
1757 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); in ixgbevf_configure_rx_ring()
1764 ixgbevf_configure_srrctl(adapter, reg_idx); in ixgbevf_configure_rx_ring()
1772 ixgbevf_rx_desc_queue_enable(adapter, ring); in ixgbevf_configure_rx_ring()
1782 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) in ixgbevf_configure_rx() argument
1785 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx()
1786 struct net_device *netdev = adapter->netdev; in ixgbevf_configure_rx()
1788 ixgbevf_setup_psrtype(adapter); in ixgbevf_configure_rx()
1790 ixgbevf_setup_vfmrqc(adapter); in ixgbevf_configure_rx()
1798 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_configure_rx()
1799 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbevf_configure_rx()
1805 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_vlan_rx_add_vid() local
1806 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_add_vid()
1809 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
1814 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
1823 set_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_add_vid()
1831 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_vlan_rx_kill_vid() local
1832 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_kill_vid()
1835 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
1840 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
1842 clear_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_kill_vid()
1847 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) in ixgbevf_restore_vlan() argument
1851 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in ixgbevf_restore_vlan()
1852 ixgbevf_vlan_rx_add_vid(adapter->netdev, in ixgbevf_restore_vlan()
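ixgbevf_restore_vlan() walks every set bit in adapter->active_vlans and re-registers that VLAN ID after a reset. A standalone sketch of scanning such a VLAN bitmap (a plain linear scan standing in for the kernel's for_each_set_bit() iterator):

    #include <stdio.h>

    #define VLAN_N_VID    4096
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long active_vlans[VLAN_N_VID / BITS_PER_LONG];

    static void set_vid(unsigned int vid)
    {
        active_vlans[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
    }

    int main(void)
    {
        unsigned int vid;

        set_vid(1);
        set_vid(100);
        set_vid(4000);

        /* visit every set bit, as for_each_set_bit() does in the driver */
        for (vid = 0; vid < VLAN_N_VID; vid++)
            if (active_vlans[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)))
                printf("re-adding VLAN %u\n", vid);
        return 0;
    }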
1858 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_write_uc_addr_list() local
1859 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_uc_addr_list()
1895 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_set_rx_mode() local
1896 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_rx_mode()
1904 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
1913 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
1916 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) in ixgbevf_napi_enable_all() argument
1920 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_enable_all()
1923 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_enable_all()
1925 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]); in ixgbevf_napi_enable_all()
1931 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) in ixgbevf_napi_disable_all() argument
1935 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_disable_all()
1938 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_disable_all()
1941 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) { in ixgbevf_napi_disable_all()
1949 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) in ixgbevf_configure_dcb() argument
1951 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_dcb()
1954 unsigned int num_rx_queues = adapter->num_rx_queues; in ixgbevf_configure_dcb()
1955 unsigned int num_tx_queues = adapter->num_tx_queues; in ixgbevf_configure_dcb()
1958 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
1963 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
1973 adapter->tx_ring[0]->reg_idx = def_q; in ixgbevf_configure_dcb()
1980 if ((adapter->num_rx_queues != num_rx_queues) || in ixgbevf_configure_dcb()
1981 (adapter->num_tx_queues != num_tx_queues)) { in ixgbevf_configure_dcb()
1986 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; in ixgbevf_configure_dcb()
1992 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) in ixgbevf_configure() argument
1994 ixgbevf_configure_dcb(adapter); in ixgbevf_configure()
1996 ixgbevf_set_rx_mode(adapter->netdev); in ixgbevf_configure()
1998 ixgbevf_restore_vlan(adapter); in ixgbevf_configure()
2000 ixgbevf_configure_tx(adapter); in ixgbevf_configure()
2001 ixgbevf_configure_rx(adapter); in ixgbevf_configure()
2004 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) in ixgbevf_save_reset_stats() argument
2007 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { in ixgbevf_save_reset_stats()
2008 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - in ixgbevf_save_reset_stats()
2009 adapter->stats.base_vfgprc; in ixgbevf_save_reset_stats()
2010 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - in ixgbevf_save_reset_stats()
2011 adapter->stats.base_vfgptc; in ixgbevf_save_reset_stats()
2012 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - in ixgbevf_save_reset_stats()
2013 adapter->stats.base_vfgorc; in ixgbevf_save_reset_stats()
2014 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - in ixgbevf_save_reset_stats()
2015 adapter->stats.base_vfgotc; in ixgbevf_save_reset_stats()
2016 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - in ixgbevf_save_reset_stats()
2017 adapter->stats.base_vfmprc; in ixgbevf_save_reset_stats()
2021 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) in ixgbevf_init_last_counter_stats() argument
2023 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_init_last_counter_stats()
2025 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); in ixgbevf_init_last_counter_stats()
2026 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); in ixgbevf_init_last_counter_stats()
2027 adapter->stats.last_vfgorc |= in ixgbevf_init_last_counter_stats()
2029 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); in ixgbevf_init_last_counter_stats()
2030 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); in ixgbevf_init_last_counter_stats()
2031 adapter->stats.last_vfgotc |= in ixgbevf_init_last_counter_stats()
2033 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); in ixgbevf_init_last_counter_stats()
2035 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; in ixgbevf_init_last_counter_stats()
2036 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; in ixgbevf_init_last_counter_stats()
2037 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; in ixgbevf_init_last_counter_stats()
2038 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; in ixgbevf_init_last_counter_stats()
2039 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; in ixgbevf_init_last_counter_stats()
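ixgbevf_init_last_counter_stats() seeds the last_* and base_* counters from hardware; the good-octet counters (VFGORC/VFGOTC) are read as a 32-bit LSB register OR-ed with a separate MSB register (the continuation lines are not part of this match list). A sketch of stitching such a split counter together, assuming the MSB half is shifted up by 32 bits:

    #include <stdint.h>
    #include <stdio.h>

    /* Combine a 32-bit LSB register with a small MSB register into one wide
     * octet counter, in the spirit of the VFGORC/VFGOTC reads above. */
    static uint64_t read_split_counter(uint32_t lsb, uint32_t msb)
    {
        return (uint64_t)lsb | ((uint64_t)msb << 32);
    }

    int main(void)
    {
        printf("0x%llx\n",
               (unsigned long long)read_split_counter(0x89ABCDEFu, 0x7u));
        return 0;
    }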
2042 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) in ixgbevf_negotiate_api() argument
2044 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_negotiate_api()
2051 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2060 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2063 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) in ixgbevf_up_complete() argument
2065 struct net_device *netdev = adapter->netdev; in ixgbevf_up_complete()
2066 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_up_complete()
2068 ixgbevf_configure_msix(adapter); in ixgbevf_up_complete()
2070 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2077 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2080 clear_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_up_complete()
2081 ixgbevf_napi_enable_all(adapter); in ixgbevf_up_complete()
2085 ixgbevf_irq_enable(adapter); in ixgbevf_up_complete()
2090 ixgbevf_save_reset_stats(adapter); in ixgbevf_up_complete()
2091 ixgbevf_init_last_counter_stats(adapter); in ixgbevf_up_complete()
2094 mod_timer(&adapter->service_timer, jiffies); in ixgbevf_up_complete()
2097 void ixgbevf_up(struct ixgbevf_adapter *adapter) in ixgbevf_up() argument
2099 ixgbevf_configure(adapter); in ixgbevf_up()
2101 ixgbevf_up_complete(adapter); in ixgbevf_up()
2174 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) in ixgbevf_clean_all_rx_rings() argument
2178 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_clean_all_rx_rings()
2179 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2186 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) in ixgbevf_clean_all_tx_rings() argument
2190 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_clean_all_tx_rings()
2191 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); in ixgbevf_clean_all_tx_rings()
2194 void ixgbevf_down(struct ixgbevf_adapter *adapter) in ixgbevf_down() argument
2196 struct net_device *netdev = adapter->netdev; in ixgbevf_down()
2197 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_down()
2201 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_down()
2205 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_down()
2206 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2216 ixgbevf_irq_disable(adapter); in ixgbevf_down()
2218 ixgbevf_napi_disable_all(adapter); in ixgbevf_down()
2220 del_timer_sync(&adapter->service_timer); in ixgbevf_down()
2223 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_down()
2224 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbevf_down()
2230 if (!pci_channel_offline(adapter->pdev)) in ixgbevf_down()
2231 ixgbevf_reset(adapter); in ixgbevf_down()
2233 ixgbevf_clean_all_tx_rings(adapter); in ixgbevf_down()
2234 ixgbevf_clean_all_rx_rings(adapter); in ixgbevf_down()
2237 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) in ixgbevf_reinit_locked() argument
2241 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_reinit_locked()
2244 ixgbevf_down(adapter); in ixgbevf_reinit_locked()
2245 ixgbevf_up(adapter); in ixgbevf_reinit_locked()
2247 clear_bit(__IXGBEVF_RESETTING, &adapter->state); in ixgbevf_reinit_locked()
2250 void ixgbevf_reset(struct ixgbevf_adapter *adapter) in ixgbevf_reset() argument
2252 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_reset()
2253 struct net_device *netdev = adapter->netdev; in ixgbevf_reset()
2259 ixgbevf_negotiate_api(adapter); in ixgbevf_reset()
2262 if (is_valid_ether_addr(adapter->hw.mac.addr)) { in ixgbevf_reset()
2263 memcpy(netdev->dev_addr, adapter->hw.mac.addr, in ixgbevf_reset()
2265 memcpy(netdev->perm_addr, adapter->hw.mac.addr, in ixgbevf_reset()
2269 adapter->last_reset = jiffies; in ixgbevf_reset()
2272 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, in ixgbevf_acquire_msix_vectors() argument
2288 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, in ixgbevf_acquire_msix_vectors()
2292 dev_err(&adapter->pdev->dev, in ixgbevf_acquire_msix_vectors()
2294 kfree(adapter->msix_entries); in ixgbevf_acquire_msix_vectors()
2295 adapter->msix_entries = NULL; in ixgbevf_acquire_msix_vectors()
2303 adapter->num_msix_vectors = vectors; in ixgbevf_acquire_msix_vectors()
2319 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) in ixgbevf_set_num_queues() argument
2321 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_num_queues()
2327 adapter->num_rx_queues = 1; in ixgbevf_set_num_queues()
2328 adapter->num_tx_queues = 1; in ixgbevf_set_num_queues()
2330 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2335 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2342 adapter->num_rx_queues = num_tcs; in ixgbevf_set_num_queues()
2349 adapter->num_rx_queues = rss; in ixgbevf_set_num_queues()
2350 adapter->num_tx_queues = rss; in ixgbevf_set_num_queues()
2365 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) in ixgbevf_alloc_queues() argument
2370 for (; tx < adapter->num_tx_queues; tx++) { in ixgbevf_alloc_queues()
2375 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_queues()
2376 ring->netdev = adapter->netdev; in ixgbevf_alloc_queues()
2377 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_queues()
2381 adapter->tx_ring[tx] = ring; in ixgbevf_alloc_queues()
2384 for (; rx < adapter->num_rx_queues; rx++) { in ixgbevf_alloc_queues()
2389 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_queues()
2390 ring->netdev = adapter->netdev; in ixgbevf_alloc_queues()
2392 ring->count = adapter->rx_ring_count; in ixgbevf_alloc_queues()
2396 adapter->rx_ring[rx] = ring; in ixgbevf_alloc_queues()
2403 kfree(adapter->tx_ring[--tx]); in ixgbevf_alloc_queues()
2404 adapter->tx_ring[tx] = NULL; in ixgbevf_alloc_queues()
2408 kfree(adapter->rx_ring[--rx]); in ixgbevf_alloc_queues()
2409 adapter->rx_ring[rx] = NULL; in ixgbevf_alloc_queues()
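ixgbevf_alloc_queues() allocates one ring structure per Tx and per Rx queue and, if any allocation fails, frees everything already allocated in reverse order before returning an error. A simplified standalone sketch of that allocate-then-unwind pattern (Tx side only, hypothetical struct and counts):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_QUEUES 8

    struct ring { int count; };

    static struct ring *tx_ring[MAX_QUEUES];

    static int alloc_queues(int num_tx)
    {
        int tx;

        for (tx = 0; tx < num_tx; tx++) {
            struct ring *ring = calloc(1, sizeof(*ring));

            if (!ring)
                goto err_allocation;
            ring->count = 1024;              /* assumed default descriptor count */
            tx_ring[tx] = ring;
        }
        return 0;

    err_allocation:
        /* unwind: release exactly the entries allocated before the failure */
        while (tx) {
            free(tx_ring[--tx]);
            tx_ring[tx] = NULL;
        }
        return -1;
    }

    int main(void)
    {
        printf("alloc_queues: %s\n", alloc_queues(4) ? "failed" : "ok");
        return 0;
    }

Decrementing inside the unwind loop (tx_ring[--tx]) releases only the slots that were filled in, and nulling them keeps later teardown idempotent.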
2421 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) in ixgbevf_set_interrupt_capability() argument
2423 struct net_device *netdev = adapter->netdev; in ixgbevf_set_interrupt_capability()
2433 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); in ixgbevf_set_interrupt_capability()
2440 adapter->msix_entries = kcalloc(v_budget, in ixgbevf_set_interrupt_capability()
2442 if (!adapter->msix_entries) { in ixgbevf_set_interrupt_capability()
2448 adapter->msix_entries[vector].entry = vector; in ixgbevf_set_interrupt_capability()
2450 err = ixgbevf_acquire_msix_vectors(adapter, v_budget); in ixgbevf_set_interrupt_capability()
2454 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); in ixgbevf_set_interrupt_capability()
2458 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); in ixgbevf_set_interrupt_capability()
2471 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) in ixgbevf_alloc_q_vectors() argument
2476 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_alloc_q_vectors()
2482 q_vector->adapter = adapter; in ixgbevf_alloc_q_vectors()
2484 netif_napi_add(adapter->netdev, &q_vector->napi, in ixgbevf_alloc_q_vectors()
2489 adapter->q_vector[q_idx] = q_vector; in ixgbevf_alloc_q_vectors()
2497 q_vector = adapter->q_vector[q_idx]; in ixgbevf_alloc_q_vectors()
2503 adapter->q_vector[q_idx] = NULL; in ixgbevf_alloc_q_vectors()
2516 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) in ixgbevf_free_q_vectors() argument
2518 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_free_q_vectors()
2521 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; in ixgbevf_free_q_vectors()
2523 adapter->q_vector[q_idx] = NULL; in ixgbevf_free_q_vectors()
2537 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) in ixgbevf_reset_interrupt_capability() argument
2539 pci_disable_msix(adapter->pdev); in ixgbevf_reset_interrupt_capability()
2540 kfree(adapter->msix_entries); in ixgbevf_reset_interrupt_capability()
2541 adapter->msix_entries = NULL; in ixgbevf_reset_interrupt_capability()
2549 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) in ixgbevf_init_interrupt_scheme() argument
2554 ixgbevf_set_num_queues(adapter); in ixgbevf_init_interrupt_scheme()
2556 err = ixgbevf_set_interrupt_capability(adapter); in ixgbevf_init_interrupt_scheme()
2558 hw_dbg(&adapter->hw, in ixgbevf_init_interrupt_scheme()
2563 err = ixgbevf_alloc_q_vectors(adapter); in ixgbevf_init_interrupt_scheme()
2565 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); in ixgbevf_init_interrupt_scheme()
2569 err = ixgbevf_alloc_queues(adapter); in ixgbevf_init_interrupt_scheme()
2575 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", in ixgbevf_init_interrupt_scheme()
2576 (adapter->num_rx_queues > 1) ? "Enabled" : in ixgbevf_init_interrupt_scheme()
2577 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); in ixgbevf_init_interrupt_scheme()
2579 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_init_interrupt_scheme()
2583 ixgbevf_free_q_vectors(adapter); in ixgbevf_init_interrupt_scheme()
2585 ixgbevf_reset_interrupt_capability(adapter); in ixgbevf_init_interrupt_scheme()
2597 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) in ixgbevf_clear_interrupt_scheme() argument
2601 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_clear_interrupt_scheme()
2602 kfree(adapter->tx_ring[i]); in ixgbevf_clear_interrupt_scheme()
2603 adapter->tx_ring[i] = NULL; in ixgbevf_clear_interrupt_scheme()
2605 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_clear_interrupt_scheme()
2606 kfree(adapter->rx_ring[i]); in ixgbevf_clear_interrupt_scheme()
2607 adapter->rx_ring[i] = NULL; in ixgbevf_clear_interrupt_scheme()
2610 adapter->num_tx_queues = 0; in ixgbevf_clear_interrupt_scheme()
2611 adapter->num_rx_queues = 0; in ixgbevf_clear_interrupt_scheme()
2613 ixgbevf_free_q_vectors(adapter); in ixgbevf_clear_interrupt_scheme()
2614 ixgbevf_reset_interrupt_capability(adapter); in ixgbevf_clear_interrupt_scheme()
2625 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) in ixgbevf_sw_init() argument
2627 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_sw_init()
2628 struct pci_dev *pdev = adapter->pdev; in ixgbevf_sw_init()
2629 struct net_device *netdev = adapter->netdev; in ixgbevf_sw_init()
2646 spin_lock_init(&adapter->mbx_lock); in ixgbevf_sw_init()
2658 ixgbevf_negotiate_api(adapter); in ixgbevf_sw_init()
2662 else if (is_zero_ether_addr(adapter->hw.mac.addr)) in ixgbevf_sw_init()
2675 adapter->rx_itr_setting = 1; in ixgbevf_sw_init()
2676 adapter->tx_itr_setting = 1; in ixgbevf_sw_init()
2679 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; in ixgbevf_sw_init()
2680 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; in ixgbevf_sw_init()
2682 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_sw_init()
2715 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) in ixgbevf_update_stats() argument
2717 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_update_stats()
2720 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_update_stats()
2721 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_update_stats()
2724 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, in ixgbevf_update_stats()
2725 adapter->stats.vfgprc); in ixgbevf_update_stats()
2726 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, in ixgbevf_update_stats()
2727 adapter->stats.vfgptc); in ixgbevf_update_stats()
2729 adapter->stats.last_vfgorc, in ixgbevf_update_stats()
2730 adapter->stats.vfgorc); in ixgbevf_update_stats()
2732 adapter->stats.last_vfgotc, in ixgbevf_update_stats()
2733 adapter->stats.vfgotc); in ixgbevf_update_stats()
2734 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, in ixgbevf_update_stats()
2735 adapter->stats.vfmprc); in ixgbevf_update_stats()
2737 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_update_stats()
2738 adapter->hw_csum_rx_error += in ixgbevf_update_stats()
2739 adapter->rx_ring[i]->hw_csum_rx_error; in ixgbevf_update_stats()
2740 adapter->rx_ring[i]->hw_csum_rx_error = 0; in ixgbevf_update_stats()
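ixgbevf_update_stats() folds the 32-bit hardware counters into wider software totals via the UPDATE_VF_COUNTER_* macros and then sums per-ring hw_csum_rx_error, zeroing each ring's count as it goes. A generic sketch of wraparound-safe accumulation of a 32-bit reading into a 64-bit total (not the driver's exact macro):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a new 32-bit hardware reading into a 64-bit running total,
     * tolerating one wrap of the hardware counter between readings. */
    static void update_counter_32(uint32_t current, uint32_t *last, uint64_t *total)
    {
        *total += (uint32_t)(current - *last);   /* modular delta handles wrap */
        *last = current;
    }

    int main(void)
    {
        uint32_t last = 0xFFFFFF00u;             /* reading taken just before a wrap */
        uint64_t total = 0;

        update_counter_32(0x00000080u, &last, &total);  /* post-wrap reading */
        printf("total = %llu\n", (unsigned long long)total);  /* prints 384 */
        return 0;
    }

The unsigned 32-bit subtraction yields the correct delta even when the hardware counter wraps between readings, provided it wraps at most once.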
2750 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; in ixgbevf_service_timer() local
2753 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); in ixgbevf_service_timer()
2755 ixgbevf_service_event_schedule(adapter); in ixgbevf_service_timer()
2758 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) in ixgbevf_reset_subtask() argument
2760 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED)) in ixgbevf_reset_subtask()
2763 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED; in ixgbevf_reset_subtask()
2766 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_reset_subtask()
2767 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_reset_subtask()
2770 adapter->tx_timeout_count++; in ixgbevf_reset_subtask()
2772 ixgbevf_reinit_locked(adapter); in ixgbevf_reset_subtask()
2784 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) in ixgbevf_check_hang_subtask() argument
2786 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_check_hang_subtask()
2791 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_check_hang_subtask()
2792 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_check_hang_subtask()
2796 if (netif_carrier_ok(adapter->netdev)) { in ixgbevf_check_hang_subtask()
2797 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_check_hang_subtask()
2798 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbevf_check_hang_subtask()
2802 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { in ixgbevf_check_hang_subtask()
2803 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; in ixgbevf_check_hang_subtask()
2817 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) in ixgbevf_watchdog_update_link() argument
2819 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_watchdog_update_link()
2820 u32 link_speed = adapter->link_speed; in ixgbevf_watchdog_update_link()
2821 bool link_up = adapter->link_up; in ixgbevf_watchdog_update_link()
2824 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
2828 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
2831 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { in ixgbevf_watchdog_update_link()
2832 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; in ixgbevf_watchdog_update_link()
2836 adapter->link_up = link_up; in ixgbevf_watchdog_update_link()
2837 adapter->link_speed = link_speed; in ixgbevf_watchdog_update_link()
2845 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) in ixgbevf_watchdog_link_is_up() argument
2847 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_up()
2853 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", in ixgbevf_watchdog_link_is_up()
2854 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? in ixgbevf_watchdog_link_is_up()
2856 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? in ixgbevf_watchdog_link_is_up()
2858 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? in ixgbevf_watchdog_link_is_up()
2870 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) in ixgbevf_watchdog_link_is_down() argument
2872 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_down()
2874 adapter->link_speed = 0; in ixgbevf_watchdog_link_is_down()
2880 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); in ixgbevf_watchdog_link_is_down()
2889 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter) in ixgbevf_watchdog_subtask() argument
2892 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_watchdog_subtask()
2893 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_watchdog_subtask()
2896 ixgbevf_watchdog_update_link(adapter); in ixgbevf_watchdog_subtask()
2898 if (adapter->link_up) in ixgbevf_watchdog_subtask()
2899 ixgbevf_watchdog_link_is_up(adapter); in ixgbevf_watchdog_subtask()
2901 ixgbevf_watchdog_link_is_down(adapter); in ixgbevf_watchdog_subtask()
2903 ixgbevf_update_stats(adapter); in ixgbevf_watchdog_subtask()
2912 struct ixgbevf_adapter *adapter = container_of(work, in ixgbevf_service_task() local
2915 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_service_task()
2918 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_service_task()
2920 ixgbevf_down(adapter); in ixgbevf_service_task()
2926 ixgbevf_queue_reset_subtask(adapter); in ixgbevf_service_task()
2927 ixgbevf_reset_subtask(adapter); in ixgbevf_service_task()
2928 ixgbevf_watchdog_subtask(adapter); in ixgbevf_service_task()
2929 ixgbevf_check_hang_subtask(adapter); in ixgbevf_service_task()
2931 ixgbevf_service_event_complete(adapter); in ixgbevf_service_task()
2963 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) in ixgbevf_free_all_tx_resources() argument
2967 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_free_all_tx_resources()
2968 if (adapter->tx_ring[i]->desc) in ixgbevf_free_all_tx_resources()
2969 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_free_all_tx_resources()
3001 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); in ixgbevf_setup_tx_resources()
3015 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) in ixgbevf_setup_all_tx_resources() argument
3019 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_setup_all_tx_resources()
3020 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
3023 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); in ixgbevf_setup_all_tx_resources()
3073 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) in ixgbevf_setup_all_rx_resources() argument
3077 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_setup_all_rx_resources()
3078 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3081 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); in ixgbevf_setup_all_rx_resources()
3112 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) in ixgbevf_free_all_rx_resources() argument
3116 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_free_all_rx_resources()
3117 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3118 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
3135 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_open() local
3136 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_open()
3145 if (!adapter->num_msix_vectors) in ixgbevf_open()
3149 ixgbevf_reset(adapter); in ixgbevf_open()
3161 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) in ixgbevf_open()
3167 err = ixgbevf_setup_all_tx_resources(adapter); in ixgbevf_open()
3172 err = ixgbevf_setup_all_rx_resources(adapter); in ixgbevf_open()
3176 ixgbevf_configure(adapter); in ixgbevf_open()
3182 ixgbevf_map_rings_to_vectors(adapter); in ixgbevf_open()
3184 err = ixgbevf_request_irq(adapter); in ixgbevf_open()
3188 ixgbevf_up_complete(adapter); in ixgbevf_open()
3193 ixgbevf_down(adapter); in ixgbevf_open()
3195 ixgbevf_free_all_rx_resources(adapter); in ixgbevf_open()
3197 ixgbevf_free_all_tx_resources(adapter); in ixgbevf_open()
3198 ixgbevf_reset(adapter); in ixgbevf_open()
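The ixgbevf_open() hits show the usual staged error unwinding: a failure after Rx setup jumps to a label that brings the device down, falls through to the labels that free the Rx and then the Tx resources, and finally resets the part. A compact sketch of that goto-ladder cleanup style (hypothetical setup/teardown helpers):

    #include <stdio.h>

    /* Hypothetical stand-ins for the driver's setup/teardown calls. The teardown
     * side is assumed to tolerate partially set-up state, as the driver's
     * free_all_*_resources() do (they check ring->desc before freeing). */
    static int setup_tx(void)     { puts("setup tx");    return 0; }
    static int setup_rx(void)     { puts("setup rx");    return 0; }
    static int request_irqs(void) { puts("request irq"); return -1; } /* simulate failure */
    static void device_down(void) { puts("down"); }
    static void free_rx(void)     { puts("free rx"); }
    static void free_tx(void)     { puts("free tx"); }
    static void reset_part(void)  { puts("reset"); }

    static int open_device(void)
    {
        int err;

        err = setup_tx();
        if (err)
            goto err_setup_tx;
        err = setup_rx();
        if (err)
            goto err_setup_rx;
        err = request_irqs();
        if (err)
            goto err_req_irq;
        return 0;

    err_req_irq:
        device_down();          /* undo the most recent step ... */
    err_setup_rx:
        free_rx();              /* ... then fall through the earlier ones */
    err_setup_tx:
        free_tx();
        reset_part();
        return err;
    }

    int main(void)
    {
        printf("open_device: %d\n", open_device());
        return 0;
    }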
3218 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_close() local
3220 ixgbevf_down(adapter); in ixgbevf_close()
3221 ixgbevf_free_irq(adapter); in ixgbevf_close()
3223 ixgbevf_free_all_tx_resources(adapter); in ixgbevf_close()
3224 ixgbevf_free_all_rx_resources(adapter); in ixgbevf_close()
3229 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) in ixgbevf_queue_reset_subtask() argument
3231 struct net_device *dev = adapter->netdev; in ixgbevf_queue_reset_subtask()
3233 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) in ixgbevf_queue_reset_subtask()
3236 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; in ixgbevf_queue_reset_subtask()
3239 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_queue_reset_subtask()
3240 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_queue_reset_subtask()
3250 ixgbevf_clear_interrupt_scheme(adapter); in ixgbevf_queue_reset_subtask()
3251 ixgbevf_init_interrupt_scheme(adapter); in ixgbevf_queue_reset_subtask()
3614 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_xmit_frame() local
3631 tx_ring = adapter->tx_ring[skb->queue_mapping]; in ixgbevf_xmit_frame()
3694 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_set_mac() local
3695 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_mac()
3704 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
3708 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
3722 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_change_mtu() local
3723 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_change_mtu()
3727 switch (adapter->hw.api_version) { in ixgbevf_change_mtu()
3733 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) in ixgbevf_change_mtu()
3760 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_netpoll() local
3764 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_netpoll()
3766 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_netpoll()
3767 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); in ixgbevf_netpoll()
3774 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_suspend() local
3783 ixgbevf_down(adapter); in ixgbevf_suspend()
3784 ixgbevf_free_irq(adapter); in ixgbevf_suspend()
3785 ixgbevf_free_all_tx_resources(adapter); in ixgbevf_suspend()
3786 ixgbevf_free_all_rx_resources(adapter); in ixgbevf_suspend()
3790 ixgbevf_clear_interrupt_scheme(adapter); in ixgbevf_suspend()
3798 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) in ixgbevf_suspend()
3808 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_resume() local
3823 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_resume()
3826 ixgbevf_reset(adapter); in ixgbevf_resume()
3829 err = ixgbevf_init_interrupt_scheme(adapter); in ixgbevf_resume()
3856 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_get_stats() local
3862 ixgbevf_update_stats(adapter); in ixgbevf_get_stats()
3864 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; in ixgbevf_get_stats()
3866 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_get_stats()
3867 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
3877 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_get_stats()
3878 ring = adapter->tx_ring[i]; in ixgbevf_get_stats()
3933 struct ixgbevf_adapter *adapter = NULL; in ixgbevf_probe() local
3971 adapter = netdev_priv(netdev); in ixgbevf_probe()
3973 adapter->netdev = netdev; in ixgbevf_probe()
3974 adapter->pdev = pdev; in ixgbevf_probe()
3975 hw = &adapter->hw; in ixgbevf_probe()
3976 hw->back = adapter; in ixgbevf_probe()
3977 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in ixgbevf_probe()
3986 adapter->io_addr = hw->hw_addr; in ixgbevf_probe()
4002 err = ixgbevf_sw_init(adapter); in ixgbevf_probe()
4041 setup_timer(&adapter->service_timer, &ixgbevf_service_timer, in ixgbevf_probe()
4042 (unsigned long)adapter); in ixgbevf_probe()
4044 INIT_WORK(&adapter->service_task, ixgbevf_service_task); in ixgbevf_probe()
4045 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); in ixgbevf_probe()
4046 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_probe()
4048 err = ixgbevf_init_interrupt_scheme(adapter); in ixgbevf_probe()
4061 ixgbevf_init_last_counter_stats(adapter); in ixgbevf_probe()
4083 ixgbevf_clear_interrupt_scheme(adapter); in ixgbevf_probe()
4085 ixgbevf_reset_interrupt_capability(adapter); in ixgbevf_probe()
4086 iounmap(adapter->io_addr); in ixgbevf_probe()
4088 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_probe()
4094 if (!adapter || disable_dev) in ixgbevf_probe()
4111 struct ixgbevf_adapter *adapter; in ixgbevf_remove() local
4117 adapter = netdev_priv(netdev); in ixgbevf_remove()
4119 set_bit(__IXGBEVF_REMOVING, &adapter->state); in ixgbevf_remove()
4120 cancel_work_sync(&adapter->service_task); in ixgbevf_remove()
4125 ixgbevf_clear_interrupt_scheme(adapter); in ixgbevf_remove()
4126 ixgbevf_reset_interrupt_capability(adapter); in ixgbevf_remove()
4128 iounmap(adapter->io_addr); in ixgbevf_remove()
4131 hw_dbg(&adapter->hw, "Remove complete\n"); in ixgbevf_remove()
4133 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_remove()
4152 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_io_error_detected() local
4154 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_io_error_detected()
4166 ixgbevf_down(adapter); in ixgbevf_io_error_detected()
4168 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) in ixgbevf_io_error_detected()
4186 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_io_slot_reset() local
4195 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_io_slot_reset()
4198 ixgbevf_reset(adapter); in ixgbevf_io_slot_reset()
4214 struct ixgbevf_adapter *adapter = netdev_priv(netdev); in ixgbevf_io_resume() local
4217 ixgbevf_up(adapter); in ixgbevf_io_resume()
4282 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_get_hw_dev_name() local
4284 return adapter->netdev->name; in ixgbevf_get_hw_dev_name()