Lines matching refs:adapter in drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
173 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, in ixgbe_read_pci_cfg_word_parent() argument
179 parent_bus = adapter->pdev->bus->parent; in ixgbe_read_pci_cfg_word_parent()
192 ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) in ixgbe_read_pci_cfg_word_parent()
197 static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) in ixgbe_get_parent_bus_info() argument
199 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_get_parent_bus_info()
208 err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); in ixgbe_get_parent_bus_info()
240 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, in ixgbe_check_minimum_link() argument
250 if (ixgbe_pcie_from_parent(&adapter->hw)) in ixgbe_check_minimum_link()
251 pdev = adapter->pdev->bus->parent->self; in ixgbe_check_minimum_link()
253 pdev = adapter->pdev; in ixgbe_check_minimum_link()
300 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) in ixgbe_service_event_schedule() argument
302 if (!test_bit(__IXGBE_DOWN, &adapter->state) && in ixgbe_service_event_schedule()
303 !test_bit(__IXGBE_REMOVING, &adapter->state) && in ixgbe_service_event_schedule()
304 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) in ixgbe_service_event_schedule()
305 schedule_work(&adapter->service_task); in ixgbe_service_event_schedule()
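For context, the ixgbe_service_event_schedule() fragments above can be reassembled into the full helper (every statement is already visible in the listing; only the comment is added here):

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	/* Queue the service task only when the adapter is neither going
	 * down nor being removed and no run is already pending;
	 * test_and_set_bit() makes the pending check and the claim atomic.
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_bit(__IXGBE_REMOVING, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

As later entries show (ixgbe_tx_timeout_reset(), ixgbe_msix_other()), callers first record what they want done in adapter->flags2 (e.g. IXGBE_FLAG2_RESET_REQUESTED or IXGBE_FLAG2_FDIR_REQUIRES_REINIT) and then call this helper, so the service task discovers the request when it runs.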
310 struct ixgbe_adapter *adapter = hw->back; in ixgbe_remove_adapter() local
316 if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) in ixgbe_remove_adapter()
317 ixgbe_service_event_schedule(adapter); in ixgbe_remove_adapter()
379 struct ixgbe_adapter *adapter = hw->back; in ixgbe_read_pci_cfg_word() local
384 pci_read_config_word(adapter->pdev, reg, &value); in ixgbe_read_pci_cfg_word()
386 ixgbe_check_cfg_remove(hw, adapter->pdev)) in ixgbe_read_pci_cfg_word()
394 struct ixgbe_adapter *adapter = hw->back; in ixgbe_read_pci_cfg_dword() local
399 pci_read_config_dword(adapter->pdev, reg, &value); in ixgbe_read_pci_cfg_dword()
401 ixgbe_check_cfg_remove(hw, adapter->pdev)) in ixgbe_read_pci_cfg_dword()
409 struct ixgbe_adapter *adapter = hw->back; in ixgbe_write_pci_cfg_word() local
413 pci_write_config_word(adapter->pdev, reg, value); in ixgbe_write_pci_cfg_word()
416 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) in ixgbe_service_event_complete() argument
418 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); in ixgbe_service_event_complete()
422 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); in ixgbe_service_event_complete()
548 static void ixgbe_dump(struct ixgbe_adapter *adapter) in ixgbe_dump() argument
550 struct net_device *netdev = adapter->netdev; in ixgbe_dump()
551 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_dump()
564 if (!netif_msg_hw(adapter)) in ixgbe_dump()
569 dev_info(&adapter->pdev->dev, "Net device Info\n"); in ixgbe_dump()
580 dev_info(&adapter->pdev->dev, "Register Dump\n"); in ixgbe_dump()
591 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in ixgbe_dump()
595 for (n = 0; n < adapter->num_tx_queues; n++) { in ixgbe_dump()
596 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
607 if (!netif_msg_tx_done(adapter)) in ixgbe_dump()
610 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in ixgbe_dump()
647 for (n = 0; n < adapter->num_tx_queues; n++) { in ixgbe_dump()
648 tx_ring = adapter->tx_ring[n]; in ixgbe_dump()
681 if (netif_msg_pktdata(adapter) && in ixgbe_dump()
694 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in ixgbe_dump()
696 for (n = 0; n < adapter->num_rx_queues; n++) { in ixgbe_dump()
697 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
703 if (!netif_msg_rx_status(adapter)) in ixgbe_dump()
706 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in ixgbe_dump()
753 for (n = 0; n < adapter->num_rx_queues; n++) { in ixgbe_dump()
754 rx_ring = adapter->rx_ring[n]; in ixgbe_dump()
787 if (netif_msg_pktdata(adapter) && in ixgbe_dump()
808 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) in ixgbe_release_hw_control() argument
813 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); in ixgbe_release_hw_control()
814 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, in ixgbe_release_hw_control()
818 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) in ixgbe_get_hw_control() argument
823 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); in ixgbe_get_hw_control()
824 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, in ixgbe_get_hw_control()
836 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, in ixgbe_set_ivar() argument
840 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_ivar()
860 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC); in ixgbe_set_ivar()
863 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar); in ixgbe_set_ivar()
880 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, in ixgbe_irq_rearm_queues() argument
885 switch (adapter->hw.mac.type) { in ixgbe_irq_rearm_queues()
888 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); in ixgbe_irq_rearm_queues()
895 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); in ixgbe_irq_rearm_queues()
897 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); in ixgbe_irq_rearm_queues()
926 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) in ixgbe_update_xoff_rx_lfc() argument
928 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_xoff_rx_lfc()
929 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_xoff_rx_lfc()
950 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_update_xoff_rx_lfc()
952 &adapter->tx_ring[i]->state); in ixgbe_update_xoff_rx_lfc()
955 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) in ixgbe_update_xoff_received() argument
957 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_xoff_received()
958 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_xoff_received()
962 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_update_xoff_received()
964 if (adapter->ixgbe_ieee_pfc) in ixgbe_update_xoff_received()
965 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_update_xoff_received()
967 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { in ixgbe_update_xoff_received()
968 ixgbe_update_xoff_rx_lfc(adapter); in ixgbe_update_xoff_received()
985 tc = netdev_get_prio_tc_map(adapter->netdev, i); in ixgbe_update_xoff_received()
990 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_update_xoff_received()
991 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_xoff_received()
1006 struct ixgbe_adapter *adapter; in ixgbe_get_tx_pending() local
1011 adapter = ring->l2_accel_priv->real_adapter; in ixgbe_get_tx_pending()
1013 adapter = netdev_priv(ring->netdev); in ixgbe_get_tx_pending()
1015 hw = &adapter->hw; in ixgbe_get_tx_pending()
1062 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) in ixgbe_tx_timeout_reset() argument
1066 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_tx_timeout_reset()
1067 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; in ixgbe_tx_timeout_reset()
1069 ixgbe_service_event_schedule(adapter); in ixgbe_tx_timeout_reset()
1081 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_tx_irq() local
1088 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_clean_tx_irq()
1178 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_clean_tx_irq()
1197 adapter->tx_timeout_count + 1, tx_ring->queue_index); in ixgbe_clean_tx_irq()
1200 ixgbe_tx_timeout_reset(adapter); in ixgbe_clean_tx_irq()
1218 && !test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_clean_tx_irq()
1229 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, in ixgbe_update_tx_dca() argument
1233 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_tx_dca()
1263 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, in ixgbe_update_rx_dca() argument
1267 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_rx_dca()
1294 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_update_dca() local
1302 ixgbe_update_tx_dca(adapter, ring, cpu); in ixgbe_update_dca()
1305 ixgbe_update_rx_dca(adapter, ring, cpu); in ixgbe_update_dca()
1312 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) in ixgbe_setup_dca() argument
1316 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) in ixgbe_setup_dca()
1320 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); in ixgbe_setup_dca()
1322 for (i = 0; i < adapter->num_q_vectors; i++) { in ixgbe_setup_dca()
1323 adapter->q_vector[i]->cpu = -1; in ixgbe_setup_dca()
1324 ixgbe_update_dca(adapter->q_vector[i]); in ixgbe_setup_dca()
1330 struct ixgbe_adapter *adapter = dev_get_drvdata(dev); in __ixgbe_notify_dca() local
1333 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE)) in __ixgbe_notify_dca()
1339 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in __ixgbe_notify_dca()
1342 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; in __ixgbe_notify_dca()
1343 ixgbe_setup_dca(adapter); in __ixgbe_notify_dca()
1348 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { in __ixgbe_notify_dca()
1350 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; in __ixgbe_notify_dca()
1351 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); in __ixgbe_notify_dca()
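The __ixgbe_notify_dca() fragments above toggle IXGBE_FLAG_DCA_ENABLED around the DCA provider callbacks. A sketch of that pattern follows; the event switch and the dca_add_requester()/dca_remove_requester() calls are not part of the listing and are filled in here from the generic DCA provider API, so treat them as an assumption rather than the driver's exact text:

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
	unsigned long event = *(unsigned long *)data;

	if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
		return 0;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* already enabled: nothing to do */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* fall through: registration failed, disable DCA */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			/* writing 1 to DCA_CTRL turns DCA off in hardware */
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}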
1606 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); in ixgbe_process_skb_fields()
2011 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_rx_irq() local
2064 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); in ixgbe_clean_rx_irq()
2109 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_low_latency_recv() local
2113 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_low_latency_recv()
2144 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) in ixgbe_configure_msix() argument
2151 if (adapter->num_vfs > 32) { in ixgbe_configure_msix()
2152 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; in ixgbe_configure_msix()
2153 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); in ixgbe_configure_msix()
2160 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { in ixgbe_configure_msix()
2162 q_vector = adapter->q_vector[v_idx]; in ixgbe_configure_msix()
2165 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbe_configure_msix()
2168 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbe_configure_msix()
2173 switch (adapter->hw.mac.type) { in ixgbe_configure_msix()
2175 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, in ixgbe_configure_msix()
2182 ixgbe_set_ivar(adapter, -1, 1, v_idx); in ixgbe_configure_msix()
2187 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); in ixgbe_configure_msix()
2195 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); in ixgbe_configure_msix()
2279 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_write_eitr() local
2280 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_write_eitr()
2284 switch (adapter->hw.mac.type) { in ixgbe_write_eitr()
2346 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) in ixgbe_check_overtemp_subtask() argument
2348 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_overtemp_subtask()
2349 u32 eicr = adapter->interrupt_event; in ixgbe_check_overtemp_subtask()
2351 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_check_overtemp_subtask()
2354 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && in ixgbe_check_overtemp_subtask()
2355 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) in ixgbe_check_overtemp_subtask()
2358 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_subtask()
2398 adapter->interrupt_event = 0; in ixgbe_check_overtemp_subtask()
2401 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) in ixgbe_check_fan_failure() argument
2403 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_fan_failure()
2405 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && in ixgbe_check_fan_failure()
2413 static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) in ixgbe_check_overtemp_event() argument
2415 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) in ixgbe_check_overtemp_event()
2418 switch (adapter->hw.mac.type) { in ixgbe_check_overtemp_event()
2425 (!test_bit(__IXGBE_DOWN, &adapter->state))) { in ixgbe_check_overtemp_event()
2426 adapter->interrupt_event = eicr; in ixgbe_check_overtemp_event()
2427 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; in ixgbe_check_overtemp_event()
2428 ixgbe_service_event_schedule(adapter); in ixgbe_check_overtemp_event()
2446 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) in ixgbe_check_sfp_event() argument
2448 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_sfp_event()
2453 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_sfp_event()
2454 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_check_sfp_event()
2455 ixgbe_service_event_schedule(adapter); in ixgbe_check_sfp_event()
2462 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_sfp_event()
2463 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_check_sfp_event()
2464 ixgbe_service_event_schedule(adapter); in ixgbe_check_sfp_event()
2469 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) in ixgbe_check_lsc() argument
2471 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_lsc()
2473 adapter->lsc_int++; in ixgbe_check_lsc()
2474 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_check_lsc()
2475 adapter->link_check_timeout = jiffies; in ixgbe_check_lsc()
2476 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_check_lsc()
2479 ixgbe_service_event_schedule(adapter); in ixgbe_check_lsc()
2483 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, in ixgbe_irq_enable_queues() argument
2487 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_irq_enable_queues()
2511 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, in ixgbe_irq_disable_queues() argument
2515 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_irq_disable_queues()
2543 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, in ixgbe_irq_enable() argument
2549 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) in ixgbe_irq_enable()
2552 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) in ixgbe_irq_enable()
2553 switch (adapter->hw.mac.type) { in ixgbe_irq_enable()
2565 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) in ixgbe_irq_enable()
2567 switch (adapter->hw.mac.type) { in ixgbe_irq_enable()
2582 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && in ixgbe_irq_enable()
2583 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) in ixgbe_irq_enable()
2586 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); in ixgbe_irq_enable()
2588 ixgbe_irq_enable_queues(adapter, ~0); in ixgbe_irq_enable()
2590 IXGBE_WRITE_FLUSH(&adapter->hw); in ixgbe_irq_enable()
2595 struct ixgbe_adapter *adapter = data; in ixgbe_msix_other() local
2596 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_msix_other()
2619 ixgbe_check_lsc(adapter); in ixgbe_msix_other()
2622 ixgbe_msg_task(adapter); in ixgbe_msix_other()
2631 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; in ixgbe_msix_other()
2632 ixgbe_service_event_schedule(adapter); in ixgbe_msix_other()
2639 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_msix_other()
2640 struct ixgbe_ring *ring = adapter->tx_ring[i]; in ixgbe_msix_other()
2648 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_msix_other()
2649 ixgbe_service_event_schedule(adapter); in ixgbe_msix_other()
2652 ixgbe_check_sfp_event(adapter, eicr); in ixgbe_msix_other()
2653 ixgbe_check_overtemp_event(adapter, eicr); in ixgbe_msix_other()
2659 ixgbe_check_fan_failure(adapter, eicr); in ixgbe_msix_other()
2662 ixgbe_ptp_check_pps_event(adapter, eicr); in ixgbe_msix_other()
2665 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_msix_other()
2666 ixgbe_irq_enable(adapter, false, false); in ixgbe_msix_other()
2694 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_poll() local
2700 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) in ixgbe_poll()
2728 if (adapter->rx_itr_setting & 1) in ixgbe_poll()
2730 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_poll()
2731 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); in ixgbe_poll()
2743 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) in ixgbe_request_msix_irqs() argument
2745 struct net_device *netdev = adapter->netdev; in ixgbe_request_msix_irqs()
2749 for (vector = 0; vector < adapter->num_q_vectors; vector++) { in ixgbe_request_msix_irqs()
2750 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_request_msix_irqs()
2751 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbe_request_msix_irqs()
2775 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_request_msix_irqs()
2782 err = request_irq(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
2783 ixgbe_msix_other, 0, netdev->name, adapter); in ixgbe_request_msix_irqs()
2794 irq_set_affinity_hint(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
2796 free_irq(adapter->msix_entries[vector].vector, in ixgbe_request_msix_irqs()
2797 adapter->q_vector[vector]); in ixgbe_request_msix_irqs()
2799 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; in ixgbe_request_msix_irqs()
2800 pci_disable_msix(adapter->pdev); in ixgbe_request_msix_irqs()
2801 kfree(adapter->msix_entries); in ixgbe_request_msix_irqs()
2802 adapter->msix_entries = NULL; in ixgbe_request_msix_irqs()
2813 struct ixgbe_adapter *adapter = data; in ixgbe_intr() local
2814 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_intr()
2815 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_intr()
2835 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_intr()
2836 ixgbe_irq_enable(adapter, true, true); in ixgbe_intr()
2841 ixgbe_check_lsc(adapter); in ixgbe_intr()
2845 ixgbe_check_sfp_event(adapter, eicr); in ixgbe_intr()
2852 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; in ixgbe_intr()
2853 ixgbe_service_event_schedule(adapter); in ixgbe_intr()
2856 ixgbe_check_overtemp_event(adapter, eicr); in ixgbe_intr()
2862 ixgbe_check_fan_failure(adapter, eicr); in ixgbe_intr()
2864 ixgbe_ptp_check_pps_event(adapter, eicr); in ixgbe_intr()
2873 if (!test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_intr()
2874 ixgbe_irq_enable(adapter, false, false); in ixgbe_intr()
2886 static int ixgbe_request_irq(struct ixgbe_adapter *adapter) in ixgbe_request_irq() argument
2888 struct net_device *netdev = adapter->netdev; in ixgbe_request_irq()
2891 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) in ixgbe_request_irq()
2892 err = ixgbe_request_msix_irqs(adapter); in ixgbe_request_irq()
2893 else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) in ixgbe_request_irq()
2894 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, in ixgbe_request_irq()
2895 netdev->name, adapter); in ixgbe_request_irq()
2897 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, in ixgbe_request_irq()
2898 netdev->name, adapter); in ixgbe_request_irq()
2906 static void ixgbe_free_irq(struct ixgbe_adapter *adapter) in ixgbe_free_irq() argument
2910 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { in ixgbe_free_irq()
2911 free_irq(adapter->pdev->irq, adapter); in ixgbe_free_irq()
2915 for (vector = 0; vector < adapter->num_q_vectors; vector++) { in ixgbe_free_irq()
2916 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; in ixgbe_free_irq()
2917 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbe_free_irq()
2929 free_irq(adapter->msix_entries[vector++].vector, adapter); in ixgbe_free_irq()
2936 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) in ixgbe_irq_disable() argument
2938 switch (adapter->hw.mac.type) { in ixgbe_irq_disable()
2940 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); in ixgbe_irq_disable()
2946 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); in ixgbe_irq_disable()
2947 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); in ixgbe_irq_disable()
2948 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); in ixgbe_irq_disable()
2953 IXGBE_WRITE_FLUSH(&adapter->hw); in ixgbe_irq_disable()
2954 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_irq_disable()
2957 for (vector = 0; vector < adapter->num_q_vectors; vector++) in ixgbe_irq_disable()
2958 synchronize_irq(adapter->msix_entries[vector].vector); in ixgbe_irq_disable()
2960 synchronize_irq(adapter->msix_entries[vector++].vector); in ixgbe_irq_disable()
2962 synchronize_irq(adapter->pdev->irq); in ixgbe_irq_disable()
2970 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) in ixgbe_configure_msi_and_legacy() argument
2972 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; in ixgbe_configure_msi_and_legacy()
2976 ixgbe_set_ivar(adapter, 0, 0, 0); in ixgbe_configure_msi_and_legacy()
2977 ixgbe_set_ivar(adapter, 1, 0, 0); in ixgbe_configure_msi_and_legacy()
2989 void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, in ixgbe_configure_tx_ring() argument
2992 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_tx_ring()
3009 ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); in ixgbe_configure_tx_ring()
3034 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_configure_tx_ring()
3035 ring->atr_sample_rate = adapter->atr_sample_rate; in ixgbe_configure_tx_ring()
3071 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) in ixgbe_setup_mtqc() argument
3073 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_mtqc()
3075 u8 tcs = netdev_get_num_tc(adapter->netdev); in ixgbe_setup_mtqc()
3086 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_mtqc()
3092 else if (adapter->ring_feature[RING_F_RSS].indices == 4) in ixgbe_setup_mtqc()
3125 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) in ixgbe_configure_tx() argument
3127 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_tx()
3131 ixgbe_setup_mtqc(adapter); in ixgbe_configure_tx()
3141 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_configure_tx()
3142 ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbe_configure_tx()
3145 static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter, in ixgbe_enable_rx_drop() argument
3148 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_enable_rx_drop()
3157 static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, in ixgbe_disable_rx_drop() argument
3160 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rx_drop()
3170 void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) in ixgbe_set_rx_drop_en() argument
3172 static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) in ixgbe_set_rx_drop_en()
3176 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_set_rx_drop_en()
3178 if (adapter->ixgbe_ieee_pfc) in ixgbe_set_rx_drop_en()
3179 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_set_rx_drop_en()
3190 if (adapter->num_vfs || (adapter->num_rx_queues > 1 && in ixgbe_set_rx_drop_en()
3191 !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) { in ixgbe_set_rx_drop_en()
3192 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_set_rx_drop_en()
3193 ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
3195 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_set_rx_drop_en()
3196 ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]); in ixgbe_set_rx_drop_en()
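Almost all of ixgbe_set_rx_drop_en() appears in the fragments above; reassembled (loop variable, else branch and comment added here), the decision is to enable per-ring drop whenever dropping is safe: SR-IOV is active, or several Rx queues are in use and neither Tx pause nor PFC can apply back-pressure.

static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
{
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	/* Drop when SR-IOV is in use, or when multiple Rx queues share the
	 * packet buffer and no Tx pause / PFC is configured; otherwise keep
	 * drop disabled so flow control can throttle the link partner.
	 */
	if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
	    !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
	}
}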
3202 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, in ixgbe_configure_srrctl() argument
3205 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_srrctl()
3210 u16 mask = adapter->ring_feature[RING_F_RSS].mask; in ixgbe_configure_srrctl()
3240 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) in ixgbe_rss_indir_tbl_entries() argument
3242 if (adapter->hw.mac.type < ixgbe_mac_X550) in ixgbe_rss_indir_tbl_entries()
3244 else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_rss_indir_tbl_entries()
3257 static void ixgbe_store_reta(struct ixgbe_adapter *adapter) in ixgbe_store_reta() argument
3259 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); in ixgbe_store_reta()
3260 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_reta()
3263 u8 *indir_tbl = adapter->rss_indir_tbl; in ixgbe_store_reta()
3271 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_store_reta()
3297 static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) in ixgbe_store_vfreta() argument
3299 u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); in ixgbe_store_vfreta()
3300 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_store_vfreta()
3302 unsigned int pf_pool = adapter->num_vfs; in ixgbe_store_vfreta()
3306 vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; in ixgbe_store_vfreta()
3315 static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) in ixgbe_setup_reta() argument
3317 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_reta()
3319 u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); in ixgbe_setup_reta()
3320 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_reta()
3326 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) in ixgbe_setup_reta()
3331 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); in ixgbe_setup_reta()
3334 memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); in ixgbe_setup_reta()
3340 adapter->rss_indir_tbl[i] = j; in ixgbe_setup_reta()
3343 ixgbe_store_reta(adapter); in ixgbe_setup_reta()
3346 static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) in ixgbe_setup_vfreta() argument
3348 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_vfreta()
3349 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_vfreta()
3350 unsigned int pf_pool = adapter->num_vfs; in ixgbe_setup_vfreta()
3356 adapter->rss_key[i]); in ixgbe_setup_vfreta()
3363 adapter->rss_indir_tbl[i] = j; in ixgbe_setup_vfreta()
3366 ixgbe_store_vfreta(adapter); in ixgbe_setup_vfreta()
3369 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) in ixgbe_setup_mrqc() argument
3371 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_mrqc()
3380 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_setup_mrqc()
3381 if (adapter->ring_feature[RING_F_RSS].mask) in ixgbe_setup_mrqc()
3384 u8 tcs = netdev_get_num_tc(adapter->netdev); in ixgbe_setup_mrqc()
3386 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_mrqc()
3391 else if (adapter->ring_feature[RING_F_RSS].indices == 4) in ixgbe_setup_mrqc()
3411 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) in ixgbe_setup_mrqc()
3413 if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) in ixgbe_setup_mrqc()
3416 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); in ixgbe_setup_mrqc()
3418 (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { in ixgbe_setup_mrqc()
3419 unsigned int pf_pool = adapter->num_vfs; in ixgbe_setup_mrqc()
3426 ixgbe_setup_vfreta(adapter); in ixgbe_setup_mrqc()
3431 ixgbe_setup_reta(adapter); in ixgbe_setup_mrqc()
3442 static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, in ixgbe_configure_rscctl() argument
3445 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rscctl()
3464 static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, in ixgbe_rx_desc_queue_enable() argument
3467 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_rx_desc_queue_enable()
3490 void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, in ixgbe_disable_rx_queue() argument
3493 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_disable_rx_queue()
3522 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, in ixgbe_configure_rx_ring() argument
3525 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rx_ring()
3532 ixgbe_disable_rx_queue(adapter, ring); in ixgbe_configure_rx_ring()
3540 ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); in ixgbe_configure_rx_ring()
3542 ixgbe_configure_srrctl(adapter, ring); in ixgbe_configure_rx_ring()
3543 ixgbe_configure_rscctl(adapter, ring); in ixgbe_configure_rx_ring()
3561 ixgbe_rx_desc_queue_enable(adapter, ring); in ixgbe_configure_rx_ring()
3565 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) in ixgbe_setup_psrtype() argument
3567 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_psrtype()
3568 int rss_i = adapter->ring_feature[RING_F_RSS].indices; in ixgbe_setup_psrtype()
3586 for_each_set_bit(pool, &adapter->fwd_bitmask, 32) in ixgbe_setup_psrtype()
3590 static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) in ixgbe_configure_virtualization() argument
3592 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_virtualization()
3597 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_configure_virtualization()
3615 if (adapter->bridge_mode == BRIDGE_MODE_VEB) in ixgbe_configure_virtualization()
3625 switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_configure_virtualization()
3641 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), in ixgbe_configure_virtualization()
3642 adapter->num_vfs); in ixgbe_configure_virtualization()
3654 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_configure_virtualization()
3655 if (!adapter->vfinfo[i].spoofchk_enabled) in ixgbe_configure_virtualization()
3656 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); in ixgbe_configure_virtualization()
3663 ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, in ixgbe_configure_virtualization()
3664 adapter->vfinfo[i].rss_query_enabled); in ixgbe_configure_virtualization()
3668 static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) in ixgbe_set_rx_buffer_len() argument
3670 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_buffer_len()
3671 struct net_device *netdev = adapter->netdev; in ixgbe_set_rx_buffer_len()
3679 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && in ixgbe_set_rx_buffer_len()
3706 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_set_rx_buffer_len()
3707 rx_ring = adapter->rx_ring[i]; in ixgbe_set_rx_buffer_len()
3708 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_rx_buffer_len()
3715 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) in ixgbe_setup_rdrxctl() argument
3717 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_rdrxctl()
3760 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) in ixgbe_configure_rx() argument
3762 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_rx()
3769 ixgbe_setup_psrtype(adapter); in ixgbe_configure_rx()
3770 ixgbe_setup_rdrxctl(adapter); in ixgbe_configure_rx()
3775 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) in ixgbe_configure_rx()
3780 ixgbe_setup_mrqc(adapter); in ixgbe_configure_rx()
3783 ixgbe_set_rx_buffer_len(adapter); in ixgbe_configure_rx()
3789 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_configure_rx()
3790 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); in ixgbe_configure_rx()
3805 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_vlan_rx_add_vid() local
3806 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_rx_add_vid()
3809 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); in ixgbe_vlan_rx_add_vid()
3810 set_bit(vid, adapter->active_vlans); in ixgbe_vlan_rx_add_vid()
3818 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_vlan_rx_kill_vid() local
3819 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_rx_kill_vid()
3822 hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); in ixgbe_vlan_rx_kill_vid()
3823 clear_bit(vid, adapter->active_vlans); in ixgbe_vlan_rx_kill_vid()
3832 static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) in ixgbe_vlan_strip_disable() argument
3834 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_strip_disable()
3848 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_vlan_strip_disable()
3849 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_disable()
3868 static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) in ixgbe_vlan_strip_enable() argument
3870 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vlan_strip_enable()
3884 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_vlan_strip_enable()
3885 struct ixgbe_ring *ring = adapter->rx_ring[i]; in ixgbe_vlan_strip_enable()
3900 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) in ixgbe_restore_vlan() argument
3904 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); in ixgbe_restore_vlan()
3906 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in ixgbe_restore_vlan()
3907 ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in ixgbe_restore_vlan()
3921 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_write_mc_addr_list() local
3922 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_write_mc_addr_list()
3933 ixgbe_restore_vf_multicasts(adapter); in ixgbe_write_mc_addr_list()
3940 void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) in ixgbe_full_sync_mac_table() argument
3942 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_full_sync_mac_table()
3945 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) in ixgbe_full_sync_mac_table()
3946 hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, in ixgbe_full_sync_mac_table()
3947 adapter->mac_table[i].queue, in ixgbe_full_sync_mac_table()
3952 adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); in ixgbe_full_sync_mac_table()
3957 static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) in ixgbe_sync_mac_table() argument
3959 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sync_mac_table()
3962 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { in ixgbe_sync_mac_table()
3963 if (adapter->mac_table[i].state & in ixgbe_sync_mac_table()
3966 adapter->mac_table[i].addr, in ixgbe_sync_mac_table()
3967 adapter->mac_table[i].queue, in ixgbe_sync_mac_table()
3972 adapter->mac_table[i].state &= in ixgbe_sync_mac_table()
3978 static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) in ixgbe_flush_sw_mac_table() argument
3981 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_flush_sw_mac_table()
3984 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; in ixgbe_flush_sw_mac_table()
3985 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; in ixgbe_flush_sw_mac_table()
3986 eth_zero_addr(adapter->mac_table[i].addr); in ixgbe_flush_sw_mac_table()
3987 adapter->mac_table[i].queue = 0; in ixgbe_flush_sw_mac_table()
3989 ixgbe_sync_mac_table(adapter); in ixgbe_flush_sw_mac_table()
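The ixgbe_flush_sw_mac_table()/ixgbe_sync_mac_table() fragments above show the driver's software-shadowed RAR table: entries are edited in adapter->mac_table[] and flagged IXGBE_MAC_STATE_MODIFIED, and a single sync pass pushes only the modified slots to hardware. A sketch of the flush side, reassembled from the listing (the loop bound hw->mac.num_rar_entries is an assumption inferred from the neighbouring table loops, not a quoted line):

static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.num_rar_entries; i++) {
		/* mark the slot dirty and free, and clear the stored address */
		adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
		adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
		eth_zero_addr(adapter->mac_table[i].addr);
		adapter->mac_table[i].queue = 0;
	}
	/* write every modified (now empty) slot back to the hardware RARs */
	ixgbe_sync_mac_table(adapter);
}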
3992 static int ixgbe_available_rars(struct ixgbe_adapter *adapter) in ixgbe_available_rars() argument
3994 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_available_rars()
3998 if (adapter->mac_table[i].state == 0) in ixgbe_available_rars()
4005 static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, in ixgbe_mac_set_default_filter() argument
4008 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mac_set_default_filter()
4010 memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); in ixgbe_mac_set_default_filter()
4011 adapter->mac_table[0].queue = VMDQ_P(0); in ixgbe_mac_set_default_filter()
4012 adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | in ixgbe_mac_set_default_filter()
4014 hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, in ixgbe_mac_set_default_filter()
4015 adapter->mac_table[0].queue, in ixgbe_mac_set_default_filter()
4019 int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) in ixgbe_add_mac_filter() argument
4021 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_mac_filter()
4028 if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) in ixgbe_add_mac_filter()
4030 adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | in ixgbe_add_mac_filter()
4032 ether_addr_copy(adapter->mac_table[i].addr, addr); in ixgbe_add_mac_filter()
4033 adapter->mac_table[i].queue = queue; in ixgbe_add_mac_filter()
4034 ixgbe_sync_mac_table(adapter); in ixgbe_add_mac_filter()
4040 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) in ixgbe_del_mac_filter() argument
4044 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_del_mac_filter()
4050 if (ether_addr_equal(addr, adapter->mac_table[i].addr) && in ixgbe_del_mac_filter()
4051 adapter->mac_table[i].queue == queue) { in ixgbe_del_mac_filter()
4052 adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; in ixgbe_del_mac_filter()
4053 adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; in ixgbe_del_mac_filter()
4054 eth_zero_addr(adapter->mac_table[i].addr); in ixgbe_del_mac_filter()
4055 adapter->mac_table[i].queue = 0; in ixgbe_del_mac_filter()
4056 ixgbe_sync_mac_table(adapter); in ixgbe_del_mac_filter()
4073 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_write_uc_addr_list() local
4077 if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) in ixgbe_write_uc_addr_list()
4083 ixgbe_del_mac_filter(adapter, ha->addr, vfn); in ixgbe_write_uc_addr_list()
4084 ixgbe_add_mac_filter(adapter, ha->addr, vfn); in ixgbe_write_uc_addr_list()
4102 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_set_rx_mode() local
4103 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_rx_mode()
4129 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | in ixgbe_set_rx_mode()
4172 if (adapter->netdev->features & NETIF_F_RXALL) { in ixgbe_set_rx_mode()
4187 ixgbe_vlan_strip_enable(adapter); in ixgbe_set_rx_mode()
4189 ixgbe_vlan_strip_disable(adapter); in ixgbe_set_rx_mode()
4192 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) in ixgbe_napi_enable_all() argument
4196 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { in ixgbe_napi_enable_all()
4197 ixgbe_qv_init_lock(adapter->q_vector[q_idx]); in ixgbe_napi_enable_all()
4198 napi_enable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_enable_all()
4202 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) in ixgbe_napi_disable_all() argument
4206 for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { in ixgbe_napi_disable_all()
4207 napi_disable(&adapter->q_vector[q_idx]->napi); in ixgbe_napi_disable_all()
4208 while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { in ixgbe_napi_disable_all()
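ixgbe_napi_disable_all() pairs napi_disable() with the busy-poll lock: it keeps retrying ixgbe_qv_disable() until the vector is no longer owned by a busy-polling socket. The loop body is not in the listing, so the sleep below is a plausible placeholder for how the driver backs off, not a verbatim quote:

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;

	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
		napi_disable(&adapter->q_vector[q_idx]->napi);
		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
			/* vector still held by busy-poll; wait and retry */
			usleep_range(1000, 20000);
		}
	}
}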
4224 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) in ixgbe_configure_dcb() argument
4226 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_dcb()
4227 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbe_configure_dcb()
4229 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { in ixgbe_configure_dcb()
4231 netif_set_gso_max_size(adapter->netdev, 65536); in ixgbe_configure_dcb()
4236 netif_set_gso_max_size(adapter->netdev, 32768); in ixgbe_configure_dcb()
4239 if (adapter->netdev->features & NETIF_F_FCOE_MTU) in ixgbe_configure_dcb()
4244 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { in ixgbe_configure_dcb()
4245 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, in ixgbe_configure_dcb()
4247 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, in ixgbe_configure_dcb()
4249 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); in ixgbe_configure_dcb()
4250 } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { in ixgbe_configure_dcb()
4251 ixgbe_dcb_hw_ets(&adapter->hw, in ixgbe_configure_dcb()
4252 adapter->ixgbe_ieee_ets, in ixgbe_configure_dcb()
4254 ixgbe_dcb_hw_pfc_config(&adapter->hw, in ixgbe_configure_dcb()
4255 adapter->ixgbe_ieee_pfc->pfc_en, in ixgbe_configure_dcb()
4256 adapter->ixgbe_ieee_ets->prio_tc); in ixgbe_configure_dcb()
4262 u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; in ixgbe_configure_dcb()
4284 static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) in ixgbe_hpbthresh() argument
4286 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_hpbthresh()
4287 struct net_device *dev = adapter->netdev; in ixgbe_hpbthresh()
4298 (pb == ixgbe_fcoe_get_tc(adapter))) in ixgbe_hpbthresh()
4315 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_hpbthresh()
4344 static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) in ixgbe_lpbthresh() argument
4346 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_lpbthresh()
4347 struct net_device *dev = adapter->netdev; in ixgbe_lpbthresh()
4358 (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) in ixgbe_lpbthresh()
4381 static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) in ixgbe_pbthresh_setup() argument
4383 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_pbthresh_setup()
4384 int num_tc = netdev_get_num_tc(adapter->netdev); in ixgbe_pbthresh_setup()
4391 hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); in ixgbe_pbthresh_setup()
4392 hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); in ixgbe_pbthresh_setup()
4403 static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) in ixgbe_configure_pb() argument
4405 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_pb()
4407 u8 tc = netdev_get_num_tc(adapter->netdev); in ixgbe_configure_pb()
4409 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || in ixgbe_configure_pb()
4410 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) in ixgbe_configure_pb()
4411 hdrm = 32 << adapter->fdir_pballoc; in ixgbe_configure_pb()
4416 ixgbe_pbthresh_setup(adapter); in ixgbe_configure_pb()
4419 static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) in ixgbe_fdir_filter_restore() argument
4421 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fdir_filter_restore()
4425 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_restore()
4427 if (!hlist_empty(&adapter->fdir_filter_list)) in ixgbe_fdir_filter_restore()
4428 ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); in ixgbe_fdir_filter_restore()
4431 &adapter->fdir_filter_list, fdir_node) { in ixgbe_fdir_filter_restore()
4437 adapter->rx_ring[filter->action]->reg_idx); in ixgbe_fdir_filter_restore()
4440 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_restore()
4444 struct ixgbe_adapter *adapter) in ixgbe_macvlan_set_rx_mode() argument
4446 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_macvlan_set_rx_mode()
4462 ixgbe_write_uc_addr_list(adapter->netdev, pool); in ixgbe_macvlan_set_rx_mode()
4468 struct ixgbe_adapter *adapter = vadapter->real_adapter; in ixgbe_fwd_psrtype() local
4469 int rss_i = adapter->num_rx_queues_per_pool; in ixgbe_fwd_psrtype()
4470 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fwd_psrtype()
4542 struct ixgbe_adapter *adapter = vadapter->real_adapter; in ixgbe_disable_fwd_ring() local
4546 ixgbe_disable_rx_queue(adapter, rx_ring); in ixgbe_disable_fwd_ring()
4548 ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); in ixgbe_disable_fwd_ring()
4556 struct ixgbe_adapter *adapter = accel->real_adapter; in ixgbe_fwd_ring_down() local
4563 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { in ixgbe_fwd_ring_down()
4564 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_down()
4565 adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4568 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { in ixgbe_fwd_ring_down()
4569 adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; in ixgbe_fwd_ring_down()
4570 adapter->tx_ring[txbase + i]->netdev = adapter->netdev; in ixgbe_fwd_ring_down()
4580 struct ixgbe_adapter *adapter = accel->real_adapter; in ixgbe_fwd_ring_up() local
4584 if (!test_bit(accel->pool, &adapter->fwd_bitmask)) in ixgbe_fwd_ring_up()
4587 baseq = accel->pool * adapter->num_rx_queues_per_pool; in ixgbe_fwd_ring_up()
4589 accel->pool, adapter->num_rx_pools, in ixgbe_fwd_ring_up()
4590 baseq, baseq + adapter->num_rx_queues_per_pool, in ixgbe_fwd_ring_up()
4591 adapter->fwd_bitmask); in ixgbe_fwd_ring_up()
4597 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) in ixgbe_fwd_ring_up()
4598 ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_up()
4600 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { in ixgbe_fwd_ring_up()
4601 adapter->rx_ring[rxbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4602 adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
4603 ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); in ixgbe_fwd_ring_up()
4606 for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { in ixgbe_fwd_ring_up()
4607 adapter->tx_ring[txbase + i]->netdev = vdev; in ixgbe_fwd_ring_up()
4608 adapter->tx_ring[txbase + i]->l2_accel_priv = accel; in ixgbe_fwd_ring_up()
4612 adapter->num_rx_queues_per_pool, vdev->num_tx_queues); in ixgbe_fwd_ring_up()
4622 ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); in ixgbe_fwd_ring_up()
4625 ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); in ixgbe_fwd_ring_up()
4632 static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) in ixgbe_configure_dfwd() argument
4638 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { in ixgbe_configure_dfwd()
4652 static void ixgbe_configure(struct ixgbe_adapter *adapter) in ixgbe_configure() argument
4654 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure()
4656 ixgbe_configure_pb(adapter); in ixgbe_configure()
4658 ixgbe_configure_dcb(adapter); in ixgbe_configure()
4664 ixgbe_configure_virtualization(adapter); in ixgbe_configure()
4666 ixgbe_set_rx_mode(adapter->netdev); in ixgbe_configure()
4667 ixgbe_restore_vlan(adapter); in ixgbe_configure()
4678 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { in ixgbe_configure()
4679 ixgbe_init_fdir_signature_82599(&adapter->hw, in ixgbe_configure()
4680 adapter->fdir_pballoc); in ixgbe_configure()
4681 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { in ixgbe_configure()
4682 ixgbe_init_fdir_perfect_82599(&adapter->hw, in ixgbe_configure()
4683 adapter->fdir_pballoc); in ixgbe_configure()
4684 ixgbe_fdir_filter_restore(adapter); in ixgbe_configure()
4698 ixgbe_configure_fcoe(adapter); in ixgbe_configure()
4701 ixgbe_configure_tx(adapter); in ixgbe_configure()
4702 ixgbe_configure_rx(adapter); in ixgbe_configure()
4703 ixgbe_configure_dfwd(adapter); in ixgbe_configure()
4736 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) in ixgbe_sfp_link_config() argument
4744 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_sfp_link_config()
4745 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_sfp_link_config()
4747 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_link_config()
4781 static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) in ixgbe_setup_gpie() argument
4783 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_gpie()
4786 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { in ixgbe_setup_gpie()
4816 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_setup_gpie()
4819 switch (adapter->ring_feature[RING_F_VMDQ].mask) { in ixgbe_setup_gpie()
4833 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { in ixgbe_setup_gpie()
4834 switch (adapter->hw.mac.type) { in ixgbe_setup_gpie()
4847 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) in ixgbe_setup_gpie()
4858 static void ixgbe_up_complete(struct ixgbe_adapter *adapter) in ixgbe_up_complete() argument
4860 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_up_complete()
4864 ixgbe_get_hw_control(adapter); in ixgbe_up_complete()
4865 ixgbe_setup_gpie(adapter); in ixgbe_up_complete()
4867 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) in ixgbe_up_complete()
4868 ixgbe_configure_msix(adapter); in ixgbe_up_complete()
4870 ixgbe_configure_msi_and_legacy(adapter); in ixgbe_up_complete()
4877 clear_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_up_complete()
4878 ixgbe_napi_enable_all(adapter); in ixgbe_up_complete()
4881 ixgbe_sfp_link_config(adapter); in ixgbe_up_complete()
4890 ixgbe_irq_enable(adapter, true, true); in ixgbe_up_complete()
4896 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { in ixgbe_up_complete()
4904 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_up_complete()
4905 adapter->link_check_timeout = jiffies; in ixgbe_up_complete()
4906 mod_timer(&adapter->service_timer, jiffies); in ixgbe_up_complete()
4914 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) in ixgbe_reinit_locked() argument
4918 adapter->netdev->trans_start = jiffies; in ixgbe_reinit_locked()
4920 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_reinit_locked()
4922 ixgbe_down(adapter); in ixgbe_reinit_locked()
4929 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_reinit_locked()
4931 ixgbe_up(adapter); in ixgbe_reinit_locked()
4932 clear_bit(__IXGBE_RESETTING, &adapter->state); in ixgbe_reinit_locked()
4935 void ixgbe_up(struct ixgbe_adapter *adapter) in ixgbe_up() argument
4938 ixgbe_configure(adapter); in ixgbe_up()
4940 ixgbe_up_complete(adapter); in ixgbe_up()
4943 void ixgbe_reset(struct ixgbe_adapter *adapter) in ixgbe_reset() argument
4945 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_reset()
4946 struct net_device *netdev = adapter->netdev; in ixgbe_reset()
4953 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_reset()
4957 adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP | in ixgbe_reset()
4959 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_reset()
4983 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_reset()
4985 memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); in ixgbe_reset()
4986 ixgbe_flush_sw_mac_table(adapter); in ixgbe_reset()
4987 ixgbe_mac_set_default_filter(adapter, old_addr); in ixgbe_reset()
4993 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_reset()
4994 ixgbe_ptp_reset(adapter); in ixgbe_reset()
5033 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) in ixgbe_clean_all_rx_rings() argument
5037 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_clean_all_rx_rings()
5038 ixgbe_clean_rx_ring(adapter->rx_ring[i]); in ixgbe_clean_all_rx_rings()
5045 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) in ixgbe_clean_all_tx_rings() argument
5049 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_clean_all_tx_rings()
5050 ixgbe_clean_tx_ring(adapter->tx_ring[i]); in ixgbe_clean_all_tx_rings()
5053 static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter) in ixgbe_fdir_filter_exit() argument
5058 spin_lock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_exit()
5061 &adapter->fdir_filter_list, fdir_node) { in ixgbe_fdir_filter_exit()
5065 adapter->fdir_filter_count = 0; in ixgbe_fdir_filter_exit()
5067 spin_unlock(&adapter->fdir_perfect_lock); in ixgbe_fdir_filter_exit()
5070 void ixgbe_down(struct ixgbe_adapter *adapter) in ixgbe_down() argument
5072 struct net_device *netdev = adapter->netdev; in ixgbe_down()
5073 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_down()
5079 if (test_and_set_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_down()
5086 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_down()
5088 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbe_down()
5099 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { in ixgbe_down()
5111 ixgbe_irq_disable(adapter); in ixgbe_down()
5113 ixgbe_napi_disable_all(adapter); in ixgbe_down()
5115 adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | in ixgbe_down()
5117 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_down()
5119 del_timer_sync(&adapter->service_timer); in ixgbe_down()
5121 if (adapter->num_vfs) { in ixgbe_down()
5123 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); in ixgbe_down()
5126 for (i = 0 ; i < adapter->num_vfs; i++) in ixgbe_down()
5127 adapter->vfinfo[i].clear_to_send = false; in ixgbe_down()
5130 ixgbe_ping_all_vfs(adapter); in ixgbe_down()
5133 ixgbe_disable_tx_rx(adapter); in ixgbe_down()
5137 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_down()
5138 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbe_down()
5156 if (!pci_channel_offline(adapter->pdev)) in ixgbe_down()
5157 ixgbe_reset(adapter); in ixgbe_down()
5163 ixgbe_clean_all_tx_rings(adapter); in ixgbe_down()
5164 ixgbe_clean_all_rx_rings(adapter); in ixgbe_down()
5168 ixgbe_setup_dca(adapter); in ixgbe_down()
5178 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_tx_timeout() local
5181 ixgbe_tx_timeout_reset(adapter); in ixgbe_tx_timeout()
5192 static int ixgbe_sw_init(struct ixgbe_adapter *adapter) in ixgbe_sw_init() argument
5194 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sw_init()
5195 struct pci_dev *pdev = adapter->pdev; in ixgbe_sw_init()
5212 rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus()); in ixgbe_sw_init()
5213 adapter->ring_feature[RING_F_RSS].limit = rss; in ixgbe_sw_init()
5214 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; in ixgbe_sw_init()
5215 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; in ixgbe_sw_init()
5216 adapter->max_q_vectors = MAX_Q_VECTORS_82599; in ixgbe_sw_init()
5217 adapter->atr_sample_rate = 20; in ixgbe_sw_init()
5219 adapter->ring_feature[RING_F_FDIR].limit = fdir; in ixgbe_sw_init()
5220 adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; in ixgbe_sw_init()
5222 adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; in ixgbe_sw_init()
5225 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
5226 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_sw_init()
5229 adapter->fcoe.up = IXGBE_FCOE_DEFTC; in ixgbe_sw_init()
5233 adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * in ixgbe_sw_init()
5240 adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; in ixgbe_sw_init()
5241 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; in ixgbe_sw_init()
5244 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; in ixgbe_sw_init()
5246 adapter->max_q_vectors = MAX_Q_VECTORS_82598; in ixgbe_sw_init()
5247 adapter->ring_feature[RING_F_FDIR].limit = 0; in ixgbe_sw_init()
5248 adapter->atr_sample_rate = 0; in ixgbe_sw_init()
5249 adapter->fdir_pballoc = 0; in ixgbe_sw_init()
5251 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_sw_init()
5252 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; in ixgbe_sw_init()
5254 adapter->fcoe.up = 0; in ixgbe_sw_init()
5260 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
5265 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; in ixgbe_sw_init()
5270 adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; in ixgbe_sw_init()
5279 spin_lock_init(&adapter->fcoe.lock); in ixgbe_sw_init()
5283 spin_lock_init(&adapter->fdir_perfect_lock); in ixgbe_sw_init()
5290 adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; in ixgbe_sw_init()
5291 adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; in ixgbe_sw_init()
5294 adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; in ixgbe_sw_init()
5295 adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; in ixgbe_sw_init()
5301 tc = &adapter->dcb_cfg.tc_config[j]; in ixgbe_sw_init()
5310 tc = &adapter->dcb_cfg.tc_config[0]; in ixgbe_sw_init()
5314 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; in ixgbe_sw_init()
5315 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; in ixgbe_sw_init()
5316 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_sw_init()
5317 adapter->dcb_set_bitmap = 0x00; in ixgbe_sw_init()
5318 adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; in ixgbe_sw_init()
5319 memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, in ixgbe_sw_init()
5320 sizeof(adapter->temp_dcb_cfg)); in ixgbe_sw_init()
5327 ixgbe_pbthresh_setup(adapter); in ixgbe_sw_init()
5339 adapter->num_vfs = 0; in ixgbe_sw_init()
5342 adapter->num_vfs = max_vfs; in ixgbe_sw_init()
5348 adapter->rx_itr_setting = 1; in ixgbe_sw_init()
5349 adapter->tx_itr_setting = 1; in ixgbe_sw_init()
5352 adapter->tx_ring_count = IXGBE_DEFAULT_TXD; in ixgbe_sw_init()
5353 adapter->rx_ring_count = IXGBE_DEFAULT_RXD; in ixgbe_sw_init()
5356 adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; in ixgbe_sw_init()
5365 set_bit(0, &adapter->fwd_bitmask); in ixgbe_sw_init()
5366 set_bit(__IXGBE_DOWN, &adapter->state); in ixgbe_sw_init()
5434 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) in ixgbe_setup_all_tx_resources() argument
5438 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_setup_all_tx_resources()
5439 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
5451 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_setup_all_tx_resources()
5518 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) in ixgbe_setup_all_rx_resources() argument
5522 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_setup_all_rx_resources()
5523 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
5532 err = ixgbe_setup_fcoe_ddp_resources(adapter); in ixgbe_setup_all_rx_resources()
5539 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_setup_all_rx_resources()
5572 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) in ixgbe_free_all_tx_resources() argument
5576 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_free_all_tx_resources()
5577 if (adapter->tx_ring[i]->desc) in ixgbe_free_all_tx_resources()
5578 ixgbe_free_tx_resources(adapter->tx_ring[i]); in ixgbe_free_all_tx_resources()
5610 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) in ixgbe_free_all_rx_resources() argument
5615 ixgbe_free_fcoe_ddp_resources(adapter); in ixgbe_free_all_rx_resources()
5618 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbe_free_all_rx_resources()
5619 if (adapter->rx_ring[i]->desc) in ixgbe_free_all_rx_resources()
5620 ixgbe_free_rx_resources(adapter->rx_ring[i]); in ixgbe_free_all_rx_resources()
5632 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_change_mtu() local
5644 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && in ixgbe_change_mtu()
5645 (adapter->hw.mac.type == ixgbe_mac_82599EB) && in ixgbe_change_mtu()
5655 ixgbe_reinit_locked(adapter); in ixgbe_change_mtu()
5674 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_open() local
5678 if (test_bit(__IXGBE_TESTING, &adapter->state)) in ixgbe_open()
5684 err = ixgbe_setup_all_tx_resources(adapter); in ixgbe_open()
5689 err = ixgbe_setup_all_rx_resources(adapter); in ixgbe_open()
5693 ixgbe_configure(adapter); in ixgbe_open()
5695 err = ixgbe_request_irq(adapter); in ixgbe_open()
5700 if (adapter->num_rx_pools > 1) in ixgbe_open()
5701 queues = adapter->num_rx_queues_per_pool; in ixgbe_open()
5703 queues = adapter->num_tx_queues; in ixgbe_open()
5709 if (adapter->num_rx_pools > 1 && in ixgbe_open()
5710 adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) in ixgbe_open()
5713 queues = adapter->num_rx_queues; in ixgbe_open()
5718 ixgbe_ptp_init(adapter); in ixgbe_open()
5720 ixgbe_up_complete(adapter); in ixgbe_open()
5729 ixgbe_free_irq(adapter); in ixgbe_open()
5731 ixgbe_free_all_rx_resources(adapter); in ixgbe_open()
5733 ixgbe_free_all_tx_resources(adapter); in ixgbe_open()
5735 ixgbe_reset(adapter); in ixgbe_open()
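
The ixgbe_open() references trace a classic "ladder" open path: allocate Tx resources, then Rx resources, configure, request the IRQ, and finish bring-up, with labeled error exits that undo the completed steps in reverse (free IRQ, free Rx, free Tx, reset). A compact sketch of that goto-ladder shape with hypothetical step functions (the exact label names and intermediate steps in the driver are not shown in the listing and are an assumption here):

#include <stdio.h>

/* Hypothetical step functions; each returns 0 on success. */
static int setup_tx(void)         { return 0; }
static int setup_rx(void)         { return 0; }
static int request_irq_step(void) { return 0; }
static void free_rx(void)         { }
static void free_tx(void)         { }
static void reset_hw(void)        { }

static int dev_open(void)
{
        int err;

        err = setup_tx();
        if (err)
                goto err_setup_tx;

        err = setup_rx();
        if (err)
                goto err_setup_rx;

        err = request_irq_step();
        if (err)
                goto err_req_irq;

        return 0;

err_req_irq:
        free_rx();
err_setup_rx:
        free_tx();
err_setup_tx:
        reset_hw();
        return err;
}

int main(void)
{
        printf("open: %d\n", dev_open());
        return 0;
}
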
5740 static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) in ixgbe_close_suspend() argument
5742 ixgbe_ptp_suspend(adapter); in ixgbe_close_suspend()
5744 ixgbe_down(adapter); in ixgbe_close_suspend()
5745 ixgbe_free_irq(adapter); in ixgbe_close_suspend()
5747 ixgbe_free_all_tx_resources(adapter); in ixgbe_close_suspend()
5748 ixgbe_free_all_rx_resources(adapter); in ixgbe_close_suspend()
5764 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_close() local
5766 ixgbe_ptp_stop(adapter); in ixgbe_close()
5768 ixgbe_close_suspend(adapter); in ixgbe_close()
5770 ixgbe_fdir_filter_exit(adapter); in ixgbe_close()
5772 ixgbe_release_hw_control(adapter); in ixgbe_close()
5780 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); in ixgbe_resume() local
5781 struct net_device *netdev = adapter->netdev; in ixgbe_resume()
5784 adapter->hw.hw_addr = adapter->io_addr; in ixgbe_resume()
5799 clear_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_resume()
5804 ixgbe_reset(adapter); in ixgbe_resume()
5806 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_resume()
5809 err = ixgbe_init_interrupt_scheme(adapter); in ixgbe_resume()
5826 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); in __ixgbe_shutdown() local
5827 struct net_device *netdev = adapter->netdev; in __ixgbe_shutdown()
5828 struct ixgbe_hw *hw = &adapter->hw; in __ixgbe_shutdown()
5830 u32 wufc = adapter->wol; in __ixgbe_shutdown()
5839 ixgbe_close_suspend(adapter); in __ixgbe_shutdown()
5842 ixgbe_clear_interrupt_scheme(adapter); in __ixgbe_shutdown()
5893 ixgbe_release_hw_control(adapter); in __ixgbe_shutdown()
5895 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) in __ixgbe_shutdown()
5938 void ixgbe_update_stats(struct ixgbe_adapter *adapter) in ixgbe_update_stats() argument
5940 struct net_device *netdev = adapter->netdev; in ixgbe_update_stats()
5941 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_update_stats()
5942 struct ixgbe_hw_stats *hwstats = &adapter->stats; in ixgbe_update_stats()
5949 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_update_stats()
5950 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_update_stats()
5953 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { in ixgbe_update_stats()
5956 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_update_stats()
5957 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; in ixgbe_update_stats()
5958 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; in ixgbe_update_stats()
5960 adapter->rsc_total_count = rsc_count; in ixgbe_update_stats()
5961 adapter->rsc_total_flush = rsc_flush; in ixgbe_update_stats()
5964 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_update_stats()
5965 struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; in ixgbe_update_stats()
5973 adapter->non_eop_descs = non_eop_descs; in ixgbe_update_stats()
5974 adapter->alloc_rx_page_failed = alloc_rx_page_failed; in ixgbe_update_stats()
5975 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; in ixgbe_update_stats()
5976 adapter->hw_csum_rx_error = hw_csum_rx_error; in ixgbe_update_stats()
5983 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_update_stats()
5984 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_update_stats()
5990 adapter->restart_queue = restart_queue; in ixgbe_update_stats()
5991 adapter->tx_busy = tx_busy; in ixgbe_update_stats()
6045 ixgbe_update_xoff_received(adapter); in ixgbe_update_stats()
6065 adapter->hw_rx_no_dma_resources += in ixgbe_update_stats()
6084 if (adapter->fcoe.ddp_pool) { in ixgbe_update_stats()
6085 struct ixgbe_fcoe *fcoe = &adapter->fcoe; in ixgbe_update_stats()
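
The ixgbe_update_stats() references show a common aggregation pattern: bail out while the device is down or resetting, then walk the per-ring statistics and fold them into adapter-wide totals. A small sketch of that fold with a hypothetical ring_stats layout (field names chosen to echo the listed counters, not the driver's struct):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-ring and aggregate counters. */
struct ring_stats {
        unsigned long long restart_queue;
        unsigned long long tx_busy;
};

struct totals {
        unsigned long long restart_queue;
        unsigned long long tx_busy;
};

static void update_stats(const struct ring_stats *rings, int count,
                         bool down, struct totals *out)
{
        unsigned long long restart_queue = 0, tx_busy = 0;
        int i;

        if (down)               /* skip while the device is down/resetting */
                return;

        for (i = 0; i < count; i++) {
                restart_queue += rings[i].restart_queue;
                tx_busy       += rings[i].tx_busy;
        }
        out->restart_queue = restart_queue;
        out->tx_busy       = tx_busy;
}

int main(void)
{
        struct ring_stats rings[2] = { { 3, 1 }, { 2, 4 } };
        struct totals t = { 0, 0 };

        update_stats(rings, 2, false, &t);
        printf("restart_queue=%llu tx_busy=%llu\n", t.restart_queue, t.tx_busy);
        return 0;
}
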
6156 static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) in ixgbe_fdir_reinit_subtask() argument
6158 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_fdir_reinit_subtask()
6161 if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) in ixgbe_fdir_reinit_subtask()
6164 adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT; in ixgbe_fdir_reinit_subtask()
6167 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_fdir_reinit_subtask()
6171 if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) in ixgbe_fdir_reinit_subtask()
6174 adapter->fdir_overflow++; in ixgbe_fdir_reinit_subtask()
6177 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_fdir_reinit_subtask()
6179 &(adapter->tx_ring[i]->state)); in ixgbe_fdir_reinit_subtask()
6197 static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) in ixgbe_check_hang_subtask() argument
6199 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_hang_subtask()
6204 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_check_hang_subtask()
6205 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_check_hang_subtask()
6206 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_check_hang_subtask()
6210 if (netif_carrier_ok(adapter->netdev)) { in ixgbe_check_hang_subtask()
6211 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbe_check_hang_subtask()
6212 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbe_check_hang_subtask()
6215 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { in ixgbe_check_hang_subtask()
6225 for (i = 0; i < adapter->num_q_vectors; i++) { in ixgbe_check_hang_subtask()
6226 struct ixgbe_q_vector *qv = adapter->q_vector[i]; in ixgbe_check_hang_subtask()
6233 ixgbe_irq_rearm_queues(adapter, eics); in ixgbe_check_hang_subtask()
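
The ixgbe_check_hang_subtask() references end with a loop over the adapter's q_vectors followed by ixgbe_irq_rearm_queues(adapter, eics), i.e. an interrupt-cause mask is accumulated and then written back to re-fire those vectors. The condition under which a vector contributes a bit is not visible in the listing, so the sketch below assumes "vector has rings attached"; names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical interrupt-vector descriptor. */
struct q_vector {
        unsigned int idx;
        int          has_rings;   /* nonzero if Rx/Tx rings are attached (assumption) */
};

/* Stand-in for writing the "interrupt cause set" mask to hardware. */
static void rearm_queues(uint64_t eics)
{
        printf("rearm mask: 0x%llx\n", (unsigned long long)eics);
}

static void check_hang(const struct q_vector *vecs, int count)
{
        uint64_t eics = 0;
        int i;

        /* Collect a bit for every vector that actually services rings. */
        for (i = 0; i < count; i++)
                if (vecs[i].has_rings)
                        eics |= 1ULL << vecs[i].idx;

        rearm_queues(eics);
}

int main(void)
{
        struct q_vector vecs[3] = { { 0, 1 }, { 1, 0 }, { 2, 1 } };

        check_hang(vecs, 3);
        return 0;
}
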
6241 static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) in ixgbe_watchdog_update_link() argument
6243 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_update_link()
6244 u32 link_speed = adapter->link_speed; in ixgbe_watchdog_update_link()
6245 bool link_up = adapter->link_up; in ixgbe_watchdog_update_link()
6246 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; in ixgbe_watchdog_update_link()
6248 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) in ixgbe_watchdog_update_link()
6259 if (adapter->ixgbe_ieee_pfc) in ixgbe_watchdog_update_link()
6260 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); in ixgbe_watchdog_update_link()
6262 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { in ixgbe_watchdog_update_link()
6264 ixgbe_set_rx_drop_en(adapter); in ixgbe_watchdog_update_link()
6268 time_after(jiffies, (adapter->link_check_timeout + in ixgbe_watchdog_update_link()
6270 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_watchdog_update_link()
6275 adapter->link_up = link_up; in ixgbe_watchdog_update_link()
6276 adapter->link_speed = link_speed; in ixgbe_watchdog_update_link()
6279 static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) in ixgbe_update_default_up() argument
6282 struct net_device *netdev = adapter->netdev; in ixgbe_update_default_up()
6289 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) in ixgbe_update_default_up()
6292 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; in ixgbe_update_default_up()
6301 static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) in ixgbe_watchdog_link_is_up() argument
6303 struct net_device *netdev = adapter->netdev; in ixgbe_watchdog_link_is_up()
6304 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_link_is_up()
6307 u32 link_speed = adapter->link_speed; in ixgbe_watchdog_link_is_up()
6314 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_watchdog_link_is_up()
6340 adapter->last_rx_ptp_check = jiffies; in ixgbe_watchdog_link_is_up()
6342 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_watchdog_link_is_up()
6343 ixgbe_ptp_start_cyclecounter(adapter); in ixgbe_watchdog_link_is_up()
6358 ixgbe_check_vf_rate_limit(adapter); in ixgbe_watchdog_link_is_up()
6361 netif_tx_wake_all_queues(adapter->netdev); in ixgbe_watchdog_link_is_up()
6365 netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { in ixgbe_watchdog_link_is_up()
6376 ixgbe_update_default_up(adapter); in ixgbe_watchdog_link_is_up()
6379 ixgbe_ping_all_vfs(adapter); in ixgbe_watchdog_link_is_up()
6387 static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) in ixgbe_watchdog_link_is_down() argument
6389 struct net_device *netdev = adapter->netdev; in ixgbe_watchdog_link_is_down()
6390 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_watchdog_link_is_down()
6392 adapter->link_up = false; in ixgbe_watchdog_link_is_down()
6393 adapter->link_speed = 0; in ixgbe_watchdog_link_is_down()
6401 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_watchdog_link_is_down()
6403 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) in ixgbe_watchdog_link_is_down()
6404 ixgbe_ptp_start_cyclecounter(adapter); in ixgbe_watchdog_link_is_down()
6410 ixgbe_ping_all_vfs(adapter); in ixgbe_watchdog_link_is_down()
6413 static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) in ixgbe_ring_tx_pending() argument
6417 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_ring_tx_pending()
6418 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; in ixgbe_ring_tx_pending()
6427 static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) in ixgbe_vf_tx_pending() argument
6429 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_vf_tx_pending()
6430 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; in ixgbe_vf_tx_pending()
6435 if (!adapter->num_vfs) in ixgbe_vf_tx_pending()
6442 for (i = 0; i < adapter->num_vfs; i++) { in ixgbe_vf_tx_pending()
6461 static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) in ixgbe_watchdog_flush_tx() argument
6463 if (!netif_carrier_ok(adapter->netdev)) { in ixgbe_watchdog_flush_tx()
6464 if (ixgbe_ring_tx_pending(adapter) || in ixgbe_watchdog_flush_tx()
6465 ixgbe_vf_tx_pending(adapter)) { in ixgbe_watchdog_flush_tx()
6472 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; in ixgbe_watchdog_flush_tx()
6478 static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, in ixgbe_issue_vf_flr() argument
6490 static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) in ixgbe_check_for_bad_vf() argument
6492 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_check_for_bad_vf()
6493 struct pci_dev *pdev = adapter->pdev; in ixgbe_check_for_bad_vf()
6499 if (!(netif_carrier_ok(adapter->netdev))) in ixgbe_check_for_bad_vf()
6530 ixgbe_issue_vf_flr(adapter, vfdev); in ixgbe_check_for_bad_vf()
6537 static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) in ixgbe_spoof_check() argument
6542 if (adapter->hw.mac.type == ixgbe_mac_82598EB || in ixgbe_spoof_check()
6543 adapter->num_vfs == 0) in ixgbe_spoof_check()
6546 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); in ixgbe_spoof_check()
6558 static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter) in ixgbe_spoof_check() argument
6563 ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter) in ixgbe_check_for_bad_vf() argument
6573 static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) in ixgbe_watchdog_subtask() argument
6576 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_watchdog_subtask()
6577 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_watchdog_subtask()
6578 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_watchdog_subtask()
6581 ixgbe_watchdog_update_link(adapter); in ixgbe_watchdog_subtask()
6583 if (adapter->link_up) in ixgbe_watchdog_subtask()
6584 ixgbe_watchdog_link_is_up(adapter); in ixgbe_watchdog_subtask()
6586 ixgbe_watchdog_link_is_down(adapter); in ixgbe_watchdog_subtask()
6588 ixgbe_check_for_bad_vf(adapter); in ixgbe_watchdog_subtask()
6589 ixgbe_spoof_check(adapter); in ixgbe_watchdog_subtask()
6590 ixgbe_update_stats(adapter); in ixgbe_watchdog_subtask()
6592 ixgbe_watchdog_flush_tx(adapter); in ixgbe_watchdog_subtask()
6599 static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) in ixgbe_sfp_detection_subtask() argument
6601 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sfp_detection_subtask()
6605 if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && in ixgbe_sfp_detection_subtask()
6606 !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) in ixgbe_sfp_detection_subtask()
6610 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_sfp_detection_subtask()
6620 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_detection_subtask()
6628 if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) in ixgbe_sfp_detection_subtask()
6631 adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET; in ixgbe_sfp_detection_subtask()
6646 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_sfp_detection_subtask()
6650 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_sfp_detection_subtask()
6653 (adapter->netdev->reg_state == NETREG_REGISTERED)) { in ixgbe_sfp_detection_subtask()
6658 unregister_netdev(adapter->netdev); in ixgbe_sfp_detection_subtask()
6666 static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) in ixgbe_sfp_link_config_subtask() argument
6668 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_sfp_link_config_subtask()
6672 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG)) in ixgbe_sfp_link_config_subtask()
6676 if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) in ixgbe_sfp_link_config_subtask()
6679 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; in ixgbe_sfp_link_config_subtask()
6695 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; in ixgbe_sfp_link_config_subtask()
6696 adapter->link_check_timeout = jiffies; in ixgbe_sfp_link_config_subtask()
6697 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); in ixgbe_sfp_link_config_subtask()
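
Both SFP subtasks listed above guard their bodies with test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state) and release it with clear_bit(): the state bit doubles as a non-blocking try-lock so only one subtask touches the SFP at a time. A userspace sketch of that guard using C11 atomics (the helper names below are stand-ins, not the kernel bitops API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IN_SFP_INIT  (1UL << 0)

static atomic_ulong state;

/* Returns true if the bit was already set (someone else holds the "lock"). */
static bool test_and_set_bit_ul(unsigned long mask, atomic_ulong *word)
{
        return atomic_fetch_or(word, mask) & mask;
}

static void clear_bit_ul(unsigned long mask, atomic_ulong *word)
{
        atomic_fetch_and(word, ~mask);
}

static void sfp_subtask(void)
{
        if (test_and_set_bit_ul(IN_SFP_INIT, &state))
                return;                 /* another subtask is already in here */

        /* ... probe the module / configure the link ... */

        clear_bit_ul(IN_SFP_INIT, &state);
}

int main(void)
{
        sfp_subtask();
        printf("state after subtask: 0x%lx\n", atomic_load(&state));
        return 0;
}
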
6706 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; in ixgbe_service_timer() local
6710 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) in ixgbe_service_timer()
6716 mod_timer(&adapter->service_timer, next_event_offset + jiffies); in ixgbe_service_timer()
6718 ixgbe_service_event_schedule(adapter); in ixgbe_service_timer()
6721 static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) in ixgbe_reset_subtask() argument
6723 if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) in ixgbe_reset_subtask()
6726 adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; in ixgbe_reset_subtask()
6729 if (test_bit(__IXGBE_DOWN, &adapter->state) || in ixgbe_reset_subtask()
6730 test_bit(__IXGBE_REMOVING, &adapter->state) || in ixgbe_reset_subtask()
6731 test_bit(__IXGBE_RESETTING, &adapter->state)) in ixgbe_reset_subtask()
6734 ixgbe_dump(adapter); in ixgbe_reset_subtask()
6735 netdev_err(adapter->netdev, "Reset adapter\n"); in ixgbe_reset_subtask()
6736 adapter->tx_timeout_count++; in ixgbe_reset_subtask()
6739 ixgbe_reinit_locked(adapter); in ixgbe_reset_subtask()
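
ixgbe_watchdog_flush_tx() above only requests a reset by setting IXGBE_FLAG2_RESET_REQUESTED; ixgbe_reset_subtask() later consumes that flag, clears it, and performs the reinit unless the device is already down, being removed, or resetting. A small sketch of that request/consume split (flag names and helpers are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_RESET_REQUESTED  (1u << 0)

static unsigned int flags;
static bool device_down;

static void request_reset(void)
{
        flags |= FLAG_RESET_REQUESTED;     /* producer: just record the request */
}

static void reset_subtask(void)
{
        if (!(flags & FLAG_RESET_REQUESTED))
                return;
        flags &= ~FLAG_RESET_REQUESTED;    /* consume the request */

        if (device_down)                   /* nothing to do if already down */
                return;

        printf("reinitializing device\n");
}

int main(void)
{
        request_reset();
        reset_subtask();                   /* prints once */
        reset_subtask();                   /* request already consumed: no-op */
        return 0;
}
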
6749 struct ixgbe_adapter *adapter = container_of(work, in ixgbe_service_task() local
6752 if (ixgbe_removed(adapter->hw.hw_addr)) { in ixgbe_service_task()
6753 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { in ixgbe_service_task()
6755 ixgbe_down(adapter); in ixgbe_service_task()
6758 ixgbe_service_event_complete(adapter); in ixgbe_service_task()
6761 ixgbe_reset_subtask(adapter); in ixgbe_service_task()
6762 ixgbe_sfp_detection_subtask(adapter); in ixgbe_service_task()
6763 ixgbe_sfp_link_config_subtask(adapter); in ixgbe_service_task()
6764 ixgbe_check_overtemp_subtask(adapter); in ixgbe_service_task()
6765 ixgbe_watchdog_subtask(adapter); in ixgbe_service_task()
6766 ixgbe_fdir_reinit_subtask(adapter); in ixgbe_service_task()
6767 ixgbe_check_hang_subtask(adapter); in ixgbe_service_task()
6769 if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { in ixgbe_service_task()
6770 ixgbe_ptp_overflow_check(adapter); in ixgbe_service_task()
6771 ixgbe_ptp_rx_hang(adapter); in ixgbe_service_task()
6774 ixgbe_service_event_complete(adapter); in ixgbe_service_task()
7233 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, in ixgbe_atr()
7242 struct ixgbe_adapter *adapter; in ixgbe_select_queue() local
7259 adapter = netdev_priv(dev); in ixgbe_select_queue()
7261 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) in ixgbe_select_queue()
7267 f = &adapter->ring_feature[RING_F_FCOE]; in ixgbe_select_queue()
7282 struct ixgbe_adapter *adapter, in ixgbe_xmit_frame_ring() argument
7332 adapter->ptp_clock && in ixgbe_xmit_frame_ring()
7334 &adapter->state)) { in ixgbe_xmit_frame_ring()
7339 adapter->ptp_tx_skb = skb_get(skb); in ixgbe_xmit_frame_ring()
7340 adapter->ptp_tx_start = jiffies; in ixgbe_xmit_frame_ring()
7341 schedule_work(&adapter->ptp_tx_work); in ixgbe_xmit_frame_ring()
7351 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_xmit_frame_ring()
7356 if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && in ixgbe_xmit_frame_ring()
7419 struct ixgbe_adapter *adapter = netdev_priv(netdev); in __ixgbe_xmit_frame() local
7429 tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; in __ixgbe_xmit_frame()
7431 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); in __ixgbe_xmit_frame()
7449 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_set_mac() local
7450 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_set_mac()
7457 ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); in ixgbe_set_mac()
7461 ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); in ixgbe_set_mac()
7468 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_mdio_read() local
7469 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mdio_read()
7484 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_mdio_write() local
7485 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_mdio_write()
7494 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_ioctl() local
7498 return ixgbe_ptp_set_ts_config(adapter, req); in ixgbe_ioctl()
7500 return ixgbe_ptp_get_ts_config(adapter, req); in ixgbe_ioctl()
7502 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); in ixgbe_ioctl()
7516 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_add_sanmac_netdev() local
7517 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_sanmac_netdev()
7540 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_del_sanmac_netdev() local
7541 struct ixgbe_mac_info *mac = &adapter->hw.mac; in ixgbe_del_sanmac_netdev()
7559 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_netpoll() local
7563 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_netpoll()
7567 for (i = 0; i < adapter->num_q_vectors; i++) in ixgbe_netpoll()
7568 ixgbe_msix_clean_rings(0, adapter->q_vector[i]); in ixgbe_netpoll()
7575 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_get_stats64() local
7579 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbe_get_stats64()
7580 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); in ixgbe_get_stats64()
7595 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbe_get_stats64()
7596 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); in ixgbe_get_stats64()
7629 static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) in ixgbe_validate_rtr() argument
7631 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_validate_rtr()
7664 static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) in ixgbe_set_prio_tc_map() argument
7666 struct net_device *dev = adapter->netdev; in ixgbe_set_prio_tc_map()
7667 struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; in ixgbe_set_prio_tc_map()
7668 struct ieee_ets *ets = adapter->ixgbe_ieee_ets; in ixgbe_set_prio_tc_map()
7674 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) in ixgbe_set_prio_tc_map()
7692 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_setup_tc() local
7693 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_setup_tc()
7697 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || in ixgbe_setup_tc()
7702 pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); in ixgbe_setup_tc()
7703 if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) in ixgbe_setup_tc()
7712 ixgbe_clear_interrupt_scheme(adapter); in ixgbe_setup_tc()
7717 ixgbe_set_prio_tc_map(adapter); in ixgbe_setup_tc()
7719 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; in ixgbe_setup_tc()
7721 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { in ixgbe_setup_tc()
7722 adapter->last_lfc_mode = adapter->hw.fc.requested_mode; in ixgbe_setup_tc()
7723 adapter->hw.fc.requested_mode = ixgbe_fc_none; in ixgbe_setup_tc()
7728 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_setup_tc()
7729 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; in ixgbe_setup_tc()
7731 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; in ixgbe_setup_tc()
7733 adapter->temp_dcb_cfg.pfc_mode_enable = false; in ixgbe_setup_tc()
7734 adapter->dcb_cfg.pfc_mode_enable = false; in ixgbe_setup_tc()
7737 ixgbe_validate_rtr(adapter, tc); in ixgbe_setup_tc()
7740 ixgbe_init_interrupt_scheme(adapter); in ixgbe_setup_tc()
7749 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) in ixgbe_sriov_reinit() argument
7751 struct net_device *netdev = adapter->netdev; in ixgbe_sriov_reinit()
7761 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_do_reset() local
7764 ixgbe_reinit_locked(adapter); in ixgbe_do_reset()
7766 ixgbe_reset(adapter); in ixgbe_do_reset()
7772 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_fix_features() local
7779 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) in ixgbe_fix_features()
7788 struct ixgbe_adapter *adapter = netdev_priv(netdev); in ixgbe_set_features() local
7794 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_set_features()
7796 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; in ixgbe_set_features()
7797 } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && in ixgbe_set_features()
7798 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { in ixgbe_set_features()
7799 if (adapter->rx_itr_setting == 1 || in ixgbe_set_features()
7800 adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { in ixgbe_set_features()
7801 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; in ixgbe_set_features()
7816 if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) in ixgbe_set_features()
7819 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_features()
7820 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; in ixgbe_set_features()
7824 if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) in ixgbe_set_features()
7827 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; in ixgbe_set_features()
7830 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) in ixgbe_set_features()
7838 if (adapter->ring_feature[RING_F_RSS].limit <= 1) in ixgbe_set_features()
7842 if (!adapter->atr_sample_rate) in ixgbe_set_features()
7845 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; in ixgbe_set_features()
7850 ixgbe_vlan_strip_enable(adapter); in ixgbe_set_features()
7852 ixgbe_vlan_strip_disable(adapter); in ixgbe_set_features()
7873 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_add_vxlan_port() local
7874 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_add_vxlan_port()
7880 if (adapter->vxlan_port == new_port) { in ixgbe_add_vxlan_port()
7885 if (adapter->vxlan_port) { in ixgbe_add_vxlan_port()
7892 adapter->vxlan_port = new_port; in ixgbe_add_vxlan_port()
7905 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_del_vxlan_port() local
7906 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_del_vxlan_port()
7912 if (adapter->vxlan_port != new_port) { in ixgbe_del_vxlan_port()
7918 adapter->vxlan_port = 0; in ixgbe_del_vxlan_port()
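
The add/del VXLAN port callbacks keep a single cached offloaded port: adding the same port again is a no-op, adding a second distinct port is refused while one is cached, and deleting clears the cache only when the port matches. A sketch of that single-slot cache; the return conventions and helper names are assumptions, and the hardware register write is only indicated by a comment.

#include <stdint.h>
#include <stdio.h>

static uint16_t vxlan_port;     /* 0 means no port is offloaded */

static int add_vxlan_port(uint16_t new_port)
{
        if (vxlan_port == new_port)
                return 0;                       /* already offloaded */
        if (vxlan_port)
                return -1;                      /* only one port slot available */
        vxlan_port = new_port;                  /* program the hardware here */
        return 0;
}

static int del_vxlan_port(uint16_t port)
{
        if (vxlan_port != port)
                return -1;                      /* not the offloaded port */
        vxlan_port = 0;                         /* clear the hardware register too */
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", add_vxlan_port(4789),
               add_vxlan_port(8472), del_vxlan_port(4789));
        return 0;
}
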
7943 static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, in ixgbe_configure_bridge_mode() argument
7946 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_configure_bridge_mode()
7953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); in ixgbe_configure_bridge_mode()
7966 num_pools = adapter->num_vfs + adapter->num_rx_pools; in ixgbe_configure_bridge_mode()
7976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, in ixgbe_configure_bridge_mode()
7983 if (!adapter->num_vfs) in ixgbe_configure_bridge_mode()
7990 num_pools = adapter->num_vfs + adapter->num_rx_pools; in ixgbe_configure_bridge_mode()
8002 adapter->bridge_mode = mode; in ixgbe_configure_bridge_mode()
8013 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_ndo_bridge_setlink() local
8017 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_ndo_bridge_setlink()
8035 status = ixgbe_configure_bridge_mode(adapter, mode); in ixgbe_ndo_bridge_setlink()
8049 struct ixgbe_adapter *adapter = netdev_priv(dev); in ixgbe_ndo_bridge_getlink() local
8051 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) in ixgbe_ndo_bridge_getlink()
8055 adapter->bridge_mode, 0, 0, nlflags); in ixgbe_ndo_bridge_getlink()
8061 struct ixgbe_adapter *adapter = netdev_priv(pdev); in ixgbe_fwd_add() local
8062 int used_pools = adapter->num_vfs + adapter->num_rx_pools; in ixgbe_fwd_add()
8089 if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && in ixgbe_fwd_add()
8090 adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || in ixgbe_fwd_add()
8091 (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) in ixgbe_fwd_add()
8098 pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); in ixgbe_fwd_add()
8099 adapter->num_rx_pools++; in ixgbe_fwd_add()
8100 set_bit(pool, &adapter->fwd_bitmask); in ixgbe_fwd_add()
8101 limit = find_last_bit(&adapter->fwd_bitmask, 32); in ixgbe_fwd_add()
8104 adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; in ixgbe_fwd_add()
8105 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; in ixgbe_fwd_add()
8106 adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; in ixgbe_fwd_add()
8113 fwd_adapter->real_adapter = adapter; in ixgbe_fwd_add()
8123 clear_bit(pool, &adapter->fwd_bitmask); in ixgbe_fwd_add()
8124 adapter->num_rx_pools--; in ixgbe_fwd_add()
8132 struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; in ixgbe_fwd_del() local
8135 clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); in ixgbe_fwd_del()
8136 adapter->num_rx_pools--; in ixgbe_fwd_del()
8138 limit = find_last_bit(&adapter->fwd_bitmask, 32); in ixgbe_fwd_del()
8139 adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; in ixgbe_fwd_del()
8143 fwd_adapter->pool, adapter->num_rx_pools, in ixgbe_fwd_del()
8145 fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, in ixgbe_fwd_del()
8146 adapter->fwd_bitmask); in ixgbe_fwd_del()
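
The ixgbe_fwd_add()/ixgbe_fwd_del() references manage macvlan offload pools through a bitmap: find_first_zero_bit picks the next free pool, set_bit/clear_bit claim and release it, num_rx_pools tracks the count, and the VMDq limit is recomputed as find_last_bit + 1. A userspace sketch of that bitmap allocator; the assumption that bit 0 is pre-claimed follows the set_bit(0, &adapter->fwd_bitmask) line listed for ixgbe_sw_init() above, and the starting pool count of 1 is an illustrative guess.

#include <stdio.h>

#define MAX_POOLS 32

static unsigned long fwd_bitmask = 1UL << 0;   /* pool 0 is the default pool */
static int num_rx_pools = 1;

/* Smallest clear bit, i.e. the next free pool (MAX_POOLS if none). */
static int find_first_zero(unsigned long mask)
{
        int i;

        for (i = 0; i < MAX_POOLS; i++)
                if (!(mask & (1UL << i)))
                        return i;
        return MAX_POOLS;
}

/* Highest set bit, used to size the VMDq limit (-1 if empty). */
static int find_last_set(unsigned long mask)
{
        int i;

        for (i = MAX_POOLS - 1; i >= 0; i--)
                if (mask & (1UL << i))
                        return i;
        return -1;
}

static int fwd_add(void)
{
        int pool = find_first_zero(fwd_bitmask);

        if (pool >= MAX_POOLS)
                return -1;
        fwd_bitmask |= 1UL << pool;
        num_rx_pools++;
        return pool;
}

static void fwd_del(int pool)
{
        fwd_bitmask &= ~(1UL << pool);
        num_rx_pools--;
}

int main(void)
{
        int pool = fwd_add();

        printf("pool=%d limit=%d pools=%d\n",
               pool, find_last_set(fwd_bitmask) + 1, num_rx_pools);
        fwd_del(pool);
        printf("after del: limit=%d pools=%d\n",
               find_last_set(fwd_bitmask) + 1, num_rx_pools);
        return 0;
}
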
8208 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) in ixgbe_enumerate_functions() argument
8210 struct pci_dev *entry, *pdev = adapter->pdev; in ixgbe_enumerate_functions()
8217 if (ixgbe_pcie_from_parent(&adapter->hw)) in ixgbe_enumerate_functions()
8220 list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) { in ixgbe_enumerate_functions()
8251 int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, in ixgbe_wol_supported() argument
8254 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_wol_supported()
8255 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; in ixgbe_wol_supported()
8310 static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) in ixgbe_get_platform_mac_addr() argument
8313 struct device_node *dp = pci_device_to_OF_node(adapter->pdev); in ixgbe_get_platform_mac_addr()
8314 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_get_platform_mac_addr()
8343 struct ixgbe_adapter *adapter = NULL; in ixgbe_probe() local
8410 adapter = netdev_priv(netdev); in ixgbe_probe()
8412 adapter->netdev = netdev; in ixgbe_probe()
8413 adapter->pdev = pdev; in ixgbe_probe()
8414 hw = &adapter->hw; in ixgbe_probe()
8415 hw->back = adapter; in ixgbe_probe()
8416 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in ixgbe_probe()
8420 adapter->io_addr = hw->hw_addr; in ixgbe_probe()
8460 err = ixgbe_sw_init(adapter); in ixgbe_probe()
8465 switch (adapter->hw.mac.type) { in ixgbe_probe()
8470 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_probe()
8480 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { in ixgbe_probe()
8507 if (adapter->hw.mac.type == ixgbe_mac_82598EB) in ixgbe_probe()
8513 ixgbe_enable_sriov(adapter); in ixgbe_probe()
8529 switch (adapter->hw.mac.type) { in ixgbe_probe()
8554 switch (adapter->hw.mac.type) { in ixgbe_probe()
8568 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { in ixgbe_probe()
8574 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; in ixgbe_probe()
8579 adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; in ixgbe_probe()
8594 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) in ixgbe_probe()
8596 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) in ixgbe_probe()
8606 ixgbe_get_platform_mac_addr(adapter); in ixgbe_probe()
8616 ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); in ixgbe_probe()
8618 setup_timer(&adapter->service_timer, &ixgbe_service_timer, in ixgbe_probe()
8619 (unsigned long) adapter); in ixgbe_probe()
8625 INIT_WORK(&adapter->service_task, ixgbe_service_task); in ixgbe_probe()
8626 set_bit(__IXGBE_SERVICE_INITED, &adapter->state); in ixgbe_probe()
8627 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); in ixgbe_probe()
8629 err = ixgbe_init_interrupt_scheme(adapter); in ixgbe_probe()
8634 adapter->wol = 0; in ixgbe_probe()
8635 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); in ixgbe_probe()
8636 hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, in ixgbe_probe()
8639 adapter->wol = IXGBE_WUFC_MAG; in ixgbe_probe()
8641 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); in ixgbe_probe()
8644 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); in ixgbe_probe()
8645 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); in ixgbe_probe()
8650 ixgbe_get_parent_bus_info(adapter); in ixgbe_probe()
8659 expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); in ixgbe_probe()
8662 expected_gts = ixgbe_enumerate_functions(adapter) * 10; in ixgbe_probe()
8668 ixgbe_check_minimum_link(adapter, expected_gts); in ixgbe_probe()
8699 pci_set_drvdata(pdev, adapter); in ixgbe_probe()
8710 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; in ixgbe_probe()
8711 ixgbe_setup_dca(adapter); in ixgbe_probe()
8714 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { in ixgbe_probe()
8715 e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); in ixgbe_probe()
8716 for (i = 0; i < adapter->num_vfs; i++) in ixgbe_probe()
8733 if (ixgbe_sysfs_init(adapter)) in ixgbe_probe()
8737 ixgbe_dbg_adapter_init(adapter); in ixgbe_probe()
8748 ixgbe_release_hw_control(adapter); in ixgbe_probe()
8749 ixgbe_clear_interrupt_scheme(adapter); in ixgbe_probe()
8751 ixgbe_disable_sriov(adapter); in ixgbe_probe()
8752 adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; in ixgbe_probe()
8753 iounmap(adapter->io_addr); in ixgbe_probe()
8754 kfree(adapter->mac_table); in ixgbe_probe()
8756 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_probe()
8763 if (!adapter || disable_dev) in ixgbe_probe()
8779 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); in ixgbe_remove() local
8784 if (!adapter) in ixgbe_remove()
8787 netdev = adapter->netdev; in ixgbe_remove()
8788 ixgbe_dbg_adapter_exit(adapter); in ixgbe_remove()
8790 set_bit(__IXGBE_REMOVING, &adapter->state); in ixgbe_remove()
8791 cancel_work_sync(&adapter->service_task); in ixgbe_remove()
8795 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { in ixgbe_remove()
8796 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; in ixgbe_remove()
8798 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); in ixgbe_remove()
8803 ixgbe_sysfs_exit(adapter); in ixgbe_remove()
8818 ixgbe_disable_sriov(adapter); in ixgbe_remove()
8820 ixgbe_clear_interrupt_scheme(adapter); in ixgbe_remove()
8822 ixgbe_release_hw_control(adapter); in ixgbe_remove()
8825 kfree(adapter->ixgbe_ieee_pfc); in ixgbe_remove()
8826 kfree(adapter->ixgbe_ieee_ets); in ixgbe_remove()
8829 iounmap(adapter->io_addr); in ixgbe_remove()
8835 kfree(adapter->mac_table); in ixgbe_remove()
8836 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_remove()
8856 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); in ixgbe_io_error_detected() local
8857 struct net_device *netdev = adapter->netdev; in ixgbe_io_error_detected()
8860 struct ixgbe_hw *hw = &adapter->hw; in ixgbe_io_error_detected()
8866 if (adapter->hw.mac.type == ixgbe_mac_82598EB || in ixgbe_io_error_detected()
8867 adapter->num_vfs == 0) in ixgbe_io_error_detected()
8902 switch (adapter->hw.mac.type) { in ixgbe_io_error_detected()
8934 ixgbe_issue_vf_flr(adapter, vfdev); in ixgbe_io_error_detected()
8948 adapter->vferr_refcount++; in ixgbe_io_error_detected()
8954 if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) in ixgbe_io_error_detected()
8966 ixgbe_down(adapter); in ixgbe_io_error_detected()
8968 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) in ixgbe_io_error_detected()
8984 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); in ixgbe_io_slot_reset() local
8993 clear_bit(__IXGBE_DISABLED, &adapter->state); in ixgbe_io_slot_reset()
8994 adapter->hw.hw_addr = adapter->io_addr; in ixgbe_io_slot_reset()
9001 ixgbe_reset(adapter); in ixgbe_io_slot_reset()
9002 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); in ixgbe_io_slot_reset()
9025 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); in ixgbe_io_resume() local
9026 struct net_device *netdev = adapter->netdev; in ixgbe_io_resume()
9029 if (adapter->vferr_refcount) { in ixgbe_io_resume()
9031 adapter->vferr_refcount--; in ixgbe_io_resume()
9037 ixgbe_up(adapter); in ixgbe_io_resume()