Lines Matching refs:adapter
(Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark lines where the identifier is declared as a function argument or a local variable.)

143 static void igb_set_uta(struct igb_adapter *adapter);
168 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
180 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
231 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
364 static void igb_dump(struct igb_adapter *adapter) in igb_dump() argument
366 struct net_device *netdev = adapter->netdev; in igb_dump()
367 struct e1000_hw *hw = &adapter->hw; in igb_dump()
377 if (!netif_msg_hw(adapter)) in igb_dump()
382 dev_info(&adapter->pdev->dev, "Net device Info\n"); in igb_dump()
389 dev_info(&adapter->pdev->dev, "Register Dump\n"); in igb_dump()
400 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in igb_dump()
402 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
404 tx_ring = adapter->tx_ring[n]; in igb_dump()
415 if (!netif_msg_tx_done(adapter)) in igb_dump()
418 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in igb_dump()
431 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
432 tx_ring = adapter->tx_ring[n]; in igb_dump()
463 if (netif_msg_pktdata(adapter) && buffer_info->skb) in igb_dump()
474 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in igb_dump()
476 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
477 rx_ring = adapter->rx_ring[n]; in igb_dump()
483 if (!netif_msg_rx_status(adapter)) in igb_dump()
486 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in igb_dump()
509 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
510 rx_ring = adapter->rx_ring[n]; in igb_dump()
547 if (netif_msg_pktdata(adapter) && in igb_dump()
573 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_get_i2c_data() local
574 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_data()
589 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_set_i2c_data() local
590 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_data()
614 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_set_i2c_clk() local
615 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_clk()
637 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_get_i2c_clk() local
638 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_clk()
661 struct igb_adapter *adapter = hw->back; in igb_get_hw_dev() local
662 return adapter->netdev; in igb_get_hw_dev()
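
The two entries above (661-662) show the back-pointer idiom used throughout this listing: struct e1000_hw carries a "back" pointer to its owning igb_adapter, which in turn holds the net_device. A minimal sketch of that accessor, reconstructed only from the lines shown here (the signature is inferred, not shown in the listing):

	struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
	{
		struct igb_adapter *adapter = hw->back;	/* hw -> adapter back-pointer */

		return adapter->netdev;			/* adapter -> net_device */
	}
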
712 static void igb_cache_ring_register(struct igb_adapter *adapter) in igb_cache_ring_register() argument
715 u32 rbase_offset = adapter->vfs_allocated_count; in igb_cache_ring_register()
717 switch (adapter->hw.mac.type) { in igb_cache_ring_register()
724 if (adapter->vfs_allocated_count) { in igb_cache_ring_register()
725 for (; i < adapter->rss_queues; i++) in igb_cache_ring_register()
726 adapter->rx_ring[i]->reg_idx = rbase_offset + in igb_cache_ring_register()
738 for (; i < adapter->num_rx_queues; i++) in igb_cache_ring_register()
739 adapter->rx_ring[i]->reg_idx = rbase_offset + i; in igb_cache_ring_register()
740 for (; j < adapter->num_tx_queues; j++) in igb_cache_ring_register()
741 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
797 struct igb_adapter *adapter = q_vector->adapter; in igb_assign_vector() local
798 struct e1000_hw *hw = &adapter->hw; in igb_assign_vector()
819 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) in igb_assign_vector()
867 adapter->eims_enable_mask |= q_vector->eims_value; in igb_assign_vector()
880 static void igb_configure_msix(struct igb_adapter *adapter) in igb_configure_msix() argument
884 struct e1000_hw *hw = &adapter->hw; in igb_configure_msix()
886 adapter->eims_enable_mask = 0; in igb_configure_msix()
903 adapter->eims_other = E1000_EIMS_OTHER; in igb_configure_msix()
921 adapter->eims_other = 1 << vector; in igb_configure_msix()
931 adapter->eims_enable_mask |= adapter->eims_other; in igb_configure_msix()
933 for (i = 0; i < adapter->num_q_vectors; i++) in igb_configure_msix()
934 igb_assign_vector(adapter->q_vector[i], vector++); in igb_configure_msix()
946 static int igb_request_msix(struct igb_adapter *adapter) in igb_request_msix() argument
948 struct net_device *netdev = adapter->netdev; in igb_request_msix()
949 struct e1000_hw *hw = &adapter->hw; in igb_request_msix()
952 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
953 igb_msix_other, 0, netdev->name, adapter); in igb_request_msix()
957 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_request_msix()
958 struct igb_q_vector *q_vector = adapter->q_vector[i]; in igb_request_msix()
976 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
983 igb_configure_msix(adapter); in igb_request_msix()
988 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igb_request_msix()
992 free_irq(adapter->msix_entries[free_vector++].vector, in igb_request_msix()
993 adapter->q_vector[i]); in igb_request_msix()
1006 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) in igb_free_q_vector() argument
1008 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_free_q_vector()
1010 adapter->q_vector[v_idx] = NULL; in igb_free_q_vector()
1027 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) in igb_reset_q_vector() argument
1029 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_reset_q_vector()
1038 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1041 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igb_reset_q_vector()
1047 static void igb_reset_interrupt_capability(struct igb_adapter *adapter) in igb_reset_interrupt_capability() argument
1049 int v_idx = adapter->num_q_vectors; in igb_reset_interrupt_capability()
1051 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_reset_interrupt_capability()
1052 pci_disable_msix(adapter->pdev); in igb_reset_interrupt_capability()
1053 else if (adapter->flags & IGB_FLAG_HAS_MSI) in igb_reset_interrupt_capability()
1054 pci_disable_msi(adapter->pdev); in igb_reset_interrupt_capability()
1057 igb_reset_q_vector(adapter, v_idx); in igb_reset_interrupt_capability()
1068 static void igb_free_q_vectors(struct igb_adapter *adapter) in igb_free_q_vectors() argument
1070 int v_idx = adapter->num_q_vectors; in igb_free_q_vectors()
1072 adapter->num_tx_queues = 0; in igb_free_q_vectors()
1073 adapter->num_rx_queues = 0; in igb_free_q_vectors()
1074 adapter->num_q_vectors = 0; in igb_free_q_vectors()
1077 igb_reset_q_vector(adapter, v_idx); in igb_free_q_vectors()
1078 igb_free_q_vector(adapter, v_idx); in igb_free_q_vectors()
1089 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) in igb_clear_interrupt_scheme() argument
1091 igb_free_q_vectors(adapter); in igb_clear_interrupt_scheme()
1092 igb_reset_interrupt_capability(adapter); in igb_clear_interrupt_scheme()
1103 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) in igb_set_interrupt_capability() argument
1110 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1113 adapter->num_rx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1114 if (adapter->vfs_allocated_count) in igb_set_interrupt_capability()
1115 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1117 adapter->num_tx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1120 numvecs = adapter->num_rx_queues; in igb_set_interrupt_capability()
1123 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) in igb_set_interrupt_capability()
1124 numvecs += adapter->num_tx_queues; in igb_set_interrupt_capability()
1127 adapter->num_q_vectors = numvecs; in igb_set_interrupt_capability()
1132 adapter->msix_entries[i].entry = i; in igb_set_interrupt_capability()
1134 err = pci_enable_msix_range(adapter->pdev, in igb_set_interrupt_capability()
1135 adapter->msix_entries, in igb_set_interrupt_capability()
1141 igb_reset_interrupt_capability(adapter); in igb_set_interrupt_capability()
1145 adapter->flags &= ~IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1148 if (adapter->vf_data) { in igb_set_interrupt_capability()
1149 struct e1000_hw *hw = &adapter->hw; in igb_set_interrupt_capability()
1151 pci_disable_sriov(adapter->pdev); in igb_set_interrupt_capability()
1154 kfree(adapter->vf_data); in igb_set_interrupt_capability()
1155 adapter->vf_data = NULL; in igb_set_interrupt_capability()
1159 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); in igb_set_interrupt_capability()
1162 adapter->vfs_allocated_count = 0; in igb_set_interrupt_capability()
1163 adapter->rss_queues = 1; in igb_set_interrupt_capability()
1164 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_interrupt_capability()
1165 adapter->num_rx_queues = 1; in igb_set_interrupt_capability()
1166 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1167 adapter->num_q_vectors = 1; in igb_set_interrupt_capability()
1168 if (!pci_enable_msi(adapter->pdev)) in igb_set_interrupt_capability()
1169 adapter->flags |= IGB_FLAG_HAS_MSI; in igb_set_interrupt_capability()
1191 static int igb_alloc_q_vector(struct igb_adapter *adapter, in igb_alloc_q_vector() argument
1209 q_vector = adapter->q_vector[v_idx]; in igb_alloc_q_vector()
1222 netif_napi_add(adapter->netdev, &q_vector->napi, in igb_alloc_q_vector()
1226 adapter->q_vector[v_idx] = q_vector; in igb_alloc_q_vector()
1227 q_vector->adapter = adapter; in igb_alloc_q_vector()
1230 q_vector->tx.work_limit = adapter->tx_work_limit; in igb_alloc_q_vector()
1233 q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); in igb_alloc_q_vector()
1242 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igb_alloc_q_vector()
1243 q_vector->itr_val = adapter->rx_itr_setting; in igb_alloc_q_vector()
1246 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igb_alloc_q_vector()
1247 q_vector->itr_val = adapter->tx_itr_setting; in igb_alloc_q_vector()
1252 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1253 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1262 if (adapter->hw.mac.type == e1000_82575) in igb_alloc_q_vector()
1266 ring->count = adapter->tx_ring_count; in igb_alloc_q_vector()
1273 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
1281 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1282 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1291 if (adapter->hw.mac.type >= e1000_82576) in igb_alloc_q_vector()
1297 if (adapter->hw.mac.type >= e1000_i350) in igb_alloc_q_vector()
1301 ring->count = adapter->rx_ring_count; in igb_alloc_q_vector()
1307 adapter->rx_ring[rxr_idx] = ring; in igb_alloc_q_vector()
1321 static int igb_alloc_q_vectors(struct igb_adapter *adapter) in igb_alloc_q_vectors() argument
1323 int q_vectors = adapter->num_q_vectors; in igb_alloc_q_vectors()
1324 int rxr_remaining = adapter->num_rx_queues; in igb_alloc_q_vectors()
1325 int txr_remaining = adapter->num_tx_queues; in igb_alloc_q_vectors()
1331 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, in igb_alloc_q_vectors()
1347 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, in igb_alloc_q_vectors()
1363 adapter->num_tx_queues = 0; in igb_alloc_q_vectors()
1364 adapter->num_rx_queues = 0; in igb_alloc_q_vectors()
1365 adapter->num_q_vectors = 0; in igb_alloc_q_vectors()
1368 igb_free_q_vector(adapter, v_idx); in igb_alloc_q_vectors()
1380 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) in igb_init_interrupt_scheme() argument
1382 struct pci_dev *pdev = adapter->pdev; in igb_init_interrupt_scheme()
1385 igb_set_interrupt_capability(adapter, msix); in igb_init_interrupt_scheme()
1387 err = igb_alloc_q_vectors(adapter); in igb_init_interrupt_scheme()
1393 igb_cache_ring_register(adapter); in igb_init_interrupt_scheme()
1398 igb_reset_interrupt_capability(adapter); in igb_init_interrupt_scheme()
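
Lines 1380-1398 outline the interrupt-scheme bring-up order: capability selection first, then q_vector allocation, then caching of the ring register indices, with the capability torn back down if allocation fails. A condensed sketch of that sequence (the real function's error reporting is omitted, and the exact control flow around it is an assumption):

	static int init_interrupt_scheme_sketch(struct igb_adapter *adapter, bool msix)
	{
		int err;

		igb_set_interrupt_capability(adapter, msix);	/* MSI-X / MSI / legacy */

		err = igb_alloc_q_vectors(adapter);
		if (err) {
			igb_reset_interrupt_capability(adapter);	/* unwind on failure */
			return err;
		}

		igb_cache_ring_register(adapter);		/* map rings to hardware indices */
		return 0;
	}
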
1409 static int igb_request_irq(struct igb_adapter *adapter) in igb_request_irq() argument
1411 struct net_device *netdev = adapter->netdev; in igb_request_irq()
1412 struct pci_dev *pdev = adapter->pdev; in igb_request_irq()
1415 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_request_irq()
1416 err = igb_request_msix(adapter); in igb_request_irq()
1420 igb_free_all_tx_resources(adapter); in igb_request_irq()
1421 igb_free_all_rx_resources(adapter); in igb_request_irq()
1423 igb_clear_interrupt_scheme(adapter); in igb_request_irq()
1424 err = igb_init_interrupt_scheme(adapter, false); in igb_request_irq()
1428 igb_setup_all_tx_resources(adapter); in igb_request_irq()
1429 igb_setup_all_rx_resources(adapter); in igb_request_irq()
1430 igb_configure(adapter); in igb_request_irq()
1433 igb_assign_vector(adapter->q_vector[0], 0); in igb_request_irq()
1435 if (adapter->flags & IGB_FLAG_HAS_MSI) { in igb_request_irq()
1437 netdev->name, adapter); in igb_request_irq()
1442 igb_reset_interrupt_capability(adapter); in igb_request_irq()
1443 adapter->flags &= ~IGB_FLAG_HAS_MSI; in igb_request_irq()
1447 netdev->name, adapter); in igb_request_irq()
1457 static void igb_free_irq(struct igb_adapter *adapter) in igb_free_irq() argument
1459 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_free_irq()
1462 free_irq(adapter->msix_entries[vector++].vector, adapter); in igb_free_irq()
1464 for (i = 0; i < adapter->num_q_vectors; i++) in igb_free_irq()
1465 free_irq(adapter->msix_entries[vector++].vector, in igb_free_irq()
1466 adapter->q_vector[i]); in igb_free_irq()
1468 free_irq(adapter->pdev->irq, adapter); in igb_free_irq()
1476 static void igb_irq_disable(struct igb_adapter *adapter) in igb_irq_disable() argument
1478 struct e1000_hw *hw = &adapter->hw; in igb_irq_disable()
1484 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1487 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1488 wr32(E1000_EIMC, adapter->eims_enable_mask); in igb_irq_disable()
1490 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1496 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1499 for (i = 0; i < adapter->num_q_vectors; i++) in igb_irq_disable()
1500 synchronize_irq(adapter->msix_entries[i].vector); in igb_irq_disable()
1502 synchronize_irq(adapter->pdev->irq); in igb_irq_disable()
1510 static void igb_irq_enable(struct igb_adapter *adapter) in igb_irq_enable() argument
1512 struct e1000_hw *hw = &adapter->hw; in igb_irq_enable()
1514 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_enable()
1518 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); in igb_irq_enable()
1520 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); in igb_irq_enable()
1521 wr32(E1000_EIMS, adapter->eims_enable_mask); in igb_irq_enable()
1522 if (adapter->vfs_allocated_count) { in igb_irq_enable()
1535 static void igb_update_mng_vlan(struct igb_adapter *adapter) in igb_update_mng_vlan() argument
1537 struct e1000_hw *hw = &adapter->hw; in igb_update_mng_vlan()
1538 u16 vid = adapter->hw.mng_cookie.vlan_id; in igb_update_mng_vlan()
1539 u16 old_vid = adapter->mng_vlan_id; in igb_update_mng_vlan()
1544 adapter->mng_vlan_id = vid; in igb_update_mng_vlan()
1546 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; in igb_update_mng_vlan()
1551 !test_bit(old_vid, adapter->active_vlans)) { in igb_update_mng_vlan()
1565 static void igb_release_hw_control(struct igb_adapter *adapter) in igb_release_hw_control() argument
1567 struct e1000_hw *hw = &adapter->hw; in igb_release_hw_control()
1584 static void igb_get_hw_control(struct igb_adapter *adapter) in igb_get_hw_control() argument
1586 struct e1000_hw *hw = &adapter->hw; in igb_get_hw_control()
1599 static void igb_configure(struct igb_adapter *adapter) in igb_configure() argument
1601 struct net_device *netdev = adapter->netdev; in igb_configure()
1604 igb_get_hw_control(adapter); in igb_configure()
1607 igb_restore_vlan(adapter); in igb_configure()
1609 igb_setup_tctl(adapter); in igb_configure()
1610 igb_setup_mrqc(adapter); in igb_configure()
1611 igb_setup_rctl(adapter); in igb_configure()
1613 igb_configure_tx(adapter); in igb_configure()
1614 igb_configure_rx(adapter); in igb_configure()
1616 igb_rx_fifo_flush_82575(&adapter->hw); in igb_configure()
1622 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_configure()
1623 struct igb_ring *ring = adapter->rx_ring[i]; in igb_configure()
1632 void igb_power_up_link(struct igb_adapter *adapter) in igb_power_up_link() argument
1634 igb_reset_phy(&adapter->hw); in igb_power_up_link()
1636 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_up_link()
1637 igb_power_up_phy_copper(&adapter->hw); in igb_power_up_link()
1639 igb_power_up_serdes_link_82575(&adapter->hw); in igb_power_up_link()
1641 igb_setup_link(&adapter->hw); in igb_power_up_link()
1648 static void igb_power_down_link(struct igb_adapter *adapter) in igb_power_down_link() argument
1650 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_down_link()
1651 igb_power_down_phy_copper_82575(&adapter->hw); in igb_power_down_link()
1653 igb_shutdown_serdes_link_82575(&adapter->hw); in igb_power_down_link()
1660 static void igb_check_swap_media(struct igb_adapter *adapter) in igb_check_swap_media() argument
1662 struct e1000_hw *hw = &adapter->hw; in igb_check_swap_media()
1678 if (adapter->copper_tries < 4) { in igb_check_swap_media()
1679 adapter->copper_tries++; in igb_check_swap_media()
1684 adapter->copper_tries = 0; in igb_check_swap_media()
1699 netdev_info(adapter->netdev, in igb_check_swap_media()
1703 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
1704 adapter->copper_tries = 0; in igb_check_swap_media()
1708 netdev_info(adapter->netdev, in igb_check_swap_media()
1712 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
1716 netdev_err(adapter->netdev, in igb_check_swap_media()
1727 int igb_up(struct igb_adapter *adapter) in igb_up() argument
1729 struct e1000_hw *hw = &adapter->hw; in igb_up()
1733 igb_configure(adapter); in igb_up()
1735 clear_bit(__IGB_DOWN, &adapter->state); in igb_up()
1737 for (i = 0; i < adapter->num_q_vectors; i++) in igb_up()
1738 napi_enable(&(adapter->q_vector[i]->napi)); in igb_up()
1740 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_up()
1741 igb_configure_msix(adapter); in igb_up()
1743 igb_assign_vector(adapter->q_vector[0], 0); in igb_up()
1747 igb_irq_enable(adapter); in igb_up()
1750 if (adapter->vfs_allocated_count) { in igb_up()
1757 netif_tx_start_all_queues(adapter->netdev); in igb_up()
1761 schedule_work(&adapter->watchdog_task); in igb_up()
1763 if ((adapter->flags & IGB_FLAG_EEE) && in igb_up()
1765 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; in igb_up()
1770 void igb_down(struct igb_adapter *adapter) in igb_down() argument
1772 struct net_device *netdev = adapter->netdev; in igb_down()
1773 struct e1000_hw *hw = &adapter->hw; in igb_down()
1780 set_bit(__IGB_DOWN, &adapter->state); in igb_down()
1798 igb_irq_disable(adapter); in igb_down()
1800 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_down()
1802 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_down()
1803 if (adapter->q_vector[i]) { in igb_down()
1804 napi_synchronize(&adapter->q_vector[i]->napi); in igb_down()
1805 napi_disable(&adapter->q_vector[i]->napi); in igb_down()
1809 del_timer_sync(&adapter->watchdog_timer); in igb_down()
1810 del_timer_sync(&adapter->phy_info_timer); in igb_down()
1813 spin_lock(&adapter->stats64_lock); in igb_down()
1814 igb_update_stats(adapter, &adapter->stats64); in igb_down()
1815 spin_unlock(&adapter->stats64_lock); in igb_down()
1817 adapter->link_speed = 0; in igb_down()
1818 adapter->link_duplex = 0; in igb_down()
1820 if (!pci_channel_offline(adapter->pdev)) in igb_down()
1821 igb_reset(adapter); in igb_down()
1822 igb_clean_all_tx_rings(adapter); in igb_down()
1823 igb_clean_all_rx_rings(adapter); in igb_down()
1827 igb_setup_dca(adapter); in igb_down()
1831 void igb_reinit_locked(struct igb_adapter *adapter) in igb_reinit_locked() argument
1834 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_reinit_locked()
1836 igb_down(adapter); in igb_reinit_locked()
1837 igb_up(adapter); in igb_reinit_locked()
1838 clear_bit(__IGB_RESETTING, &adapter->state); in igb_reinit_locked()
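
Lines 1831-1838 capture the serialized-reset pattern: spin on the __IGB_RESETTING bit so only one caller bounces the interface at a time, then take the device down and back up. Sketch of the pattern (the back-off between retries is an assumption; only the bit handling and the down/up calls appear in the listing):

	void reinit_locked_sketch(struct igb_adapter *adapter)
	{
		while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
			usleep_range(1000, 2000);	/* assumed back-off while another reset runs */

		igb_down(adapter);
		igb_up(adapter);
		clear_bit(__IGB_RESETTING, &adapter->state);
	}
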
1845 static void igb_enable_mas(struct igb_adapter *adapter) in igb_enable_mas() argument
1847 struct e1000_hw *hw = &adapter->hw; in igb_enable_mas()
1860 void igb_reset(struct igb_adapter *adapter) in igb_reset() argument
1862 struct pci_dev *pdev = adapter->pdev; in igb_reset()
1863 struct e1000_hw *hw = &adapter->hw; in igb_reset()
1890 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && in igb_reset()
1910 min_tx_space = (adapter->max_frame_size + in igb_reset()
1916 min_rx_space = adapter->max_frame_size; in igb_reset()
1945 ((pba << 10) - 2 * adapter->max_frame_size)); in igb_reset()
1954 if (adapter->vfs_allocated_count) { in igb_reset()
1957 for (i = 0 ; i < adapter->vfs_allocated_count; i++) in igb_reset()
1958 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_reset()
1961 igb_ping_all_vfs(adapter); in igb_reset()
1972 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_reset()
1974 adapter->ei.get_invariants(hw); in igb_reset()
1975 adapter->flags &= ~IGB_FLAG_MEDIA_RESET; in igb_reset()
1978 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_reset()
1979 igb_enable_mas(adapter); in igb_reset()
1990 igb_init_dmac(adapter, pba); in igb_reset()
1993 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_reset()
1998 if (adapter->ets) in igb_reset()
2018 if (!netif_running(adapter->netdev)) in igb_reset()
2019 igb_power_down_link(adapter); in igb_reset()
2021 igb_update_mng_vlan(adapter); in igb_reset()
2027 igb_ptp_reset(adapter); in igb_reset()
2050 struct igb_adapter *adapter = netdev_priv(netdev); in igb_set_features() local
2061 igb_reinit_locked(adapter); in igb_set_features()
2063 igb_reset(adapter); in igb_set_features()
2098 void igb_set_fw_version(struct igb_adapter *adapter) in igb_set_fw_version() argument
2100 struct e1000_hw *hw = &adapter->hw; in igb_set_fw_version()
2109 snprintf(adapter->fw_version, in igb_set_fw_version()
2110 sizeof(adapter->fw_version), in igb_set_fw_version()
2120 snprintf(adapter->fw_version, in igb_set_fw_version()
2121 sizeof(adapter->fw_version), in igb_set_fw_version()
2127 snprintf(adapter->fw_version, in igb_set_fw_version()
2128 sizeof(adapter->fw_version), in igb_set_fw_version()
2132 snprintf(adapter->fw_version, in igb_set_fw_version()
2133 sizeof(adapter->fw_version), in igb_set_fw_version()
2146 static void igb_init_mas(struct igb_adapter *adapter) in igb_init_mas() argument
2148 struct e1000_hw *hw = &adapter->hw; in igb_init_mas()
2155 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2156 netdev_info(adapter->netdev, in igb_init_mas()
2163 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2164 netdev_info(adapter->netdev, in igb_init_mas()
2171 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2172 netdev_info(adapter->netdev, in igb_init_mas()
2179 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2180 netdev_info(adapter->netdev, in igb_init_mas()
2187 netdev_err(adapter->netdev, in igb_init_mas()
2197 static s32 igb_init_i2c(struct igb_adapter *adapter) in igb_init_i2c() argument
2202 if (adapter->hw.mac.type != e1000_i350) in igb_init_i2c()
2209 adapter->i2c_adap.owner = THIS_MODULE; in igb_init_i2c()
2210 adapter->i2c_algo = igb_i2c_algo; in igb_init_i2c()
2211 adapter->i2c_algo.data = adapter; in igb_init_i2c()
2212 adapter->i2c_adap.algo_data = &adapter->i2c_algo; in igb_init_i2c()
2213 adapter->i2c_adap.dev.parent = &adapter->pdev->dev; in igb_init_i2c()
2214 strlcpy(adapter->i2c_adap.name, "igb BB", in igb_init_i2c()
2215 sizeof(adapter->i2c_adap.name)); in igb_init_i2c()
2216 status = i2c_bit_add_bus(&adapter->i2c_adap); in igb_init_i2c()
2234 struct igb_adapter *adapter; in igb_probe() local
2289 adapter = netdev_priv(netdev); in igb_probe()
2290 adapter->netdev = netdev; in igb_probe()
2291 adapter->pdev = pdev; in igb_probe()
2292 hw = &adapter->hw; in igb_probe()
2293 hw->back = adapter; in igb_probe()
2294 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igb_probe()
2327 err = igb_sw_init(adapter); in igb_probe()
2387 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); in igb_probe()
2431 igb_set_fw_version(adapter); in igb_probe()
2439 setup_timer(&adapter->watchdog_timer, igb_watchdog, in igb_probe()
2440 (unsigned long) adapter); in igb_probe()
2441 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, in igb_probe()
2442 (unsigned long) adapter); in igb_probe()
2444 INIT_WORK(&adapter->reset_task, igb_reset_task); in igb_probe()
2445 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); in igb_probe()
2448 adapter->fc_autoneg = true; in igb_probe()
2459 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2470 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2478 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2487 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2493 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2495 adapter->flags |= IGB_FLAG_QUAD_PORT_A; in igb_probe()
2502 if (!device_can_wakeup(&adapter->pdev->dev)) in igb_probe()
2503 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2507 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) in igb_probe()
2508 adapter->wol |= E1000_WUFC_MAG; in igb_probe()
2513 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2514 adapter->wol = 0; in igb_probe()
2517 device_set_wakeup_enable(&adapter->pdev->dev, in igb_probe()
2518 adapter->flags & IGB_FLAG_WOL_SUPPORTED); in igb_probe()
2521 igb_reset(adapter); in igb_probe()
2524 err = igb_init_i2c(adapter); in igb_probe()
2533 igb_get_hw_control(adapter); in igb_probe()
2545 adapter->flags |= IGB_FLAG_DCA_ENABLED; in igb_probe()
2547 igb_setup_dca(adapter); in igb_probe()
2561 adapter->ets = true; in igb_probe()
2563 adapter->ets = false; in igb_probe()
2564 if (igb_sysfs_init(adapter)) in igb_probe()
2568 adapter->ets = false; in igb_probe()
2572 adapter->ei = *ei; in igb_probe()
2574 igb_init_mas(adapter); in igb_probe()
2577 igb_ptp_init(adapter); in igb_probe()
2608 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : in igb_probe()
2609 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", in igb_probe()
2610 adapter->num_rx_queues, adapter->num_tx_queues); in igb_probe()
2620 adapter->eee_advert = in igb_probe()
2622 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
2631 adapter->eee_advert = in igb_probe()
2633 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
2645 igb_release_hw_control(adapter); in igb_probe()
2646 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); in igb_probe()
2654 kfree(adapter->shadow_vfta); in igb_probe()
2655 igb_clear_interrupt_scheme(adapter); in igb_probe()
2675 struct igb_adapter *adapter = netdev_priv(netdev); in igb_disable_sriov() local
2676 struct e1000_hw *hw = &adapter->hw; in igb_disable_sriov()
2679 if (adapter->vf_data) { in igb_disable_sriov()
2690 kfree(adapter->vf_data); in igb_disable_sriov()
2691 adapter->vf_data = NULL; in igb_disable_sriov()
2692 adapter->vfs_allocated_count = 0; in igb_disable_sriov()
2699 adapter->flags |= IGB_FLAG_DMAC; in igb_disable_sriov()
2708 struct igb_adapter *adapter = netdev_priv(netdev); in igb_enable_sriov() local
2713 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { in igb_enable_sriov()
2723 adapter->vfs_allocated_count = old_vfs; in igb_enable_sriov()
2725 adapter->vfs_allocated_count = num_vfs; in igb_enable_sriov()
2727 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, in igb_enable_sriov()
2731 if (!adapter->vf_data) { in igb_enable_sriov()
2732 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
2741 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); in igb_enable_sriov()
2746 adapter->vfs_allocated_count); in igb_enable_sriov()
2747 for (i = 0; i < adapter->vfs_allocated_count; i++) in igb_enable_sriov()
2748 igb_vf_configure(adapter, i); in igb_enable_sriov()
2751 adapter->flags &= ~IGB_FLAG_DMAC; in igb_enable_sriov()
2755 kfree(adapter->vf_data); in igb_enable_sriov()
2756 adapter->vf_data = NULL; in igb_enable_sriov()
2757 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
2767 static void igb_remove_i2c(struct igb_adapter *adapter) in igb_remove_i2c() argument
2770 i2c_del_adapter(&adapter->i2c_adap); in igb_remove_i2c()
2785 struct igb_adapter *adapter = netdev_priv(netdev); in igb_remove() local
2786 struct e1000_hw *hw = &adapter->hw; in igb_remove()
2790 igb_sysfs_exit(adapter); in igb_remove()
2792 igb_remove_i2c(adapter); in igb_remove()
2793 igb_ptp_stop(adapter); in igb_remove()
2797 set_bit(__IGB_DOWN, &adapter->state); in igb_remove()
2798 del_timer_sync(&adapter->watchdog_timer); in igb_remove()
2799 del_timer_sync(&adapter->phy_info_timer); in igb_remove()
2801 cancel_work_sync(&adapter->reset_task); in igb_remove()
2802 cancel_work_sync(&adapter->watchdog_task); in igb_remove()
2805 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in igb_remove()
2808 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in igb_remove()
2816 igb_release_hw_control(adapter); in igb_remove()
2824 igb_clear_interrupt_scheme(adapter); in igb_remove()
2832 kfree(adapter->shadow_vfta); in igb_remove()
2849 static void igb_probe_vfs(struct igb_adapter *adapter) in igb_probe_vfs() argument
2852 struct pci_dev *pdev = adapter->pdev; in igb_probe_vfs()
2853 struct e1000_hw *hw = &adapter->hw; in igb_probe_vfs()
2865 static void igb_init_queue_configuration(struct igb_adapter *adapter) in igb_init_queue_configuration() argument
2867 struct e1000_hw *hw = &adapter->hw; in igb_init_queue_configuration()
2881 if (!!adapter->vfs_allocated_count) { in igb_init_queue_configuration()
2887 if (!!adapter->vfs_allocated_count) { in igb_init_queue_configuration()
2899 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igb_init_queue_configuration()
2901 igb_set_flag_queue_pairs(adapter, max_rss_queues); in igb_init_queue_configuration()
2904 void igb_set_flag_queue_pairs(struct igb_adapter *adapter, in igb_set_flag_queue_pairs() argument
2907 struct e1000_hw *hw = &adapter->hw; in igb_set_flag_queue_pairs()
2920 if ((adapter->rss_queues > 1) && in igb_set_flag_queue_pairs()
2921 (adapter->vfs_allocated_count > 6)) in igb_set_flag_queue_pairs()
2922 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
2932 if (adapter->rss_queues > (max_rss_queues / 2)) in igb_set_flag_queue_pairs()
2933 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
2946 static int igb_sw_init(struct igb_adapter *adapter) in igb_sw_init() argument
2948 struct e1000_hw *hw = &adapter->hw; in igb_sw_init()
2949 struct net_device *netdev = adapter->netdev; in igb_sw_init()
2950 struct pci_dev *pdev = adapter->pdev; in igb_sw_init()
2955 adapter->tx_ring_count = IGB_DEFAULT_TXD; in igb_sw_init()
2956 adapter->rx_ring_count = IGB_DEFAULT_RXD; in igb_sw_init()
2959 adapter->rx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
2960 adapter->tx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
2963 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; in igb_sw_init()
2965 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igb_sw_init()
2967 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igb_sw_init()
2969 spin_lock_init(&adapter->stats64_lock); in igb_sw_init()
2977 max_vfs = adapter->vfs_allocated_count = 7; in igb_sw_init()
2979 adapter->vfs_allocated_count = max_vfs; in igb_sw_init()
2980 if (adapter->vfs_allocated_count) in igb_sw_init()
2990 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_sw_init()
2992 igb_probe_vfs(adapter); in igb_sw_init()
2994 igb_init_queue_configuration(adapter); in igb_sw_init()
2997 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), in igb_sw_init()
3001 if (igb_init_interrupt_scheme(adapter, true)) { in igb_sw_init()
3007 igb_irq_disable(adapter); in igb_sw_init()
3010 adapter->flags &= ~IGB_FLAG_DMAC; in igb_sw_init()
3012 set_bit(__IGB_DOWN, &adapter->state); in igb_sw_init()
3030 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_open() local
3031 struct e1000_hw *hw = &adapter->hw; in __igb_open()
3032 struct pci_dev *pdev = adapter->pdev; in __igb_open()
3037 if (test_bit(__IGB_TESTING, &adapter->state)) { in __igb_open()
3048 err = igb_setup_all_tx_resources(adapter); in __igb_open()
3053 err = igb_setup_all_rx_resources(adapter); in __igb_open()
3057 igb_power_up_link(adapter); in __igb_open()
3064 igb_configure(adapter); in __igb_open()
3066 err = igb_request_irq(adapter); in __igb_open()
3071 err = netif_set_real_num_tx_queues(adapter->netdev, in __igb_open()
3072 adapter->num_tx_queues); in __igb_open()
3076 err = netif_set_real_num_rx_queues(adapter->netdev, in __igb_open()
3077 adapter->num_rx_queues); in __igb_open()
3082 clear_bit(__IGB_DOWN, &adapter->state); in __igb_open()
3084 for (i = 0; i < adapter->num_q_vectors; i++) in __igb_open()
3085 napi_enable(&(adapter->q_vector[i]->napi)); in __igb_open()
3090 igb_irq_enable(adapter); in __igb_open()
3093 if (adapter->vfs_allocated_count) { in __igb_open()
3107 schedule_work(&adapter->watchdog_task); in __igb_open()
3112 igb_free_irq(adapter); in __igb_open()
3114 igb_release_hw_control(adapter); in __igb_open()
3115 igb_power_down_link(adapter); in __igb_open()
3116 igb_free_all_rx_resources(adapter); in __igb_open()
3118 igb_free_all_tx_resources(adapter); in __igb_open()
3120 igb_reset(adapter); in __igb_open()
3145 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_close() local
3146 struct pci_dev *pdev = adapter->pdev; in __igb_close()
3148 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); in __igb_close()
3153 igb_down(adapter); in __igb_close()
3154 igb_free_irq(adapter); in __igb_close()
3156 igb_free_all_tx_resources(adapter); in __igb_close()
3157 igb_free_all_rx_resources(adapter); in __igb_close()
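
Lines 3145-3157 give the teardown order used on close: stop the interface, release the IRQs, then free the Tx and Rx ring resources. A stripped-down sketch (the runtime-PM handling around pdev, only partially visible above, is left out):

	static void close_sketch(struct igb_adapter *adapter)
	{
		igb_down(adapter);			/* quiesce hardware and NAPI */
		igb_free_irq(adapter);			/* give back MSI-X/MSI/legacy vectors */

		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
	}
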
3214 static int igb_setup_all_tx_resources(struct igb_adapter *adapter) in igb_setup_all_tx_resources() argument
3216 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_tx_resources()
3219 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_setup_all_tx_resources()
3220 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3225 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
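
Lines 3214-3225 (and the symmetric Rx version at 3357-3368 further down) show the allocate-all-rings pattern: set up each ring in turn and, if one fails, unwind the rings already allocated. Sketch of the Tx side, with the real function's error message omitted and the unwind loop written equivalently:

	static int setup_all_tx_resources_sketch(struct igb_adapter *adapter)
	{
		int i, err = 0;

		for (i = 0; i < adapter->num_tx_queues; i++) {
			err = igb_setup_tx_resources(adapter->tx_ring[i]);
			if (err) {
				while (i--)	/* free the rings set up so far */
					igb_free_tx_resources(adapter->tx_ring[i]);
				break;
			}
		}

		return err;
	}
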
3237 void igb_setup_tctl(struct igb_adapter *adapter) in igb_setup_tctl() argument
3239 struct e1000_hw *hw = &adapter->hw; in igb_setup_tctl()
3266 void igb_configure_tx_ring(struct igb_adapter *adapter, in igb_configure_tx_ring() argument
3269 struct e1000_hw *hw = &adapter->hw; in igb_configure_tx_ring()
3303 static void igb_configure_tx(struct igb_adapter *adapter) in igb_configure_tx() argument
3307 for (i = 0; i < adapter->num_tx_queues; i++) in igb_configure_tx()
3308 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
3357 static int igb_setup_all_rx_resources(struct igb_adapter *adapter) in igb_setup_all_rx_resources() argument
3359 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_rx_resources()
3362 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_setup_all_rx_resources()
3363 err = igb_setup_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
3368 igb_free_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
3380 static void igb_setup_mrqc(struct igb_adapter *adapter) in igb_setup_mrqc() argument
3382 struct e1000_hw *hw = &adapter->hw; in igb_setup_mrqc()
3391 num_rx_queues = adapter->rss_queues; in igb_setup_mrqc()
3396 if (adapter->vfs_allocated_count) in igb_setup_mrqc()
3403 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igb_setup_mrqc()
3405 adapter->rss_indir_tbl[j] = in igb_setup_mrqc()
3407 adapter->rss_indir_tbl_init = num_rx_queues; in igb_setup_mrqc()
3409 igb_write_rss_indir_tbl(adapter); in igb_setup_mrqc()
3418 if (adapter->hw.mac.type >= e1000_82576) in igb_setup_mrqc()
3434 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) in igb_setup_mrqc()
3436 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) in igb_setup_mrqc()
3443 if (adapter->vfs_allocated_count) { in igb_setup_mrqc()
3450 vtctl |= adapter->vfs_allocated_count << in igb_setup_mrqc()
3454 if (adapter->rss_queues > 1) in igb_setup_mrqc()
3462 igb_vmm_control(adapter); in igb_setup_mrqc()
3471 void igb_setup_rctl(struct igb_adapter *adapter) in igb_setup_rctl() argument
3473 struct e1000_hw *hw = &adapter->hw; in igb_setup_rctl()
3503 if (adapter->vfs_allocated_count) { in igb_setup_rctl()
3509 if (adapter->netdev->features & NETIF_F_RXALL) { in igb_setup_rctl()
3528 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, in igb_set_vf_rlpml() argument
3531 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_rlpml()
3537 if (vfn < adapter->vfs_allocated_count && in igb_set_vf_rlpml()
3538 adapter->vf_data[vfn].vlans_enabled) in igb_set_vf_rlpml()
3555 static void igb_rlpml_set(struct igb_adapter *adapter) in igb_rlpml_set() argument
3557 u32 max_frame_size = adapter->max_frame_size; in igb_rlpml_set()
3558 struct e1000_hw *hw = &adapter->hw; in igb_rlpml_set()
3559 u16 pf_id = adapter->vfs_allocated_count; in igb_rlpml_set()
3562 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); in igb_rlpml_set()
3575 static inline void igb_set_vmolr(struct igb_adapter *adapter, in igb_set_vmolr() argument
3578 struct e1000_hw *hw = &adapter->hw; in igb_set_vmolr()
3604 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) in igb_set_vmolr()
3609 if (vfn <= adapter->vfs_allocated_count) in igb_set_vmolr()
3622 void igb_configure_rx_ring(struct igb_adapter *adapter, in igb_configure_rx_ring() argument
3625 struct e1000_hw *hw = &adapter->hw; in igb_configure_rx_ring()
3652 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) in igb_configure_rx_ring()
3658 igb_set_vmolr(adapter, reg_idx & 0x7, true); in igb_configure_rx_ring()
3675 static void igb_configure_rx(struct igb_adapter *adapter) in igb_configure_rx() argument
3680 igb_set_uta(adapter); in igb_configure_rx()
3683 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, in igb_configure_rx()
3684 adapter->vfs_allocated_count); in igb_configure_rx()
3689 for (i = 0; i < adapter->num_rx_queues; i++) in igb_configure_rx()
3690 igb_configure_rx_ring(adapter, adapter->rx_ring[i]); in igb_configure_rx()
3722 static void igb_free_all_tx_resources(struct igb_adapter *adapter) in igb_free_all_tx_resources() argument
3726 for (i = 0; i < adapter->num_tx_queues; i++) in igb_free_all_tx_resources()
3727 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
3728 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
3788 static void igb_clean_all_tx_rings(struct igb_adapter *adapter) in igb_clean_all_tx_rings() argument
3792 for (i = 0; i < adapter->num_tx_queues; i++) in igb_clean_all_tx_rings()
3793 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
3794 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
3826 static void igb_free_all_rx_resources(struct igb_adapter *adapter) in igb_free_all_rx_resources() argument
3830 for (i = 0; i < adapter->num_rx_queues; i++) in igb_free_all_rx_resources()
3831 if (adapter->rx_ring[i]) in igb_free_all_rx_resources()
3832 igb_free_rx_resources(adapter->rx_ring[i]); in igb_free_all_rx_resources()
3882 static void igb_clean_all_rx_rings(struct igb_adapter *adapter) in igb_clean_all_rx_rings() argument
3886 for (i = 0; i < adapter->num_rx_queues; i++) in igb_clean_all_rx_rings()
3887 if (adapter->rx_ring[i]) in igb_clean_all_rx_rings()
3888 igb_clean_rx_ring(adapter->rx_ring[i]); in igb_clean_all_rx_rings()
3900 struct igb_adapter *adapter = netdev_priv(netdev); in igb_set_mac() local
3901 struct e1000_hw *hw = &adapter->hw; in igb_set_mac()
3911 igb_rar_set_qsel(adapter, hw->mac.addr, 0, in igb_set_mac()
3912 adapter->vfs_allocated_count); in igb_set_mac()
3928 struct igb_adapter *adapter = netdev_priv(netdev); in igb_write_mc_addr_list() local
3929 struct e1000_hw *hw = &adapter->hw; in igb_write_mc_addr_list()
3937 igb_restore_vf_multicasts(adapter); in igb_write_mc_addr_list()
3967 struct igb_adapter *adapter = netdev_priv(netdev); in igb_write_uc_addr_list() local
3968 struct e1000_hw *hw = &adapter->hw; in igb_write_uc_addr_list()
3969 unsigned int vfn = adapter->vfs_allocated_count; in igb_write_uc_addr_list()
3983 igb_rar_set_qsel(adapter, ha->addr, in igb_write_uc_addr_list()
4010 struct igb_adapter *adapter = netdev_priv(netdev); in igb_set_rx_mode() local
4011 struct e1000_hw *hw = &adapter->hw; in igb_set_rx_mode()
4012 unsigned int vfn = adapter->vfs_allocated_count; in igb_set_rx_mode()
4024 if (adapter->vfs_allocated_count) in igb_set_rx_mode()
4069 igb_restore_vf_multicasts(adapter); in igb_set_rx_mode()
4072 static void igb_check_wvbr(struct igb_adapter *adapter) in igb_check_wvbr() argument
4074 struct e1000_hw *hw = &adapter->hw; in igb_check_wvbr()
4088 adapter->wvbr |= wvbr; in igb_check_wvbr()
4093 static void igb_spoof_check(struct igb_adapter *adapter) in igb_spoof_check() argument
4097 if (!adapter->wvbr) in igb_spoof_check()
4100 for (j = 0; j < adapter->vfs_allocated_count; j++) { in igb_spoof_check()
4101 if (adapter->wvbr & (1 << j) || in igb_spoof_check()
4102 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { in igb_spoof_check()
4103 dev_warn(&adapter->pdev->dev, in igb_spoof_check()
4105 adapter->wvbr &= in igb_spoof_check()
4117 struct igb_adapter *adapter = (struct igb_adapter *) data; in igb_update_phy_info() local
4118 igb_get_phy_info(&adapter->hw); in igb_update_phy_info()
4125 bool igb_has_link(struct igb_adapter *adapter) in igb_has_link() argument
4127 struct e1000_hw *hw = &adapter->hw; in igb_has_link()
4151 if (!netif_carrier_ok(adapter->netdev)) { in igb_has_link()
4152 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
4153 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { in igb_has_link()
4154 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
4155 adapter->link_check_timeout = jiffies; in igb_has_link()
4185 static void igb_check_lvmmc(struct igb_adapter *adapter) in igb_check_lvmmc() argument
4187 struct e1000_hw *hw = &adapter->hw; in igb_check_lvmmc()
4193 netdev_warn(adapter->netdev, in igb_check_lvmmc()
4206 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_watchdog() local
4208 schedule_work(&adapter->watchdog_task); in igb_watchdog()
4213 struct igb_adapter *adapter = container_of(work, in igb_watchdog_task() local
4216 struct e1000_hw *hw = &adapter->hw; in igb_watchdog_task()
4218 struct net_device *netdev = adapter->netdev; in igb_watchdog_task()
4223 link = igb_has_link(adapter); in igb_watchdog_task()
4225 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { in igb_watchdog_task()
4226 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igb_watchdog_task()
4227 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_watchdog_task()
4233 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
4244 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_watchdog_task()
4245 igb_reset(adapter); in igb_watchdog_task()
4254 &adapter->link_speed, in igb_watchdog_task()
4255 &adapter->link_duplex); in igb_watchdog_task()
4262 adapter->link_speed, in igb_watchdog_task()
4263 adapter->link_duplex == FULL_DUPLEX ? in igb_watchdog_task()
4271 if ((adapter->flags & IGB_FLAG_EEE) && in igb_watchdog_task()
4272 (adapter->link_duplex == HALF_DUPLEX)) { in igb_watchdog_task()
4273 dev_info(&adapter->pdev->dev, in igb_watchdog_task()
4275 adapter->hw.dev_spec._82575.eee_disable = true; in igb_watchdog_task()
4276 adapter->flags &= ~IGB_FLAG_EEE; in igb_watchdog_task()
4290 adapter->tx_timeout_factor = 1; in igb_watchdog_task()
4291 switch (adapter->link_speed) { in igb_watchdog_task()
4293 adapter->tx_timeout_factor = 14; in igb_watchdog_task()
4302 igb_ping_all_vfs(adapter); in igb_watchdog_task()
4303 igb_check_vf_rate_limit(adapter); in igb_watchdog_task()
4306 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
4307 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
4312 adapter->link_speed = 0; in igb_watchdog_task()
4313 adapter->link_duplex = 0; in igb_watchdog_task()
4326 igb_ping_all_vfs(adapter); in igb_watchdog_task()
4329 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
4330 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
4334 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
4335 igb_check_swap_media(adapter); in igb_watchdog_task()
4336 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
4337 schedule_work(&adapter->reset_task); in igb_watchdog_task()
4347 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_watchdog_task()
4348 igb_check_swap_media(adapter); in igb_watchdog_task()
4349 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
4350 schedule_work(&adapter->reset_task); in igb_watchdog_task()
4357 spin_lock(&adapter->stats64_lock); in igb_watchdog_task()
4358 igb_update_stats(adapter, &adapter->stats64); in igb_watchdog_task()
4359 spin_unlock(&adapter->stats64_lock); in igb_watchdog_task()
4361 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_watchdog_task()
4362 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task()
4370 adapter->tx_timeout_count++; in igb_watchdog_task()
4371 schedule_work(&adapter->reset_task); in igb_watchdog_task()
4382 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_watchdog_task()
4385 for (i = 0; i < adapter->num_q_vectors; i++) in igb_watchdog_task()
4386 eics |= adapter->q_vector[i]->eims_value; in igb_watchdog_task()
4392 igb_spoof_check(adapter); in igb_watchdog_task()
4393 igb_ptp_rx_hang(adapter); in igb_watchdog_task()
4396 if ((adapter->hw.mac.type == e1000_i350) || in igb_watchdog_task()
4397 (adapter->hw.mac.type == e1000_i354)) in igb_watchdog_task()
4398 igb_check_lvmmc(adapter); in igb_watchdog_task()
4401 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_watchdog_task()
4402 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) in igb_watchdog_task()
4403 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
4406 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
4437 struct igb_adapter *adapter = q_vector->adapter; in igb_update_ring_itr() local
4443 if (adapter->link_speed != SPEED_1000) { in igb_update_ring_itr()
4475 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_update_ring_itr()
4476 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_update_ring_itr()
4561 struct igb_adapter *adapter = q_vector->adapter; in igb_set_itr() local
4566 if (adapter->link_speed != SPEED_1000) { in igb_set_itr()
4579 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_set_itr()
4580 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_set_itr()
5026 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring() local
5029 &adapter->state)) { in igb_xmit_frame_ring()
5033 adapter->ptp_tx_skb = skb_get(skb); in igb_xmit_frame_ring()
5034 adapter->ptp_tx_start = jiffies; in igb_xmit_frame_ring()
5035 if (adapter->hw.mac.type == e1000_82576) in igb_xmit_frame_ring()
5036 schedule_work(&adapter->ptp_tx_work); in igb_xmit_frame_ring()
5067 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, in igb_tx_queue_mapping() argument
5072 if (r_idx >= adapter->num_tx_queues) in igb_tx_queue_mapping()
5073 r_idx = r_idx % adapter->num_tx_queues; in igb_tx_queue_mapping()
5075 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
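
Lines 5067-5075 show how a transmit skb is mapped onto an allocated Tx ring: the queue index carried by the skb is folded back into range with a modulo whenever it exceeds num_tx_queues. Sketch (the source of r_idx is an assumption; the listing only shows the clamp and the ring lookup):

	static struct igb_ring *tx_queue_mapping_sketch(struct igb_adapter *adapter,
							struct sk_buff *skb)
	{
		unsigned int r_idx = skb->queue_mapping;	/* assumed source of the index */

		if (r_idx >= adapter->num_tx_queues)
			r_idx = r_idx % adapter->num_tx_queues;

		return adapter->tx_ring[r_idx];
	}
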
5081 struct igb_adapter *adapter = netdev_priv(netdev); in igb_xmit_frame() local
5083 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_xmit_frame()
5099 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); in igb_xmit_frame()
5108 struct igb_adapter *adapter = netdev_priv(netdev); in igb_tx_timeout() local
5109 struct e1000_hw *hw = &adapter->hw; in igb_tx_timeout()
5112 adapter->tx_timeout_count++; in igb_tx_timeout()
5117 schedule_work(&adapter->reset_task); in igb_tx_timeout()
5119 (adapter->eims_enable_mask & ~adapter->eims_other)); in igb_tx_timeout()
5124 struct igb_adapter *adapter; in igb_reset_task() local
5125 adapter = container_of(work, struct igb_adapter, reset_task); in igb_reset_task()
5127 igb_dump(adapter); in igb_reset_task()
5128 netdev_err(adapter->netdev, "Reset adapter\n"); in igb_reset_task()
5129 igb_reinit_locked(adapter); in igb_reset_task()
5140 struct igb_adapter *adapter = netdev_priv(netdev); in igb_get_stats64() local
5142 spin_lock(&adapter->stats64_lock); in igb_get_stats64()
5143 igb_update_stats(adapter, &adapter->stats64); in igb_get_stats64()
5144 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igb_get_stats64()
5145 spin_unlock(&adapter->stats64_lock); in igb_get_stats64()
5159 struct igb_adapter *adapter = netdev_priv(netdev); in igb_change_mtu() local
5160 struct pci_dev *pdev = adapter->pdev; in igb_change_mtu()
5178 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_change_mtu()
5182 adapter->max_frame_size = max_frame; in igb_change_mtu()
5185 igb_down(adapter); in igb_change_mtu()
5192 igb_up(adapter); in igb_change_mtu()
5194 igb_reset(adapter); in igb_change_mtu()
5196 clear_bit(__IGB_RESETTING, &adapter->state); in igb_change_mtu()
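
Lines 5159-5196 show that an MTU change reuses the same __IGB_RESETTING serialization as igb_reinit_locked(): take the bit, record the new maximum frame size, bounce or reset the device, release the bit. Sketch of that flow (the netif_running() guards, the mtu assignment, and the retry back-off are assumptions; the listing shows the bit handling, the max_frame_size update, and the down/up/reset calls):

	static int change_mtu_sketch(struct net_device *netdev, int new_mtu, int max_frame)
	{
		struct igb_adapter *adapter = netdev_priv(netdev);

		while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
			usleep_range(1000, 2000);	/* assumed back-off */

		/* buffer sizing depends on max_frame_size, so update it before going down */
		adapter->max_frame_size = max_frame;

		if (netif_running(netdev))		/* assumed guard */
			igb_down(adapter);

		netdev->mtu = new_mtu;			/* assumed: not visible in the listing */

		if (netif_running(netdev))
			igb_up(adapter);
		else
			igb_reset(adapter);

		clear_bit(__IGB_RESETTING, &adapter->state);
		return 0;
	}
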
5205 void igb_update_stats(struct igb_adapter *adapter, in igb_update_stats() argument
5208 struct e1000_hw *hw = &adapter->hw; in igb_update_stats()
5209 struct pci_dev *pdev = adapter->pdev; in igb_update_stats()
5219 if (adapter->link_speed == 0) in igb_update_stats()
5228 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_update_stats()
5229 struct igb_ring *ring = adapter->rx_ring[i]; in igb_update_stats()
5253 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_update_stats()
5254 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
5268 adapter->stats.crcerrs += rd32(E1000_CRCERRS); in igb_update_stats()
5269 adapter->stats.gprc += rd32(E1000_GPRC); in igb_update_stats()
5270 adapter->stats.gorc += rd32(E1000_GORCL); in igb_update_stats()
5272 adapter->stats.bprc += rd32(E1000_BPRC); in igb_update_stats()
5273 adapter->stats.mprc += rd32(E1000_MPRC); in igb_update_stats()
5274 adapter->stats.roc += rd32(E1000_ROC); in igb_update_stats()
5276 adapter->stats.prc64 += rd32(E1000_PRC64); in igb_update_stats()
5277 adapter->stats.prc127 += rd32(E1000_PRC127); in igb_update_stats()
5278 adapter->stats.prc255 += rd32(E1000_PRC255); in igb_update_stats()
5279 adapter->stats.prc511 += rd32(E1000_PRC511); in igb_update_stats()
5280 adapter->stats.prc1023 += rd32(E1000_PRC1023); in igb_update_stats()
5281 adapter->stats.prc1522 += rd32(E1000_PRC1522); in igb_update_stats()
5282 adapter->stats.symerrs += rd32(E1000_SYMERRS); in igb_update_stats()
5283 adapter->stats.sec += rd32(E1000_SEC); in igb_update_stats()
5286 adapter->stats.mpc += mpc; in igb_update_stats()
5288 adapter->stats.scc += rd32(E1000_SCC); in igb_update_stats()
5289 adapter->stats.ecol += rd32(E1000_ECOL); in igb_update_stats()
5290 adapter->stats.mcc += rd32(E1000_MCC); in igb_update_stats()
5291 adapter->stats.latecol += rd32(E1000_LATECOL); in igb_update_stats()
5292 adapter->stats.dc += rd32(E1000_DC); in igb_update_stats()
5293 adapter->stats.rlec += rd32(E1000_RLEC); in igb_update_stats()
5294 adapter->stats.xonrxc += rd32(E1000_XONRXC); in igb_update_stats()
5295 adapter->stats.xontxc += rd32(E1000_XONTXC); in igb_update_stats()
5296 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); in igb_update_stats()
5297 adapter->stats.xofftxc += rd32(E1000_XOFFTXC); in igb_update_stats()
5298 adapter->stats.fcruc += rd32(E1000_FCRUC); in igb_update_stats()
5299 adapter->stats.gptc += rd32(E1000_GPTC); in igb_update_stats()
5300 adapter->stats.gotc += rd32(E1000_GOTCL); in igb_update_stats()
5302 adapter->stats.rnbc += rd32(E1000_RNBC); in igb_update_stats()
5303 adapter->stats.ruc += rd32(E1000_RUC); in igb_update_stats()
5304 adapter->stats.rfc += rd32(E1000_RFC); in igb_update_stats()
5305 adapter->stats.rjc += rd32(E1000_RJC); in igb_update_stats()
5306 adapter->stats.tor += rd32(E1000_TORH); in igb_update_stats()
5307 adapter->stats.tot += rd32(E1000_TOTH); in igb_update_stats()
5308 adapter->stats.tpr += rd32(E1000_TPR); in igb_update_stats()
5310 adapter->stats.ptc64 += rd32(E1000_PTC64); in igb_update_stats()
5311 adapter->stats.ptc127 += rd32(E1000_PTC127); in igb_update_stats()
5312 adapter->stats.ptc255 += rd32(E1000_PTC255); in igb_update_stats()
5313 adapter->stats.ptc511 += rd32(E1000_PTC511); in igb_update_stats()
5314 adapter->stats.ptc1023 += rd32(E1000_PTC1023); in igb_update_stats()
5315 adapter->stats.ptc1522 += rd32(E1000_PTC1522); in igb_update_stats()
5317 adapter->stats.mptc += rd32(E1000_MPTC); in igb_update_stats()
5318 adapter->stats.bptc += rd32(E1000_BPTC); in igb_update_stats()
5320 adapter->stats.tpt += rd32(E1000_TPT); in igb_update_stats()
5321 adapter->stats.colc += rd32(E1000_COLC); in igb_update_stats()
5323 adapter->stats.algnerrc += rd32(E1000_ALGNERRC); in igb_update_stats()
5327 adapter->stats.rxerrc += rd32(E1000_RXERRC); in igb_update_stats()
5332 adapter->stats.tncrs += rd32(E1000_TNCRS); in igb_update_stats()
5335 adapter->stats.tsctc += rd32(E1000_TSCTC); in igb_update_stats()
5336 adapter->stats.tsctfc += rd32(E1000_TSCTFC); in igb_update_stats()
5338 adapter->stats.iac += rd32(E1000_IAC); in igb_update_stats()
5339 adapter->stats.icrxoc += rd32(E1000_ICRXOC); in igb_update_stats()
5340 adapter->stats.icrxptc += rd32(E1000_ICRXPTC); in igb_update_stats()
5341 adapter->stats.icrxatc += rd32(E1000_ICRXATC); in igb_update_stats()
5342 adapter->stats.ictxptc += rd32(E1000_ICTXPTC); in igb_update_stats()
5343 adapter->stats.ictxatc += rd32(E1000_ICTXATC); in igb_update_stats()
5344 adapter->stats.ictxqec += rd32(E1000_ICTXQEC); in igb_update_stats()
5345 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); in igb_update_stats()
5346 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); in igb_update_stats()
5349 net_stats->multicast = adapter->stats.mprc; in igb_update_stats()
5350 net_stats->collisions = adapter->stats.colc; in igb_update_stats()
5357 net_stats->rx_errors = adapter->stats.rxerrc + in igb_update_stats()
5358 adapter->stats.crcerrs + adapter->stats.algnerrc + in igb_update_stats()
5359 adapter->stats.ruc + adapter->stats.roc + in igb_update_stats()
5360 adapter->stats.cexterr; in igb_update_stats()
5361 net_stats->rx_length_errors = adapter->stats.ruc + in igb_update_stats()
5362 adapter->stats.roc; in igb_update_stats()
5363 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igb_update_stats()
5364 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igb_update_stats()
5365 net_stats->rx_missed_errors = adapter->stats.mpc; in igb_update_stats()
5368 net_stats->tx_errors = adapter->stats.ecol + in igb_update_stats()
5369 adapter->stats.latecol; in igb_update_stats()
5370 net_stats->tx_aborted_errors = adapter->stats.ecol; in igb_update_stats()
5371 net_stats->tx_window_errors = adapter->stats.latecol; in igb_update_stats()
5372 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igb_update_stats()
5377 adapter->stats.mgptc += rd32(E1000_MGTPTC); in igb_update_stats()
5378 adapter->stats.mgprc += rd32(E1000_MGTPRC); in igb_update_stats()
5379 adapter->stats.mgpdc += rd32(E1000_MGTPDC); in igb_update_stats()
5384 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); in igb_update_stats()
5385 adapter->stats.o2bspc += rd32(E1000_O2BSPC); in igb_update_stats()
5386 adapter->stats.b2ospc += rd32(E1000_B2OSPC); in igb_update_stats()
5387 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); in igb_update_stats()
5391 static void igb_tsync_interrupt(struct igb_adapter *adapter) in igb_tsync_interrupt() argument
5393 struct e1000_hw *hw = &adapter->hw; in igb_tsync_interrupt()
5400 if (adapter->ptp_caps.pps) in igb_tsync_interrupt()
5401 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
5403 dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); in igb_tsync_interrupt()
5409 schedule_work(&adapter->ptp_tx_work); in igb_tsync_interrupt()
5414 spin_lock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5415 ts = timespec64_add(adapter->perout[0].start, in igb_tsync_interrupt()
5416 adapter->perout[0].period); in igb_tsync_interrupt()
5423 adapter->perout[0].start = ts; in igb_tsync_interrupt()
5424 spin_unlock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5429 spin_lock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5430 ts = timespec64_add(adapter->perout[1].start, in igb_tsync_interrupt()
5431 adapter->perout[1].period); in igb_tsync_interrupt()
5437 adapter->perout[1].start = ts; in igb_tsync_interrupt()
5438 spin_unlock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5448 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
5458 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
5468 struct igb_adapter *adapter = data; in igb_msix_other() local
5469 struct e1000_hw *hw = &adapter->hw; in igb_msix_other()
5474 schedule_work(&adapter->reset_task); in igb_msix_other()
5478 adapter->stats.doosync++; in igb_msix_other()
5483 igb_check_wvbr(adapter); in igb_msix_other()
5488 igb_msg_task(adapter); in igb_msix_other()
5493 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_msix_other()
5494 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_msix_other()
5498 igb_tsync_interrupt(adapter); in igb_msix_other()
5500 wr32(E1000_EIMS, adapter->eims_other); in igb_msix_other()
5507 struct igb_adapter *adapter = q_vector->adapter; in igb_write_itr() local
5516 if (adapter->hw.mac.type == e1000_82575) in igb_write_itr()
5538 static void igb_update_tx_dca(struct igb_adapter *adapter, in igb_update_tx_dca() argument
5542 struct e1000_hw *hw = &adapter->hw; in igb_update_tx_dca()
5559 static void igb_update_rx_dca(struct igb_adapter *adapter, in igb_update_rx_dca() argument
5563 struct e1000_hw *hw = &adapter->hw; in igb_update_rx_dca()
5564 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); in igb_update_rx_dca()
5581 struct igb_adapter *adapter = q_vector->adapter; in igb_update_dca() local
5588 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); in igb_update_dca()
5591 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); in igb_update_dca()
5598 static void igb_setup_dca(struct igb_adapter *adapter) in igb_setup_dca() argument
5600 struct e1000_hw *hw = &adapter->hw; in igb_setup_dca()
5603 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) in igb_setup_dca()
5609 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_setup_dca()
5610 adapter->q_vector[i]->cpu = -1; in igb_setup_dca()
5611 igb_update_dca(adapter->q_vector[i]); in igb_setup_dca()
5618 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_notify_dca() local
5619 struct pci_dev *pdev = adapter->pdev; in __igb_notify_dca()
5620 struct e1000_hw *hw = &adapter->hw; in __igb_notify_dca()
5626 if (adapter->flags & IGB_FLAG_DCA_ENABLED) in __igb_notify_dca()
5629 adapter->flags |= IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
5631 igb_setup_dca(adapter); in __igb_notify_dca()
5636 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in __igb_notify_dca()
5642 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
5664 static int igb_vf_configure(struct igb_adapter *adapter, int vf) in igb_vf_configure() argument
5669 igb_set_vf_mac(adapter, vf, mac_addr); in igb_vf_configure()
5672 adapter->vf_data[vf].spoofchk_enabled = true; in igb_vf_configure()
5678 static void igb_ping_all_vfs(struct igb_adapter *adapter) in igb_ping_all_vfs() argument
5680 struct e1000_hw *hw = &adapter->hw; in igb_ping_all_vfs()
5684 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { in igb_ping_all_vfs()
5686 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) in igb_ping_all_vfs()
5692 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) in igb_set_vf_promisc() argument
5694 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_promisc()
5696 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_promisc()
5731 static int igb_set_vf_multicasts(struct igb_adapter *adapter, in igb_set_vf_multicasts() argument
5736 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_multicasts()
5754 igb_set_rx_mode(adapter->netdev); in igb_set_vf_multicasts()
5759 static void igb_restore_vf_multicasts(struct igb_adapter *adapter) in igb_restore_vf_multicasts() argument
5761 struct e1000_hw *hw = &adapter->hw; in igb_restore_vf_multicasts()
5765 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_restore_vf_multicasts()
5770 vf_data = &adapter->vf_data[i]; in igb_restore_vf_multicasts()
5784 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) in igb_clear_vf_vfta() argument
5786 struct e1000_hw *hw = &adapter->hw; in igb_clear_vf_vfta()
5810 adapter->vf_data[vf].vlans_enabled = 0; in igb_clear_vf_vfta()
5813 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) in igb_vlvf_set() argument
5815 struct e1000_hw *hw = &adapter->hw; in igb_vlvf_set()
5823 if (!adapter->vfs_allocated_count) in igb_vlvf_set()
5861 if (vf >= adapter->vfs_allocated_count) in igb_vlvf_set()
5864 if (!adapter->vf_data[vf].vlans_enabled) { in igb_vlvf_set()
5875 adapter->vf_data[vf].vlans_enabled++; in igb_vlvf_set()
5889 if (vf >= adapter->vfs_allocated_count) in igb_vlvf_set()
5892 adapter->vf_data[vf].vlans_enabled--; in igb_vlvf_set()
5893 if (!adapter->vf_data[vf].vlans_enabled) { in igb_vlvf_set()
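The igb_vlvf_set() entries show a per-VF vlans_enabled count being incremented and decremented as VLAN filters are added and removed, with extra work done when the count leaves or returns to zero. A generic reference-count sketch of that pattern follows; resize_vf_buffers() and the array names are purely illustrative.

#include <stdio.h>

#define MAX_VFS 8

static int vf_vlans_enabled[MAX_VFS];

static void resize_vf_buffers(int vf, int vlan_aware)
{
        printf("VF %d: vlan-aware sizing %s\n", vf, vlan_aware ? "on" : "off");
}

static void vlvf_set(int vf, int add)
{
        if (add) {
                if (!vf_vlans_enabled[vf])      /* first VLAN for this VF */
                        resize_vf_buffers(vf, 1);
                vf_vlans_enabled[vf]++;
        } else {
                vf_vlans_enabled[vf]--;
                if (!vf_vlans_enabled[vf])      /* last VLAN removed */
                        resize_vf_buffers(vf, 0);
        }
}

int main(void)
{
        vlvf_set(0, 1);
        vlvf_set(0, 1);
        vlvf_set(0, 0);
        vlvf_set(0, 0);
        return 0;
}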
5908 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) in igb_set_vmvir() argument
5910 struct e1000_hw *hw = &adapter->hw; in igb_set_vmvir()
5922 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_vlan() local
5924 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) in igb_ndo_set_vf_vlan()
5927 err = igb_vlvf_set(adapter, vlan, !!vlan, vf); in igb_ndo_set_vf_vlan()
5930 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); in igb_ndo_set_vf_vlan()
5931 igb_set_vmolr(adapter, vf, !vlan); in igb_ndo_set_vf_vlan()
5932 adapter->vf_data[vf].pf_vlan = vlan; in igb_ndo_set_vf_vlan()
5933 adapter->vf_data[vf].pf_qos = qos; in igb_ndo_set_vf_vlan()
5934 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_vlan()
5936 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_ndo_set_vf_vlan()
5937 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_vlan()
5939 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_vlan()
5943 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, in igb_ndo_set_vf_vlan()
5945 igb_set_vmvir(adapter, vlan, vf); in igb_ndo_set_vf_vlan()
5946 igb_set_vmolr(adapter, vf, true); in igb_ndo_set_vf_vlan()
5947 adapter->vf_data[vf].pf_vlan = 0; in igb_ndo_set_vf_vlan()
5948 adapter->vf_data[vf].pf_qos = 0; in igb_ndo_set_vf_vlan()
5954 static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) in igb_find_vlvf_entry() argument
5956 struct e1000_hw *hw = &adapter->hw; in igb_find_vlvf_entry()
5974 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) in igb_set_vf_vlan() argument
5976 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_vlan()
5984 if (add && (adapter->netdev->flags & IFF_PROMISC)) in igb_set_vf_vlan()
5985 err = igb_vlvf_set(adapter, vid, add, in igb_set_vf_vlan()
5986 adapter->vfs_allocated_count); in igb_set_vf_vlan()
5990 err = igb_vlvf_set(adapter, vid, add, vf); in igb_set_vf_vlan()
5998 if (!add && (adapter->netdev->flags & IFF_PROMISC)) { in igb_set_vf_vlan()
6000 int regndx = igb_find_vlvf_entry(adapter, vid); in igb_set_vf_vlan()
6009 adapter->vfs_allocated_count); in igb_set_vf_vlan()
6015 !test_bit(vid, adapter->active_vlans) && in igb_set_vf_vlan()
6017 igb_vlvf_set(adapter, vid, add, in igb_set_vf_vlan()
6018 adapter->vfs_allocated_count); in igb_set_vf_vlan()
6025 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) in igb_vf_reset() argument
6028 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_vf_reset()
6029 adapter->vf_data[vf].last_nack = jiffies; in igb_vf_reset()
6032 igb_set_vmolr(adapter, vf, true); in igb_vf_reset()
6035 igb_clear_vf_vfta(adapter, vf); in igb_vf_reset()
6036 if (adapter->vf_data[vf].pf_vlan) in igb_vf_reset()
6037 igb_ndo_set_vf_vlan(adapter->netdev, vf, in igb_vf_reset()
6038 adapter->vf_data[vf].pf_vlan, in igb_vf_reset()
6039 adapter->vf_data[vf].pf_qos); in igb_vf_reset()
6041 igb_clear_vf_vfta(adapter, vf); in igb_vf_reset()
6044 adapter->vf_data[vf].num_vf_mc_hashes = 0; in igb_vf_reset()
6047 igb_set_rx_mode(adapter->netdev); in igb_vf_reset()
6050 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) in igb_vf_reset_event() argument
6052 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_event()
6055 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) in igb_vf_reset_event()
6059 igb_vf_reset(adapter, vf); in igb_vf_reset_event()
6062 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) in igb_vf_reset_msg() argument
6064 struct e1000_hw *hw = &adapter->hw; in igb_vf_reset_msg()
6065 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_msg()
6071 igb_vf_reset(adapter, vf); in igb_vf_reset_msg()
6074 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); in igb_vf_reset_msg()
6082 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; in igb_vf_reset_msg()
6094 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) in igb_set_vf_mac_addr() argument
6103 err = igb_set_vf_mac(adapter, vf, addr); in igb_set_vf_mac_addr()
6108 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) in igb_rcv_ack_from_vf() argument
6110 struct e1000_hw *hw = &adapter->hw; in igb_rcv_ack_from_vf()
6111 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_ack_from_vf()
6122 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) in igb_rcv_msg_from_vf() argument
6124 struct pci_dev *pdev = adapter->pdev; in igb_rcv_msg_from_vf()
6126 struct e1000_hw *hw = &adapter->hw; in igb_rcv_msg_from_vf()
6127 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_msg_from_vf()
6149 igb_vf_reset_msg(adapter, vf); in igb_rcv_msg_from_vf()
6164 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6171 retval = igb_set_vf_promisc(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6174 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6177 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); in igb_rcv_msg_from_vf()
6186 retval = igb_set_vf_vlan(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6205 static void igb_msg_task(struct igb_adapter *adapter) in igb_msg_task() argument
6207 struct e1000_hw *hw = &adapter->hw; in igb_msg_task()
6210 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { in igb_msg_task()
6213 igb_vf_reset_event(adapter, vf); in igb_msg_task()
6217 igb_rcv_msg_from_vf(adapter, vf); in igb_msg_task()
6221 igb_rcv_ack_from_vf(adapter, vf); in igb_msg_task()
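igb_rcv_msg_from_vf() dispatches on the request carried in the first mailbox word (set MAC address, promiscuous mode, multicast list, RLPML, VLAN), while igb_msg_task() polls every allocated VF for reset, message and ack events. The toy dispatcher below only illustrates that opcode-switch shape; the enum values and handler names are hypothetical, not the driver's mailbox protocol constants.

#include <stdint.h>
#include <stdio.h>

enum vf_op { VF_SET_MAC = 1, VF_SET_MULTICAST = 2, VF_SET_VLAN = 3 };

static int handle_set_mac(const uint32_t *msg)  { (void)msg; puts("set mac");  return 0; }
static int handle_set_mc(const uint32_t *msg)   { (void)msg; puts("set mc");   return 0; }
static int handle_set_vlan(const uint32_t *msg) { (void)msg; puts("set vlan"); return 0; }

static int rcv_msg_from_vf(const uint32_t *msgbuf)
{
        switch (msgbuf[0] & 0xFFFF) {           /* low bits select the request */
        case VF_SET_MAC:       return handle_set_mac(msgbuf);
        case VF_SET_MULTICAST: return handle_set_mc(msgbuf);
        case VF_SET_VLAN:      return handle_set_vlan(msgbuf);
        default:               return -1;       /* unknown requests get a NACK */
        }
}

int main(void)
{
        uint32_t msg[4] = { VF_SET_VLAN, 100, 0, 0 };
        return rcv_msg_from_vf(msg) ? 1 : 0;
}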
6235 static void igb_set_uta(struct igb_adapter *adapter) in igb_set_uta() argument
6237 struct e1000_hw *hw = &adapter->hw; in igb_set_uta()
6245 if (!adapter->vfs_allocated_count) in igb_set_uta()
6259 struct igb_adapter *adapter = data; in igb_intr_msi() local
6260 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr_msi()
6261 struct e1000_hw *hw = &adapter->hw; in igb_intr_msi()
6268 schedule_work(&adapter->reset_task); in igb_intr_msi()
6272 adapter->stats.doosync++; in igb_intr_msi()
6277 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr_msi()
6278 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr_msi()
6282 igb_tsync_interrupt(adapter); in igb_intr_msi()
6296 struct igb_adapter *adapter = data; in igb_intr() local
6297 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr()
6298 struct e1000_hw *hw = &adapter->hw; in igb_intr()
6313 schedule_work(&adapter->reset_task); in igb_intr()
6317 adapter->stats.doosync++; in igb_intr()
6323 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr()
6324 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr()
6328 igb_tsync_interrupt(adapter); in igb_intr()
6337 struct igb_adapter *adapter = q_vector->adapter; in igb_ring_irq_enable() local
6338 struct e1000_hw *hw = &adapter->hw; in igb_ring_irq_enable()
6340 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igb_ring_irq_enable()
6341 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igb_ring_irq_enable()
6342 if ((adapter->num_q_vectors == 1) && !adapter->vf_data) in igb_ring_irq_enable()
6348 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_ring_irq_enable()
6349 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_ring_irq_enable()
6352 igb_irq_enable(adapter); in igb_ring_irq_enable()
6370 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) in igb_poll()
6402 struct igb_adapter *adapter = q_vector->adapter; in igb_clean_tx_irq() local
6410 if (test_bit(__IGB_DOWN, &adapter->state)) in igb_clean_tx_irq()
6501 struct e1000_hw *hw = &adapter->hw; in igb_clean_tx_irq()
6509 (adapter->tx_timeout_factor * HZ)) && in igb_clean_tx_irq()
6552 !(test_bit(__IGB_DOWN, &adapter->state))) { in igb_clean_tx_irq()
7095 struct igb_adapter *adapter = netdev_priv(netdev); in igb_mii_ioctl() local
7098 if (adapter->hw.phy.media_type != e1000_media_type_copper) in igb_mii_ioctl()
7103 data->phy_id = adapter->hw.phy.addr; in igb_mii_ioctl()
7106 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, in igb_mii_ioctl()
7141 struct igb_adapter *adapter = hw->back; in igb_read_pci_cfg() local
7143 pci_read_config_word(adapter->pdev, reg, value); in igb_read_pci_cfg()
7148 struct igb_adapter *adapter = hw->back; in igb_write_pci_cfg() local
7150 pci_write_config_word(adapter->pdev, reg, *value); in igb_write_pci_cfg()
7155 struct igb_adapter *adapter = hw->back; in igb_read_pcie_cap_reg() local
7157 if (pcie_capability_read_word(adapter->pdev, reg, value)) in igb_read_pcie_cap_reg()
7165 struct igb_adapter *adapter = hw->back; in igb_write_pcie_cap_reg() local
7167 if (pcie_capability_write_word(adapter->pdev, reg, *value)) in igb_write_pcie_cap_reg()
7175 struct igb_adapter *adapter = netdev_priv(netdev); in igb_vlan_mode() local
7176 struct e1000_hw *hw = &adapter->hw; in igb_vlan_mode()
7197 igb_rlpml_set(adapter); in igb_vlan_mode()
7203 struct igb_adapter *adapter = netdev_priv(netdev); in igb_vlan_rx_add_vid() local
7204 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_add_vid()
7205 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_add_vid()
7208 igb_vlvf_set(adapter, vid, true, pf_id); in igb_vlan_rx_add_vid()
7213 set_bit(vid, adapter->active_vlans); in igb_vlan_rx_add_vid()
7221 struct igb_adapter *adapter = netdev_priv(netdev); in igb_vlan_rx_kill_vid() local
7222 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_kill_vid()
7223 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_kill_vid()
7227 err = igb_vlvf_set(adapter, vid, false, pf_id); in igb_vlan_rx_kill_vid()
7233 clear_bit(vid, adapter->active_vlans); in igb_vlan_rx_kill_vid()
7238 static void igb_restore_vlan(struct igb_adapter *adapter) in igb_restore_vlan() argument
7242 igb_vlan_mode(adapter->netdev, adapter->netdev->features); in igb_restore_vlan()
7244 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in igb_restore_vlan()
7245 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in igb_restore_vlan()
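igb_restore_vlan() replays every VLAN ID recorded in adapter->active_vlans through igb_vlan_rx_add_vid(). The sketch below models the same walk over a VID bitmap in plain C; the bitmap helpers are hand-rolled stand-ins for the kernel's for_each_set_bit().

#include <stdint.h>
#include <stdio.h>

#define VLAN_N_VID    4096
#define BITS_PER_WORD 64

static uint64_t active_vlans[VLAN_N_VID / BITS_PER_WORD];

static void set_vid(unsigned int vid)
{
        active_vlans[vid / BITS_PER_WORD] |= 1ULL << (vid % BITS_PER_WORD);
}

int main(void)
{
        set_vid(1); set_vid(100); set_vid(4094);

        /* walk every set bit, as for_each_set_bit() does in the driver */
        for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
                if (active_vlans[vid / BITS_PER_WORD] & (1ULL << (vid % BITS_PER_WORD)))
                        printf("re-adding VID %u\n", vid);   /* stands in for igb_vlan_rx_add_vid() */
        return 0;
}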
7248 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) in igb_set_spd_dplx() argument
7250 struct pci_dev *pdev = adapter->pdev; in igb_set_spd_dplx()
7251 struct e1000_mac_info *mac = &adapter->hw.mac; in igb_set_spd_dplx()
7264 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { in igb_set_spd_dplx()
7290 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; in igb_set_spd_dplx()
7298 adapter->hw.phy.mdix = AUTO_ALL_MODES; in igb_set_spd_dplx()
7311 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_shutdown() local
7312 struct e1000_hw *hw = &adapter->hw; in __igb_shutdown()
7314 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; in __igb_shutdown()
7324 igb_clear_interrupt_scheme(adapter); in __igb_shutdown()
7337 igb_setup_rctl(adapter); in __igb_shutdown()
7365 *enable_wake = wufc || adapter->en_mng_pt; in __igb_shutdown()
7367 igb_power_down_link(adapter); in __igb_shutdown()
7369 igb_power_up_link(adapter); in __igb_shutdown()
7374 igb_release_hw_control(adapter); in __igb_shutdown()
7408 struct igb_adapter *adapter = netdev_priv(netdev); in igb_resume() local
7409 struct e1000_hw *hw = &adapter->hw; in igb_resume()
7429 if (igb_init_interrupt_scheme(adapter, true)) { in igb_resume()
7435 igb_reset(adapter); in igb_resume()
7440 igb_get_hw_control(adapter); in igb_resume()
7460 struct igb_adapter *adapter = netdev_priv(netdev); in igb_runtime_idle() local
7462 if (!igb_has_link(adapter)) in igb_runtime_idle()
7510 struct igb_adapter *adapter = netdev_priv(netdev); in igb_sriov_reinit() local
7511 struct pci_dev *pdev = adapter->pdev; in igb_sriov_reinit()
7518 igb_reset(adapter); in igb_sriov_reinit()
7520 igb_clear_interrupt_scheme(adapter); in igb_sriov_reinit()
7522 igb_init_queue_configuration(adapter); in igb_sriov_reinit()
7524 if (igb_init_interrupt_scheme(adapter, true)) { in igb_sriov_reinit()
7582 struct igb_adapter *adapter = netdev_priv(netdev); in igb_netpoll() local
7583 struct e1000_hw *hw = &adapter->hw; in igb_netpoll()
7587 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_netpoll()
7588 q_vector = adapter->q_vector[i]; in igb_netpoll()
7589 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_netpoll()
7592 igb_irq_disable(adapter); in igb_netpoll()
7610 struct igb_adapter *adapter = netdev_priv(netdev); in igb_io_error_detected() local
7618 igb_down(adapter); in igb_io_error_detected()
7635 struct igb_adapter *adapter = netdev_priv(netdev); in igb_io_slot_reset() local
7636 struct e1000_hw *hw = &adapter->hw; in igb_io_slot_reset()
7652 igb_reset(adapter); in igb_io_slot_reset()
7679 struct igb_adapter *adapter = netdev_priv(netdev); in igb_io_resume() local
7682 if (igb_up(adapter)) { in igb_io_resume()
7693 igb_get_hw_control(adapter); in igb_io_resume()
7696 static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, in igb_rar_set_qsel() argument
7700 struct e1000_hw *hw = &adapter->hw; in igb_rar_set_qsel()
7723 static int igb_set_vf_mac(struct igb_adapter *adapter, in igb_set_vf_mac() argument
7726 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_mac()
7732 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); in igb_set_vf_mac()
7734 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); in igb_set_vf_mac()
7741 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_mac() local
7742 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) in igb_ndo_set_vf_mac()
7744 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; in igb_ndo_set_vf_mac()
7745 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); in igb_ndo_set_vf_mac()
7746 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
7748 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_ndo_set_vf_mac()
7749 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
7751 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
7754 return igb_set_vf_mac(adapter, vf, mac); in igb_ndo_set_vf_mac()
7798 static void igb_check_vf_rate_limit(struct igb_adapter *adapter) in igb_check_vf_rate_limit() argument
7804 if ((adapter->vf_rate_link_speed == 0) || in igb_check_vf_rate_limit()
7805 (adapter->hw.mac.type != e1000_82576)) in igb_check_vf_rate_limit()
7808 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_check_vf_rate_limit()
7809 if (actual_link_speed != adapter->vf_rate_link_speed) { in igb_check_vf_rate_limit()
7811 adapter->vf_rate_link_speed = 0; in igb_check_vf_rate_limit()
7812 dev_info(&adapter->pdev->dev, in igb_check_vf_rate_limit()
7816 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_check_vf_rate_limit()
7818 adapter->vf_data[i].tx_rate = 0; in igb_check_vf_rate_limit()
7820 igb_set_vf_rate_limit(&adapter->hw, i, in igb_check_vf_rate_limit()
7821 adapter->vf_data[i].tx_rate, in igb_check_vf_rate_limit()
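igb_check_vf_rate_limit() bails out unless per-VF rate limits were programmed (vf_rate_link_speed != 0) on an 82576, and clears the stored per-VF rates whenever the current link speed no longer matches the speed they were programmed against. A compact model of that check follows, with assumed names throughout.

#include <stdio.h>

#define MAX_VFS 4

static int vf_rate_link_speed;          /* speed the limits were set at, 0 = unset */
static int vf_tx_rate[MAX_VFS];

static void check_vf_rate_limit(int actual_link_speed)
{
        if (vf_rate_link_speed == 0)
                return;                 /* no limits programmed, nothing to do */

        if (actual_link_speed != vf_rate_link_speed) {
                vf_rate_link_speed = 0;
                printf("link speed changed, resetting VF rate limits\n");
                for (int i = 0; i < MAX_VFS; i++)
                        vf_tx_rate[i] = 0;      /* driver also re-programs the hardware */
        }
}

int main(void)
{
        vf_rate_link_speed = 1000;      /* limits were set while linked at 1000 Mb/s */
        vf_tx_rate[0] = 100;
        check_vf_rate_limit(100);       /* link dropped to 100 Mb/s */
        return 0;
}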
7829 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_bw() local
7830 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_bw()
7839 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_ndo_set_vf_bw()
7840 if ((vf >= adapter->vfs_allocated_count) || in igb_ndo_set_vf_bw()
7846 adapter->vf_rate_link_speed = actual_link_speed; in igb_ndo_set_vf_bw()
7847 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; in igb_ndo_set_vf_bw()
7856 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_spoofchk() local
7857 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_spoofchk()
7860 if (!adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
7863 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
7876 adapter->vf_data[vf].spoofchk_enabled = setting; in igb_ndo_set_vf_spoofchk()
7883 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_get_vf_config() local
7884 if (vf >= adapter->vfs_allocated_count) in igb_ndo_get_vf_config()
7887 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); in igb_ndo_get_vf_config()
7888 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; in igb_ndo_get_vf_config()
7890 ivi->vlan = adapter->vf_data[vf].pf_vlan; in igb_ndo_get_vf_config()
7891 ivi->qos = adapter->vf_data[vf].pf_qos; in igb_ndo_get_vf_config()
7892 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; in igb_ndo_get_vf_config()
7896 static void igb_vmm_control(struct igb_adapter *adapter) in igb_vmm_control() argument
7898 struct e1000_hw *hw = &adapter->hw; in igb_vmm_control()
7926 if (adapter->vfs_allocated_count) { in igb_vmm_control()
7930 adapter->vfs_allocated_count); in igb_vmm_control()
7937 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) in igb_init_dmac() argument
7939 struct e1000_hw *hw = &adapter->hw; in igb_init_dmac()
7944 if (adapter->flags & IGB_FLAG_DMAC) { in igb_init_dmac()
7954 hwm = 64 * pba - adapter->max_frame_size / 16; in igb_init_dmac()
7966 dmac_thr = pba - adapter->max_frame_size / 512; in igb_init_dmac()
7999 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); in igb_init_dmac()
8029 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); in igb_read_i2c_byte() local
8030 struct i2c_client *this_client = adapter->i2c_client; in igb_read_i2c_byte()
8066 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); in igb_write_i2c_byte() local
8067 struct i2c_client *this_client = adapter->i2c_client; in igb_write_i2c_byte()
8086 int igb_reinit_queues(struct igb_adapter *adapter) in igb_reinit_queues() argument
8088 struct net_device *netdev = adapter->netdev; in igb_reinit_queues()
8089 struct pci_dev *pdev = adapter->pdev; in igb_reinit_queues()
8095 igb_reset_interrupt_capability(adapter); in igb_reinit_queues()
8097 if (igb_init_interrupt_scheme(adapter, true)) { in igb_reinit_queues()