Lines matching refs: np

964 static bool nv_optimized(struct fe_priv *np)  in nv_optimized()  argument
966 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) in nv_optimized()
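The excerpt shows only the signature and the version test; a plausible reconstruction of the whole helper, hedged and inferred from how the rest of the listing branches on it, is:

static bool nv_optimized(struct fe_priv *np)
{
	/* Legacy descriptor formats (versions 1 and 2) use the "orig" ring
	 * layout; anything newer uses the extended "ex" descriptors served
	 * by the *_optimized paths below. */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}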
1001 struct fe_priv *np = get_nvpriv(dev); in setup_hw_rings() local
1004 if (!nv_optimized(np)) { in setup_hw_rings()
1006 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1008 …writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysA… in setup_hw_rings()
1011 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); in setup_hw_rings()
1012 writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh); in setup_hw_rings()
1015 …writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPh… in setup_hw_rings()
1016 …writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingP… in setup_hw_rings()
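From the writes above, both descriptor rings appear to share a single DMA allocation: the RX descriptors start at np->ring_addr and the TX descriptors follow immediately after the rx_ring_size RX entries. A minimal sketch of that address math, assuming that single-allocation layout:

/* Sketch only: assumes one contiguous allocation holding the RX ring
 * followed by the TX ring, as the offsets above suggest. */
dma_addr_t rx_base = np->ring_addr;
dma_addr_t tx_base = nv_optimized(np)
	? np->ring_addr + np->rx_ring_size * sizeof(struct ring_desc_ex)
	: np->ring_addr + np->rx_ring_size * sizeof(struct ring_desc);

writel(dma_low(rx_base), base + NvRegRxRingPhysAddr);
writel(dma_low(tx_base), base + NvRegTxRingPhysAddr);
/* The extended-descriptor branch additionally programs dma_high() of both
 * bases into the *RingPhysAddrHigh registers for 64-bit DMA. */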
1023 struct fe_priv *np = get_nvpriv(dev); in free_rings() local
1025 if (!nv_optimized(np)) { in free_rings()
1026 if (np->rx_ring.orig) in free_rings()
1027 … pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), in free_rings()
1028 np->rx_ring.orig, np->ring_addr); in free_rings()
1030 if (np->rx_ring.ex) in free_rings()
1031 …pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_siz… in free_rings()
1032 np->rx_ring.ex, np->ring_addr); in free_rings()
1034 kfree(np->rx_skb); in free_rings()
1035 kfree(np->tx_skb); in free_rings()
1040 struct fe_priv *np = get_nvpriv(dev); in using_multi_irqs() local
1042 if (!(np->msi_flags & NV_MSI_X_ENABLED) || in using_multi_irqs()
1043 ((np->msi_flags & NV_MSI_X_ENABLED) && in using_multi_irqs()
1044 ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) in using_multi_irqs()
1052 struct fe_priv *np = get_nvpriv(dev); in nv_txrx_gate() local
1056 if (!np->mac_in_use && in nv_txrx_gate()
1057 (np->driver_data & DEV_HAS_POWER_CNTRL)) { in nv_txrx_gate()
1069 struct fe_priv *np = get_nvpriv(dev); in nv_enable_irq() local
1072 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_enable_irq()
1073 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_enable_irq()
1075 enable_irq(np->pci_dev->irq); in nv_enable_irq()
1077 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_enable_irq()
1078 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_enable_irq()
1079 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_enable_irq()
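Grouping the lines above by branch gives a hedged reconstruction of the enable path; nv_disable_irq(), listed next, mirrors it with disable_irq():

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		/* Single interrupt: either the MSI-X "ALL" vector or the
		 * legacy/MSI line on the PCI device. */
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		/* Split vectors: RX, TX and "other" events each get their own. */
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}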
1085 struct fe_priv *np = get_nvpriv(dev); in nv_disable_irq() local
1088 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_disable_irq()
1089 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector); in nv_disable_irq()
1091 disable_irq(np->pci_dev->irq); in nv_disable_irq()
1093 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); in nv_disable_irq()
1094 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector); in nv_disable_irq()
1095 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector); in nv_disable_irq()
1109 struct fe_priv *np = get_nvpriv(dev); in nv_disable_hw_interrupts() local
1112 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_disable_hw_interrupts()
1115 if (np->msi_flags & NV_MSI_ENABLED) in nv_disable_hw_interrupts()
1123 struct fe_priv *np = get_nvpriv(dev); in nv_napi_enable() local
1125 napi_enable(&np->napi); in nv_napi_enable()
1130 struct fe_priv *np = get_nvpriv(dev); in nv_napi_disable() local
1132 napi_disable(&np->napi); in nv_napi_disable()
1178 struct fe_priv *np = netdev_priv(dev); in phy_reset() local
1183 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) in phy_reset()
1192 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_reset()
1200 static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np) in init_realtek_8211b() argument
1217 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init)) in init_realtek_8211b()
1224 static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np) in init_realtek_8211c() argument
1239 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ); in init_realtek_8211c()
1241 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) in init_realtek_8211c()
1243 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1246 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ); in init_realtek_8211c()
1249 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) in init_realtek_8211c()
1252 if (mii_rw(dev, np->phyaddr, in init_realtek_8211c()
1259 static int init_realtek_8201(struct net_device *dev, struct fe_priv *np) in init_realtek_8201() argument
1263 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) { in init_realtek_8201()
1264 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201()
1267 if (mii_rw(dev, np->phyaddr, in init_realtek_8201()
1275 static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np) in init_realtek_8201_cross() argument
1280 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1283 phy_reserved = mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1287 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1290 if (mii_rw(dev, np->phyaddr, in init_realtek_8201_cross()
1298 static int init_cicada(struct net_device *dev, struct fe_priv *np, in init_cicada() argument
1304 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); in init_cicada()
1307 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) in init_cicada()
1309 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in init_cicada()
1311 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) in init_cicada()
1314 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); in init_cicada()
1316 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) in init_cicada()
1322 static int init_vitesse(struct net_device *dev, struct fe_priv *np) in init_vitesse() argument
1326 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1329 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1332 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1334 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1336 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1340 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1342 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1345 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1348 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1352 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1354 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1356 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1358 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1361 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1364 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1366 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) in init_vitesse()
1368 phy_reserved = mii_rw(dev, np->phyaddr, in init_vitesse()
1372 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) in init_vitesse()
1374 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1377 if (mii_rw(dev, np->phyaddr, in init_vitesse()
1386 struct fe_priv *np = get_nvpriv(dev); in phy_init() local
1392 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in phy_init()
1393 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); in phy_init()
1395 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { in phy_init()
1397 pci_name(np->pci_dev)); in phy_init()
1401 if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1402 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1403 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1404 if (init_realtek_8211b(dev, np)) { in phy_init()
1406 pci_name(np->pci_dev)); in phy_init()
1409 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1410 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1411 if (init_realtek_8211c(dev, np)) { in phy_init()
1413 pci_name(np->pci_dev)); in phy_init()
1416 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1417 if (init_realtek_8201(dev, np)) { in phy_init()
1419 pci_name(np->pci_dev)); in phy_init()
1426 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in phy_init()
1430 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { in phy_init()
1432 pci_name(np->pci_dev)); in phy_init()
1440 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in phy_init()
1442 np->gigabit = PHY_GIGABIT; in phy_init()
1443 mii_control_1000 = mii_rw(dev, np->phyaddr, in phy_init()
1451 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { in phy_init()
1453 pci_name(np->pci_dev)); in phy_init()
1457 np->gigabit = 0; in phy_init()
1459 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1462 if (np->phy_oui == PHY_OUI_REALTEK && in phy_init()
1463 np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1464 np->phy_rev == PHY_REV_REALTEK_8211C) { in phy_init()
1467 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { in phy_init()
1469 pci_name(np->pci_dev)); in phy_init()
1478 pci_name(np->pci_dev)); in phy_init()
1484 if (np->phy_oui == PHY_OUI_CICADA) { in phy_init()
1485 if (init_cicada(dev, np, phyinterface)) { in phy_init()
1487 pci_name(np->pci_dev)); in phy_init()
1490 } else if (np->phy_oui == PHY_OUI_VITESSE) { in phy_init()
1491 if (init_vitesse(dev, np)) { in phy_init()
1493 pci_name(np->pci_dev)); in phy_init()
1496 } else if (np->phy_oui == PHY_OUI_REALTEK) { in phy_init()
1497 if (np->phy_model == PHY_MODEL_REALTEK_8211 && in phy_init()
1498 np->phy_rev == PHY_REV_REALTEK_8211B) { in phy_init()
1500 if (init_realtek_8211b(dev, np)) { in phy_init()
1502 pci_name(np->pci_dev)); in phy_init()
1505 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) { in phy_init()
1506 if (init_realtek_8201(dev, np) || in phy_init()
1507 init_realtek_8201_cross(dev, np)) { in phy_init()
1509 pci_name(np->pci_dev)); in phy_init()
1516 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); in phy_init()
1519 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in phy_init()
1523 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) in phy_init()
1531 struct fe_priv *np = netdev_priv(dev); in nv_start_rx() local
1536 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { in nv_start_rx()
1541 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_start_rx()
1544 if (np->mac_in_use) in nv_start_rx()
1552 struct fe_priv *np = netdev_priv(dev); in nv_stop_rx() local
1556 if (!np->mac_in_use) in nv_stop_rx()
1567 if (!np->mac_in_use) in nv_stop_rx()
1573 struct fe_priv *np = netdev_priv(dev); in nv_start_tx() local
1578 if (np->mac_in_use) in nv_start_tx()
1586 struct fe_priv *np = netdev_priv(dev); in nv_stop_tx() local
1590 if (!np->mac_in_use) in nv_stop_tx()
1601 if (!np->mac_in_use) in nv_stop_tx()
1620 struct fe_priv *np = netdev_priv(dev); in nv_txrx_reset() local
1623 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1626 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_txrx_reset()
1632 struct fe_priv *np = netdev_priv(dev); in nv_mac_reset() local
1636 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1656 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); in nv_mac_reset()
1663 struct fe_priv *np = netdev_priv(dev); in nv_update_stats() local
1670 assert_spin_locked(&np->hwstats_lock); in nv_update_stats()
1673 np->estats.tx_bytes += readl(base + NvRegTxCnt); in nv_update_stats()
1674 np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt); in nv_update_stats()
1675 np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt); in nv_update_stats()
1676 np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt); in nv_update_stats()
1677 np->estats.tx_late_collision += readl(base + NvRegTxLateCol); in nv_update_stats()
1678 np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow); in nv_update_stats()
1679 np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier); in nv_update_stats()
1680 np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef); in nv_update_stats()
1681 np->estats.tx_retry_error += readl(base + NvRegTxRetryErr); in nv_update_stats()
1682 np->estats.rx_frame_error += readl(base + NvRegRxFrameErr); in nv_update_stats()
1683 np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte); in nv_update_stats()
1684 np->estats.rx_late_collision += readl(base + NvRegRxLateCol); in nv_update_stats()
1685 np->estats.rx_runt += readl(base + NvRegRxRunt); in nv_update_stats()
1686 np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong); in nv_update_stats()
1687 np->estats.rx_over_errors += readl(base + NvRegRxOverflow); in nv_update_stats()
1688 np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr); in nv_update_stats()
1689 np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr); in nv_update_stats()
1690 np->estats.rx_length_error += readl(base + NvRegRxLenErr); in nv_update_stats()
1691 np->estats.rx_unicast += readl(base + NvRegRxUnicast); in nv_update_stats()
1692 np->estats.rx_multicast += readl(base + NvRegRxMulticast); in nv_update_stats()
1693 np->estats.rx_broadcast += readl(base + NvRegRxBroadcast); in nv_update_stats()
1694 np->estats.rx_packets = in nv_update_stats()
1695 np->estats.rx_unicast + in nv_update_stats()
1696 np->estats.rx_multicast + in nv_update_stats()
1697 np->estats.rx_broadcast; in nv_update_stats()
1698 np->estats.rx_errors_total = in nv_update_stats()
1699 np->estats.rx_crc_errors + in nv_update_stats()
1700 np->estats.rx_over_errors + in nv_update_stats()
1701 np->estats.rx_frame_error + in nv_update_stats()
1702 (np->estats.rx_frame_align_error - np->estats.rx_extra_byte) + in nv_update_stats()
1703 np->estats.rx_late_collision + in nv_update_stats()
1704 np->estats.rx_runt + in nv_update_stats()
1705 np->estats.rx_frame_too_long; in nv_update_stats()
1706 np->estats.tx_errors_total = in nv_update_stats()
1707 np->estats.tx_late_collision + in nv_update_stats()
1708 np->estats.tx_fifo_errors + in nv_update_stats()
1709 np->estats.tx_carrier_errors + in nv_update_stats()
1710 np->estats.tx_excess_deferral + in nv_update_stats()
1711 np->estats.tx_retry_error; in nv_update_stats()
1713 if (np->driver_data & DEV_HAS_STATISTICS_V2) { in nv_update_stats()
1714 np->estats.tx_deferral += readl(base + NvRegTxDef); in nv_update_stats()
1715 np->estats.tx_packets += readl(base + NvRegTxFrame); in nv_update_stats()
1716 np->estats.rx_bytes += readl(base + NvRegRxCnt); in nv_update_stats()
1717 np->estats.tx_pause += readl(base + NvRegTxPause); in nv_update_stats()
1718 np->estats.rx_pause += readl(base + NvRegRxPause); in nv_update_stats()
1719 np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); in nv_update_stats()
1720 np->estats.rx_errors_total += np->estats.rx_drop_frame; in nv_update_stats()
1723 if (np->driver_data & DEV_HAS_STATISTICS_V3) { in nv_update_stats()
1724 np->estats.tx_unicast += readl(base + NvRegTxUnicast); in nv_update_stats()
1725 np->estats.tx_multicast += readl(base + NvRegTxMulticast); in nv_update_stats()
1726 np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); in nv_update_stats()
1741 struct fe_priv *np = netdev_priv(dev); in nv_get_stats64() local
1756 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); in nv_get_stats64()
1757 storage->rx_packets = np->stat_rx_packets; in nv_get_stats64()
1758 storage->rx_bytes = np->stat_rx_bytes; in nv_get_stats64()
1759 storage->rx_dropped = np->stat_rx_dropped; in nv_get_stats64()
1760 storage->rx_missed_errors = np->stat_rx_missed_errors; in nv_get_stats64()
1761 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start)); in nv_get_stats64()
1764 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp); in nv_get_stats64()
1765 storage->tx_packets = np->stat_tx_packets; in nv_get_stats64()
1766 storage->tx_bytes = np->stat_tx_bytes; in nv_get_stats64()
1767 storage->tx_dropped = np->stat_tx_dropped; in nv_get_stats64()
1768 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start)); in nv_get_stats64()
1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) { in nv_get_stats64()
1772 spin_lock_bh(&np->hwstats_lock); in nv_get_stats64()
1777 storage->rx_errors = np->estats.rx_errors_total; in nv_get_stats64()
1778 storage->tx_errors = np->estats.tx_errors_total; in nv_get_stats64()
1781 storage->multicast = np->estats.rx_multicast; in nv_get_stats64()
1784 storage->rx_length_errors = np->estats.rx_length_error; in nv_get_stats64()
1785 storage->rx_over_errors = np->estats.rx_over_errors; in nv_get_stats64()
1786 storage->rx_crc_errors = np->estats.rx_crc_errors; in nv_get_stats64()
1787 storage->rx_frame_errors = np->estats.rx_frame_align_error; in nv_get_stats64()
1788 storage->rx_fifo_errors = np->estats.rx_drop_frame; in nv_get_stats64()
1791 storage->tx_carrier_errors = np->estats.tx_carrier_errors; in nv_get_stats64()
1792 storage->tx_fifo_errors = np->estats.tx_fifo_errors; in nv_get_stats64()
1794 spin_unlock_bh(&np->hwstats_lock); in nv_get_stats64()
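The do/while pairs in nv_get_stats64() are the kernel's u64_stats seqcount idiom: writers bracket every counter update, and readers retry their snapshot if a writer raced them. A minimal sketch of both sides, reusing the fields from the excerpt:

unsigned int start;
u64 rx_packets, rx_bytes;

/* Writer side (as used in nv_alloc_rx()/nv_tx_done() elsewhere in the listing). */
u64_stats_update_begin(&np->swstats_rx_syncp);
np->stat_rx_packets++;
u64_stats_update_end(&np->swstats_rx_syncp);

/* Reader side: retry until no writer ran during the snapshot. */
do {
	start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
	rx_packets = np->stat_rx_packets;
	rx_bytes = np->stat_rx_bytes;
} while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, start));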
1807 struct fe_priv *np = netdev_priv(dev); in nv_alloc_rx() local
1810 less_rx = np->get_rx.orig; in nv_alloc_rx()
1811 if (less_rx-- == np->first_rx.orig) in nv_alloc_rx()
1812 less_rx = np->last_rx.orig; in nv_alloc_rx()
1814 while (np->put_rx.orig != less_rx) { in nv_alloc_rx()
1815 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx()
1817 np->put_rx_ctx->skb = skb; in nv_alloc_rx()
1818 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, in nv_alloc_rx()
1822 if (pci_dma_mapping_error(np->pci_dev, in nv_alloc_rx()
1823 np->put_rx_ctx->dma)) { in nv_alloc_rx()
1827 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx()
1828 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma); in nv_alloc_rx()
1830 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); in nv_alloc_rx()
1831 if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) in nv_alloc_rx()
1832 np->put_rx.orig = np->first_rx.orig; in nv_alloc_rx()
1833 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx()
1834 np->put_rx_ctx = np->first_rx_ctx; in nv_alloc_rx()
1837 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx()
1838 np->stat_rx_dropped++; in nv_alloc_rx()
1839 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx()
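The advance of np->put_rx.orig (and of np->put_rx_ctx) above uses a post-increment-then-wrap idiom; written out long-hand it is equivalent to:

/* Long-hand form of: if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
 *                            np->put_rx.orig = np->first_rx.orig;          */
if (np->put_rx.orig == np->last_rx.orig)	/* just filled the last slot */
	np->put_rx.orig = np->first_rx.orig;	/* wrap to the start of the ring */
else
	np->put_rx.orig++;			/* otherwise move to the next slot */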
1848 struct fe_priv *np = netdev_priv(dev); in nv_alloc_rx_optimized() local
1851 less_rx = np->get_rx.ex; in nv_alloc_rx_optimized()
1852 if (less_rx-- == np->first_rx.ex) in nv_alloc_rx_optimized()
1853 less_rx = np->last_rx.ex; in nv_alloc_rx_optimized()
1855 while (np->put_rx.ex != less_rx) { in nv_alloc_rx_optimized()
1856 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); in nv_alloc_rx_optimized()
1858 np->put_rx_ctx->skb = skb; in nv_alloc_rx_optimized()
1859 np->put_rx_ctx->dma = pci_map_single(np->pci_dev, in nv_alloc_rx_optimized()
1863 if (pci_dma_mapping_error(np->pci_dev, in nv_alloc_rx_optimized()
1864 np->put_rx_ctx->dma)) { in nv_alloc_rx_optimized()
1868 np->put_rx_ctx->dma_len = skb_tailroom(skb); in nv_alloc_rx_optimized()
1869 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1870 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma)); in nv_alloc_rx_optimized()
1872 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); in nv_alloc_rx_optimized()
1873 if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) in nv_alloc_rx_optimized()
1874 np->put_rx.ex = np->first_rx.ex; in nv_alloc_rx_optimized()
1875 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) in nv_alloc_rx_optimized()
1876 np->put_rx_ctx = np->first_rx_ctx; in nv_alloc_rx_optimized()
1879 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
1880 np->stat_rx_dropped++; in nv_alloc_rx_optimized()
1881 u64_stats_update_end(&np->swstats_rx_syncp); in nv_alloc_rx_optimized()
1892 struct fe_priv *np = netdev_priv(dev); in nv_do_rx_refill() local
1895 napi_schedule(&np->napi); in nv_do_rx_refill()
1900 struct fe_priv *np = netdev_priv(dev); in nv_init_rx() local
1903 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; in nv_init_rx()
1905 if (!nv_optimized(np)) in nv_init_rx()
1906 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; in nv_init_rx()
1908 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; in nv_init_rx()
1909 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb; in nv_init_rx()
1910 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; in nv_init_rx()
1912 for (i = 0; i < np->rx_ring_size; i++) { in nv_init_rx()
1913 if (!nv_optimized(np)) { in nv_init_rx()
1914 np->rx_ring.orig[i].flaglen = 0; in nv_init_rx()
1915 np->rx_ring.orig[i].buf = 0; in nv_init_rx()
1917 np->rx_ring.ex[i].flaglen = 0; in nv_init_rx()
1918 np->rx_ring.ex[i].txvlan = 0; in nv_init_rx()
1919 np->rx_ring.ex[i].bufhigh = 0; in nv_init_rx()
1920 np->rx_ring.ex[i].buflow = 0; in nv_init_rx()
1922 np->rx_skb[i].skb = NULL; in nv_init_rx()
1923 np->rx_skb[i].dma = 0; in nv_init_rx()
1929 struct fe_priv *np = netdev_priv(dev); in nv_init_tx() local
1932 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; in nv_init_tx()
1934 if (!nv_optimized(np)) in nv_init_tx()
1935 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; in nv_init_tx()
1937 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; in nv_init_tx()
1938 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb; in nv_init_tx()
1939 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1]; in nv_init_tx()
1940 netdev_reset_queue(np->dev); in nv_init_tx()
1941 np->tx_pkts_in_progress = 0; in nv_init_tx()
1942 np->tx_change_owner = NULL; in nv_init_tx()
1943 np->tx_end_flip = NULL; in nv_init_tx()
1944 np->tx_stop = 0; in nv_init_tx()
1946 for (i = 0; i < np->tx_ring_size; i++) { in nv_init_tx()
1947 if (!nv_optimized(np)) { in nv_init_tx()
1948 np->tx_ring.orig[i].flaglen = 0; in nv_init_tx()
1949 np->tx_ring.orig[i].buf = 0; in nv_init_tx()
1951 np->tx_ring.ex[i].flaglen = 0; in nv_init_tx()
1952 np->tx_ring.ex[i].txvlan = 0; in nv_init_tx()
1953 np->tx_ring.ex[i].bufhigh = 0; in nv_init_tx()
1954 np->tx_ring.ex[i].buflow = 0; in nv_init_tx()
1956 np->tx_skb[i].skb = NULL; in nv_init_tx()
1957 np->tx_skb[i].dma = 0; in nv_init_tx()
1958 np->tx_skb[i].dma_len = 0; in nv_init_tx()
1959 np->tx_skb[i].dma_single = 0; in nv_init_tx()
1960 np->tx_skb[i].first_tx_desc = NULL; in nv_init_tx()
1961 np->tx_skb[i].next_tx_ctx = NULL; in nv_init_tx()
1967 struct fe_priv *np = netdev_priv(dev); in nv_init_ring() local
1972 if (!nv_optimized(np)) in nv_init_ring()
1978 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) in nv_unmap_txskb() argument
1982 pci_unmap_single(np->pci_dev, tx_skb->dma, in nv_unmap_txskb()
1986 pci_unmap_page(np->pci_dev, tx_skb->dma, in nv_unmap_txskb()
1993 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) in nv_release_txskb() argument
1995 nv_unmap_txskb(np, tx_skb); in nv_release_txskb()
2006 struct fe_priv *np = netdev_priv(dev); in nv_drain_tx() local
2009 for (i = 0; i < np->tx_ring_size; i++) { in nv_drain_tx()
2010 if (!nv_optimized(np)) { in nv_drain_tx()
2011 np->tx_ring.orig[i].flaglen = 0; in nv_drain_tx()
2012 np->tx_ring.orig[i].buf = 0; in nv_drain_tx()
2014 np->tx_ring.ex[i].flaglen = 0; in nv_drain_tx()
2015 np->tx_ring.ex[i].txvlan = 0; in nv_drain_tx()
2016 np->tx_ring.ex[i].bufhigh = 0; in nv_drain_tx()
2017 np->tx_ring.ex[i].buflow = 0; in nv_drain_tx()
2019 if (nv_release_txskb(np, &np->tx_skb[i])) { in nv_drain_tx()
2020 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_drain_tx()
2021 np->stat_tx_dropped++; in nv_drain_tx()
2022 u64_stats_update_end(&np->swstats_tx_syncp); in nv_drain_tx()
2024 np->tx_skb[i].dma = 0; in nv_drain_tx()
2025 np->tx_skb[i].dma_len = 0; in nv_drain_tx()
2026 np->tx_skb[i].dma_single = 0; in nv_drain_tx()
2027 np->tx_skb[i].first_tx_desc = NULL; in nv_drain_tx()
2028 np->tx_skb[i].next_tx_ctx = NULL; in nv_drain_tx()
2030 np->tx_pkts_in_progress = 0; in nv_drain_tx()
2031 np->tx_change_owner = NULL; in nv_drain_tx()
2032 np->tx_end_flip = NULL; in nv_drain_tx()
2037 struct fe_priv *np = netdev_priv(dev); in nv_drain_rx() local
2040 for (i = 0; i < np->rx_ring_size; i++) { in nv_drain_rx()
2041 if (!nv_optimized(np)) { in nv_drain_rx()
2042 np->rx_ring.orig[i].flaglen = 0; in nv_drain_rx()
2043 np->rx_ring.orig[i].buf = 0; in nv_drain_rx()
2045 np->rx_ring.ex[i].flaglen = 0; in nv_drain_rx()
2046 np->rx_ring.ex[i].txvlan = 0; in nv_drain_rx()
2047 np->rx_ring.ex[i].bufhigh = 0; in nv_drain_rx()
2048 np->rx_ring.ex[i].buflow = 0; in nv_drain_rx()
2051 if (np->rx_skb[i].skb) { in nv_drain_rx()
2052 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, in nv_drain_rx()
2053 (skb_end_pointer(np->rx_skb[i].skb) - in nv_drain_rx()
2054 np->rx_skb[i].skb->data), in nv_drain_rx()
2056 dev_kfree_skb(np->rx_skb[i].skb); in nv_drain_rx()
2057 np->rx_skb[i].skb = NULL; in nv_drain_rx()
2068 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np) in nv_get_empty_tx_slots() argument
2070 …return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_r… in nv_get_empty_tx_slots()
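nv_get_empty_tx_slots() derives the number of free TX descriptors from the distance between the put and get context pointers. A worked example with illustrative numbers (not taken from the driver):

/* Illustrative values only. */
u32 tx_ring_size = 256;
long in_flight = 10;	/* put_tx_ctx is 10 entries ahead of get_tx_ctx */
u32 empty = tx_ring_size - ((tx_ring_size + in_flight) % tx_ring_size);
/* empty == 246 here; the "+ tx_ring_size" term keeps the sum positive when
 * put_tx_ctx has wrapped around and the raw pointer difference is negative. */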
2187 struct fe_priv *np = netdev_priv(dev); in nv_start_xmit() local
2189 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_start_xmit()
2212 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2213 empty_slots = nv_get_empty_tx_slots(np); in nv_start_xmit()
2216 np->tx_stop = 1; in nv_start_xmit()
2217 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2220 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2222 start_tx = put_tx = np->put_tx.orig; in nv_start_xmit()
2227 prev_tx_ctx = np->put_tx_ctx; in nv_start_xmit()
2229 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, in nv_start_xmit()
2231 if (pci_dma_mapping_error(np->pci_dev, in nv_start_xmit()
2232 np->put_tx_ctx->dma)) { in nv_start_xmit()
2235 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2236 np->stat_tx_dropped++; in nv_start_xmit()
2237 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2240 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2241 np->put_tx_ctx->dma_single = 1; in nv_start_xmit()
2242 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2245 tx_flags = np->tx_flags; in nv_start_xmit()
2248 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2249 put_tx = np->first_tx.orig; in nv_start_xmit()
2250 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2251 np->put_tx_ctx = np->first_tx_ctx; in nv_start_xmit()
2262 prev_tx_ctx = np->put_tx_ctx; in nv_start_xmit()
2264 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit()
2267 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit()
2268 &np->pci_dev->dev, in nv_start_xmit()
2272 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { in nv_start_xmit()
2276 nv_unmap_txskb(np, start_tx_ctx); in nv_start_xmit()
2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2278 tmp_tx_ctx = np->first_tx_ctx; in nv_start_xmit()
2279 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit()
2281 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit()
2282 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit()
2283 np->stat_tx_dropped++; in nv_start_xmit()
2284 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit()
2288 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit()
2289 np->put_tx_ctx->dma_single = 0; in nv_start_xmit()
2290 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma); in nv_start_xmit()
2295 if (unlikely(put_tx++ == np->last_tx.orig)) in nv_start_xmit()
2296 put_tx = np->first_tx.orig; in nv_start_xmit()
2297 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit()
2298 np->put_tx_ctx = np->first_tx_ctx; in nv_start_xmit()
2314 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit()
2319 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit()
2323 np->put_tx.orig = put_tx; in nv_start_xmit()
2325 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit()
2327 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_start_xmit()
2334 struct fe_priv *np = netdev_priv(dev); in nv_start_xmit_optimized() local
2360 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2361 empty_slots = nv_get_empty_tx_slots(np); in nv_start_xmit_optimized()
2364 np->tx_stop = 1; in nv_start_xmit_optimized()
2365 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2368 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2370 start_tx = put_tx = np->put_tx.ex; in nv_start_xmit_optimized()
2371 start_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2376 prev_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2378 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, in nv_start_xmit_optimized()
2380 if (pci_dma_mapping_error(np->pci_dev, in nv_start_xmit_optimized()
2381 np->put_tx_ctx->dma)) { in nv_start_xmit_optimized()
2384 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2385 np->stat_tx_dropped++; in nv_start_xmit_optimized()
2386 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2389 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2390 np->put_tx_ctx->dma_single = 1; in nv_start_xmit_optimized()
2391 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2392 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2398 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2399 put_tx = np->first_tx.ex; in nv_start_xmit_optimized()
2400 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2401 np->put_tx_ctx = np->first_tx_ctx; in nv_start_xmit_optimized()
2412 prev_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2415 start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2416 np->put_tx_ctx->dma = skb_frag_dma_map( in nv_start_xmit_optimized()
2417 &np->pci_dev->dev, in nv_start_xmit_optimized()
2422 if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { in nv_start_xmit_optimized()
2426 nv_unmap_txskb(np, start_tx_ctx); in nv_start_xmit_optimized()
2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2428 tmp_tx_ctx = np->first_tx_ctx; in nv_start_xmit_optimized()
2429 } while (tmp_tx_ctx != np->put_tx_ctx); in nv_start_xmit_optimized()
2431 np->put_tx_ctx = start_tx_ctx; in nv_start_xmit_optimized()
2432 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2433 np->stat_tx_dropped++; in nv_start_xmit_optimized()
2434 u64_stats_update_end(&np->swstats_tx_syncp); in nv_start_xmit_optimized()
2437 np->put_tx_ctx->dma_len = bcnt; in nv_start_xmit_optimized()
2438 np->put_tx_ctx->dma_single = 0; in nv_start_xmit_optimized()
2439 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2440 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma)); in nv_start_xmit_optimized()
2445 if (unlikely(put_tx++ == np->last_tx.ex)) in nv_start_xmit_optimized()
2446 put_tx = np->first_tx.ex; in nv_start_xmit_optimized()
2447 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx)) in nv_start_xmit_optimized()
2448 np->put_tx_ctx = np->first_tx_ctx; in nv_start_xmit_optimized()
2471 spin_lock_irqsave(&np->lock, flags); in nv_start_xmit_optimized()
2473 if (np->tx_limit) { in nv_start_xmit_optimized()
2479 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) { in nv_start_xmit_optimized()
2480 if (!np->tx_change_owner) in nv_start_xmit_optimized()
2481 np->tx_change_owner = start_tx_ctx; in nv_start_xmit_optimized()
2486 start_tx_ctx->next_tx_ctx = np->put_tx_ctx; in nv_start_xmit_optimized()
2487 np->tx_end_flip = np->put_tx_ctx; in nv_start_xmit_optimized()
2489 np->tx_pkts_in_progress++; in nv_start_xmit_optimized()
2496 netdev_sent_queue(np->dev, skb->len); in nv_start_xmit_optimized()
2500 np->put_tx.ex = put_tx; in nv_start_xmit_optimized()
2502 spin_unlock_irqrestore(&np->lock, flags); in nv_start_xmit_optimized()
2504 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_start_xmit_optimized()
2510 struct fe_priv *np = netdev_priv(dev); in nv_tx_flip_ownership() local
2512 np->tx_pkts_in_progress--; in nv_tx_flip_ownership()
2513 if (np->tx_change_owner) { in nv_tx_flip_ownership()
2514 np->tx_change_owner->first_tx_desc->flaglen |= in nv_tx_flip_ownership()
2516 np->tx_pkts_in_progress++; in nv_tx_flip_ownership()
2518 np->tx_change_owner = np->tx_change_owner->next_tx_ctx; in nv_tx_flip_ownership()
2519 if (np->tx_change_owner == np->tx_end_flip) in nv_tx_flip_ownership()
2520 np->tx_change_owner = NULL; in nv_tx_flip_ownership()
2522 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_tx_flip_ownership()
2533 struct fe_priv *np = netdev_priv(dev); in nv_tx_done() local
2536 struct ring_desc *orig_get_tx = np->get_tx.orig; in nv_tx_done()
2539 while ((np->get_tx.orig != np->put_tx.orig) && in nv_tx_done()
2540 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && in nv_tx_done()
2543 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done()
2545 if (np->desc_ver == DESC_VER_1) { in nv_tx_done()
2552 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2553 np->stat_tx_packets++; in nv_tx_done()
2554 np->stat_tx_bytes += np->get_tx_ctx->skb->len; in nv_tx_done()
2555 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2557 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2558 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2559 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2569 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done()
2570 np->stat_tx_packets++; in nv_tx_done()
2571 np->stat_tx_bytes += np->get_tx_ctx->skb->len; in nv_tx_done()
2572 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done()
2574 bytes_compl += np->get_tx_ctx->skb->len; in nv_tx_done()
2575 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done()
2576 np->get_tx_ctx->skb = NULL; in nv_tx_done()
2580 if (unlikely(np->get_tx.orig++ == np->last_tx.orig)) in nv_tx_done()
2581 np->get_tx.orig = np->first_tx.orig; in nv_tx_done()
2582 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done()
2583 np->get_tx_ctx = np->first_tx_ctx; in nv_tx_done()
2586 netdev_completed_queue(np->dev, tx_work, bytes_compl); in nv_tx_done()
2588 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) { in nv_tx_done()
2589 np->tx_stop = 0; in nv_tx_done()
2597 struct fe_priv *np = netdev_priv(dev); in nv_tx_done_optimized() local
2600 struct ring_desc_ex *orig_get_tx = np->get_tx.ex; in nv_tx_done_optimized()
2603 while ((np->get_tx.ex != np->put_tx.ex) && in nv_tx_done_optimized()
2604 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && in nv_tx_done_optimized()
2607 nv_unmap_txskb(np, np->get_tx_ctx); in nv_tx_done_optimized()
2613 if (np->driver_data & DEV_HAS_GEAR_MODE) in nv_tx_done_optimized()
2619 u64_stats_update_begin(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2620 np->stat_tx_packets++; in nv_tx_done_optimized()
2621 np->stat_tx_bytes += np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2622 u64_stats_update_end(&np->swstats_tx_syncp); in nv_tx_done_optimized()
2625 bytes_cleaned += np->get_tx_ctx->skb->len; in nv_tx_done_optimized()
2626 dev_kfree_skb_any(np->get_tx_ctx->skb); in nv_tx_done_optimized()
2627 np->get_tx_ctx->skb = NULL; in nv_tx_done_optimized()
2630 if (np->tx_limit) in nv_tx_done_optimized()
2634 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) in nv_tx_done_optimized()
2635 np->get_tx.ex = np->first_tx.ex; in nv_tx_done_optimized()
2636 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx)) in nv_tx_done_optimized()
2637 np->get_tx_ctx = np->first_tx_ctx; in nv_tx_done_optimized()
2640 netdev_completed_queue(np->dev, tx_work, bytes_cleaned); in nv_tx_done_optimized()
2642 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) { in nv_tx_done_optimized()
2643 np->tx_stop = 0; in nv_tx_done_optimized()
2655 struct fe_priv *np = netdev_priv(dev); in nv_tx_timeout() local
2661 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_tx_timeout()
2671 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr); in nv_tx_timeout()
2673 for (i = 0; i <= np->register_size; i += 32) { in nv_tx_timeout()
2684 for (i = 0; i < np->tx_ring_size; i += 4) { in nv_tx_timeout()
2685 if (!nv_optimized(np)) { in nv_tx_timeout()
2690 le32_to_cpu(np->tx_ring.orig[i].buf), in nv_tx_timeout()
2691 le32_to_cpu(np->tx_ring.orig[i].flaglen), in nv_tx_timeout()
2692 le32_to_cpu(np->tx_ring.orig[i+1].buf), in nv_tx_timeout()
2693 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), in nv_tx_timeout()
2694 le32_to_cpu(np->tx_ring.orig[i+2].buf), in nv_tx_timeout()
2695 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), in nv_tx_timeout()
2696 le32_to_cpu(np->tx_ring.orig[i+3].buf), in nv_tx_timeout()
2697 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); in nv_tx_timeout()
2705 le32_to_cpu(np->tx_ring.ex[i].bufhigh), in nv_tx_timeout()
2706 le32_to_cpu(np->tx_ring.ex[i].buflow), in nv_tx_timeout()
2707 le32_to_cpu(np->tx_ring.ex[i].flaglen), in nv_tx_timeout()
2708 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), in nv_tx_timeout()
2709 le32_to_cpu(np->tx_ring.ex[i+1].buflow), in nv_tx_timeout()
2710 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), in nv_tx_timeout()
2711 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), in nv_tx_timeout()
2712 le32_to_cpu(np->tx_ring.ex[i+2].buflow), in nv_tx_timeout()
2713 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), in nv_tx_timeout()
2714 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), in nv_tx_timeout()
2715 le32_to_cpu(np->tx_ring.ex[i+3].buflow), in nv_tx_timeout()
2716 le32_to_cpu(np->tx_ring.ex[i+3].flaglen)); in nv_tx_timeout()
2721 spin_lock_irq(&np->lock); in nv_tx_timeout()
2727 saved_tx_limit = np->tx_limit; in nv_tx_timeout()
2728 np->tx_limit = 0; /* prevent giving HW any limited pkts */ in nv_tx_timeout()
2729 np->tx_stop = 0; /* prevent waking tx queue */ in nv_tx_timeout()
2730 if (!nv_optimized(np)) in nv_tx_timeout()
2731 nv_tx_done(dev, np->tx_ring_size); in nv_tx_timeout()
2733 nv_tx_done_optimized(dev, np->tx_ring_size); in nv_tx_timeout()
2736 if (np->tx_change_owner) in nv_tx_timeout()
2737 put_tx.ex = np->tx_change_owner->first_tx_desc; in nv_tx_timeout()
2739 put_tx = np->put_tx; in nv_tx_timeout()
2746 np->get_tx = np->put_tx = put_tx; in nv_tx_timeout()
2747 np->tx_limit = saved_tx_limit; in nv_tx_timeout()
2752 spin_unlock_irq(&np->lock); in nv_tx_timeout()
2800 struct fe_priv *np = netdev_priv(dev); in nv_rx_process() local
2806 while ((np->get_rx.orig != np->put_rx.orig) && in nv_rx_process()
2807 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && in nv_rx_process()
2815 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, in nv_rx_process()
2816 np->get_rx_ctx->dma_len, in nv_rx_process()
2818 skb = np->get_rx_ctx->skb; in nv_rx_process()
2819 np->get_rx_ctx->skb = NULL; in nv_rx_process()
2822 if (np->desc_ver == DESC_VER_1) { in nv_rx_process()
2841 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process()
2842 np->stat_rx_missed_errors++; in nv_rx_process()
2843 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process()
2886 napi_gro_receive(&np->napi, skb); in nv_rx_process()
2887 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process()
2888 np->stat_rx_packets++; in nv_rx_process()
2889 np->stat_rx_bytes += len; in nv_rx_process()
2890 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process()
2892 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) in nv_rx_process()
2893 np->get_rx.orig = np->first_rx.orig; in nv_rx_process()
2894 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process()
2895 np->get_rx_ctx = np->first_rx_ctx; in nv_rx_process()
2905 struct fe_priv *np = netdev_priv(dev); in nv_rx_process_optimized() local
2912 while ((np->get_rx.ex != np->put_rx.ex) && in nv_rx_process_optimized()
2913 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && in nv_rx_process_optimized()
2921 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, in nv_rx_process_optimized()
2922 np->get_rx_ctx->dma_len, in nv_rx_process_optimized()
2924 skb = np->get_rx_ctx->skb; in nv_rx_process_optimized()
2925 np->get_rx_ctx->skb = NULL; in nv_rx_process_optimized()
2959 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); in nv_rx_process_optimized()
2972 napi_gro_receive(&np->napi, skb); in nv_rx_process_optimized()
2973 u64_stats_update_begin(&np->swstats_rx_syncp); in nv_rx_process_optimized()
2974 np->stat_rx_packets++; in nv_rx_process_optimized()
2975 np->stat_rx_bytes += len; in nv_rx_process_optimized()
2976 u64_stats_update_end(&np->swstats_rx_syncp); in nv_rx_process_optimized()
2981 if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) in nv_rx_process_optimized()
2982 np->get_rx.ex = np->first_rx.ex; in nv_rx_process_optimized()
2983 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) in nv_rx_process_optimized()
2984 np->get_rx_ctx = np->first_rx_ctx; in nv_rx_process_optimized()
2994 struct fe_priv *np = netdev_priv(dev); in set_bufsize() local
2997 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS; in set_bufsize()
2999 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS; in set_bufsize()
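Only the two assignments are listed, but together they clamp the RX buffer to at least a standard Ethernet payload plus header slack; a one-line equivalent, assuming the obvious MTU test between them (a sketch, not the driver's literal code):

np->rx_buf_sz = max_t(unsigned int, dev->mtu, ETH_DATA_LEN) + NV_RX_HEADERS;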
3008 struct fe_priv *np = netdev_priv(dev); in nv_change_mtu() local
3011 if (new_mtu < 64 || new_mtu > np->pkt_limit) in nv_change_mtu()
3036 spin_lock(&np->lock); in nv_change_mtu()
3045 if (!np->in_shutdown) in nv_change_mtu()
3046 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_change_mtu()
3049 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_change_mtu()
3051 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_change_mtu()
3054 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_change_mtu()
3059 spin_unlock(&np->lock); in nv_change_mtu()
3087 struct fe_priv *np = netdev_priv(dev); in nv_set_mac_address() local
3099 spin_lock_irq(&np->lock); in nv_set_mac_address()
3109 spin_unlock_irq(&np->lock); in nv_set_mac_address()
3124 struct fe_priv *np = netdev_priv(dev); in nv_set_multicast() local
3171 spin_lock_irq(&np->lock); in nv_set_multicast()
3179 spin_unlock_irq(&np->lock); in nv_set_multicast()
3184 struct fe_priv *np = netdev_priv(dev); in nv_update_pause() local
3187 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE); in nv_update_pause()
3189 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) { in nv_update_pause()
3193 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_update_pause()
3198 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { in nv_update_pause()
3202 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) in nv_update_pause()
3204 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { in nv_update_pause()
3211 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_update_pause()
3221 struct fe_priv *np = netdev_priv(dev); in nv_force_linkspeed() local
3226 np->linkspeed = NVREG_LINKSPEED_FORCE|speed; in nv_force_linkspeed()
3227 np->duplex = duplex; in nv_force_linkspeed()
3230 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_force_linkspeed()
3232 np->gigabit = PHY_GIGABIT; in nv_force_linkspeed()
3235 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) in nv_force_linkspeed()
3237 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3239 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_force_linkspeed()
3246 if (np->duplex == 0) in nv_force_linkspeed()
3248 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_force_linkspeed()
3250 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3256 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3266 if (np->desc_ver == DESC_VER_1) { in nv_force_linkspeed()
3269 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == in nv_force_linkspeed()
3277 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_force_linkspeed()
3280 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_force_linkspeed()
3299 struct fe_priv *np = netdev_priv(dev); in nv_update_linkspeed() local
3304 int newls = np->linkspeed; in nv_update_linkspeed()
3305 int newdup = np->duplex; in nv_update_linkspeed()
3316 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_update_linkspeed()
3329 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3330 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_update_linkspeed()
3339 if (np->autoneg == 0) { in nv_update_linkspeed()
3340 if (np->fixed_mode & LPA_100FULL) { in nv_update_linkspeed()
3343 } else if (np->fixed_mode & LPA_100HALF) { in nv_update_linkspeed()
3346 } else if (np->fixed_mode & LPA_10FULL) { in nv_update_linkspeed()
3365 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_update_linkspeed()
3366 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); in nv_update_linkspeed()
3369 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3370 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_update_linkspeed()
3371 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ); in nv_update_linkspeed()
3401 if (np->duplex == newdup && np->linkspeed == newls) in nv_update_linkspeed()
3404 np->duplex = newdup; in nv_update_linkspeed()
3405 np->linkspeed = newls; in nv_update_linkspeed()
3417 if (np->gigabit == PHY_GIGABIT) { in nv_update_linkspeed()
3420 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) || in nv_update_linkspeed()
3421 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)) in nv_update_linkspeed()
3423 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3430 if (np->duplex == 0) in nv_update_linkspeed()
3432 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100) in nv_update_linkspeed()
3434 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3438 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ in nv_update_linkspeed()
3440 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { in nv_update_linkspeed()
3443 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { in nv_update_linkspeed()
3444 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) in nv_update_linkspeed()
3453 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) in nv_update_linkspeed()
3460 if (np->desc_ver == DESC_VER_1) { in nv_update_linkspeed()
3463 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) in nv_update_linkspeed()
3470 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD), in nv_update_linkspeed()
3473 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_update_linkspeed()
3478 if (netif_running(dev) && (np->duplex != 0)) { in nv_update_linkspeed()
3479 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { in nv_update_linkspeed()
3487 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3498 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_update_linkspeed()
3506 pause_flags = np->pause_flags; in nv_update_linkspeed()
3550 static void nv_msi_workaround(struct fe_priv *np) in nv_msi_workaround() argument
3556 if (np->msi_flags & NV_MSI_ENABLED) { in nv_msi_workaround()
3557 u8 __iomem *base = np->base; in nv_msi_workaround()
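The excerpt shows only the MSI check and the base pointer; a hedged reconstruction of the workaround follows, and the two mask writes are an assumption based on this driver family rather than something visible above:

static void nv_msi_workaround(struct fe_priv *np)
{
	/* Some chipsets stop raising further MSI interrupts unless the MSI
	 * mask is toggled after each one, so re-arm it here (assumed
	 * register names: NvRegMSIIrqMask, NVREG_MSI_VECTOR_0_ENABLED). */
	if (np->msi_flags & NV_MSI_ENABLED) {
		u8 __iomem *base = np->base;

		writel(0, base + NvRegMSIIrqMask);
		writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
	}
}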
3566 struct fe_priv *np = netdev_priv(dev); in nv_change_interrupt_mode() local
3571 np->quiet_count = 0; in nv_change_interrupt_mode()
3572 if (np->irqmask != NVREG_IRQMASK_CPU) { in nv_change_interrupt_mode()
3573 np->irqmask = NVREG_IRQMASK_CPU; in nv_change_interrupt_mode()
3577 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) { in nv_change_interrupt_mode()
3578 np->quiet_count++; in nv_change_interrupt_mode()
3582 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) { in nv_change_interrupt_mode()
3583 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_change_interrupt_mode()
3595 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq() local
3598 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq()
3599 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq()
3600 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq()
3602 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq()
3603 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq()
3605 if (!(np->events & np->irqmask)) in nv_nic_irq()
3608 nv_msi_workaround(np); in nv_nic_irq()
3610 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq()
3615 __napi_schedule(&np->napi); in nv_nic_irq()
3628 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_optimized() local
3631 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_optimized()
3632 np->events = readl(base + NvRegIrqStatus); in nv_nic_irq_optimized()
3633 writel(np->events, base + NvRegIrqStatus); in nv_nic_irq_optimized()
3635 np->events = readl(base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3636 writel(np->events, base + NvRegMSIXIrqStatus); in nv_nic_irq_optimized()
3638 if (!(np->events & np->irqmask)) in nv_nic_irq_optimized()
3641 nv_msi_workaround(np); in nv_nic_irq_optimized()
3643 if (napi_schedule_prep(&np->napi)) { in nv_nic_irq_optimized()
3648 __napi_schedule(&np->napi); in nv_nic_irq_optimized()
3657 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_tx() local
3667 if (!(events & np->irqmask)) in nv_nic_irq_tx()
3670 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3672 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3675 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_tx()
3680 if (!np->in_shutdown) { in nv_nic_irq_tx()
3681 np->nic_poll_irq |= NVREG_IRQ_TX_ALL; in nv_nic_irq_tx()
3682 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_tx()
3684 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_tx()
3697 struct fe_priv *np = container_of(napi, struct fe_priv, napi); in nv_napi_poll() local
3698 struct net_device *dev = np->dev; in nv_napi_poll()
3705 if (!nv_optimized(np)) { in nv_napi_poll()
3706 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3707 tx_work += nv_tx_done(dev, np->tx_ring_size); in nv_napi_poll()
3708 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3713 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3714 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size); in nv_napi_poll()
3715 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3725 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3726 if (!np->in_shutdown) in nv_napi_poll()
3727 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_napi_poll()
3728 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3733 if (unlikely(np->events & NVREG_IRQ_LINK)) { in nv_napi_poll()
3734 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3736 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3738 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) { in nv_napi_poll()
3739 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3741 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3742 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_napi_poll()
3744 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) { in nv_napi_poll()
3745 spin_lock_irqsave(&np->lock, flags); in nv_napi_poll()
3746 if (!np->in_shutdown) { in nv_napi_poll()
3747 np->nic_poll_irq = np->irqmask; in nv_napi_poll()
3748 np->recover_error = 1; in nv_napi_poll()
3749 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_napi_poll()
3751 spin_unlock_irqrestore(&np->lock, flags); in nv_napi_poll()
3761 writel(np->irqmask, base + NvRegIrqMask); in nv_napi_poll()
3769 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_rx() local
3779 if (!(events & np->irqmask)) in nv_nic_irq_rx()
3784 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3785 if (!np->in_shutdown) in nv_nic_irq_rx()
3786 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_nic_irq_rx()
3787 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3792 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_rx()
3797 if (!np->in_shutdown) { in nv_nic_irq_rx()
3798 np->nic_poll_irq |= NVREG_IRQ_RX_ALL; in nv_nic_irq_rx()
3799 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_rx()
3801 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_rx()
3814 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_other() local
3824 if (!(events & np->irqmask)) in nv_nic_irq_other()
3828 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3830 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3833 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3835 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3837 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { in nv_nic_irq_other()
3838 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3840 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3841 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_nic_irq_other()
3844 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3849 if (!np->in_shutdown) { in nv_nic_irq_other()
3850 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3851 np->recover_error = 1; in nv_nic_irq_other()
3852 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3854 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3858 spin_lock_irqsave(&np->lock, flags); in nv_nic_irq_other()
3863 if (!np->in_shutdown) { in nv_nic_irq_other()
3864 np->nic_poll_irq |= NVREG_IRQ_OTHER; in nv_nic_irq_other()
3865 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); in nv_nic_irq_other()
3867 spin_unlock_irqrestore(&np->lock, flags); in nv_nic_irq_other()
3881 struct fe_priv *np = netdev_priv(dev); in nv_nic_irq_test() local
3885 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { in nv_nic_irq_test()
3896 nv_msi_workaround(np); in nv_nic_irq_test()
3898 spin_lock(&np->lock); in nv_nic_irq_test()
3899 np->intr_test = 1; in nv_nic_irq_test()
3900 spin_unlock(&np->lock); in nv_nic_irq_test()
3931 struct fe_priv *np = get_nvpriv(dev); in nv_request_irq() local
3940 if (nv_optimized(np)) in nv_request_irq()
3946 if (np->msi_flags & NV_MSI_X_CAPABLE) { in nv_request_irq()
3947 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_request_irq()
3948 np->msi_x_entry[i].entry = i; in nv_request_irq()
3949 ret = pci_enable_msix_range(np->pci_dev, in nv_request_irq()
3950 np->msi_x_entry, in nv_request_irq()
3951 np->msi_flags & NV_MSI_X_VECTORS_MASK, in nv_request_irq()
3952 np->msi_flags & NV_MSI_X_VECTORS_MASK); in nv_request_irq()
3954 np->msi_flags |= NV_MSI_X_ENABLED; in nv_request_irq()
3957 sprintf(np->name_rx, "%s-rx", dev->name); in nv_request_irq()
3958 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, in nv_request_irq()
3959 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev); in nv_request_irq()
3964 pci_disable_msix(np->pci_dev); in nv_request_irq()
3965 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
3969 sprintf(np->name_tx, "%s-tx", dev->name); in nv_request_irq()
3970 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, in nv_request_irq()
3971 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev); in nv_request_irq()
3976 pci_disable_msix(np->pci_dev); in nv_request_irq()
3977 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
3981 sprintf(np->name_other, "%s-other", dev->name); in nv_request_irq()
3982 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, in nv_request_irq()
3983 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev); in nv_request_irq()
3988 pci_disable_msix(np->pci_dev); in nv_request_irq()
3989 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4000 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, in nv_request_irq()
4006 pci_disable_msix(np->pci_dev); in nv_request_irq()
4007 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_request_irq()
4019 if (np->msi_flags & NV_MSI_CAPABLE) { in nv_request_irq()
4020 ret = pci_enable_msi(np->pci_dev); in nv_request_irq()
4022 np->msi_flags |= NV_MSI_ENABLED; in nv_request_irq()
4023 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev); in nv_request_irq()
4027 pci_disable_msi(np->pci_dev); in nv_request_irq()
4028 np->msi_flags &= ~NV_MSI_ENABLED; in nv_request_irq()
4042 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) in nv_request_irq()
4047 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev); in nv_request_irq()
4049 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev); in nv_request_irq()
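The nv_request_irq() entries above try MSI-X first (separate rx/tx/other vectors, or a single "all" vector), fall back to plain MSI, and finally to the legacy PCI line; every failure path disables the capability that was just tried and clears its enable flag. The sketch below models only that decision chain in plain C; try_msix(), try_msi(), try_legacy() and the flag constants are invented placeholders for pci_enable_msix_range(), pci_enable_msi() and request_irq().

/* Sketch of the MSI-X -> MSI -> legacy fallback order used by
 * nv_request_irq().  The try_* helpers and flag values are invented
 * for illustration; they stand in for the real PCI/IRQ calls. */
#include <stdio.h>
#include <stdbool.h>

#define DEMO_MSI_X_CAPABLE 0x0200
#define DEMO_MSI_CAPABLE   0x0100
#define DEMO_MSI_X_ENABLED 0x0800
#define DEMO_MSI_ENABLED   0x0400

static bool try_msix(void)   { return false; }  /* pretend MSI-X setup failed */
static bool try_msi(void)    { return true;  }  /* pretend MSI setup worked */
static bool try_legacy(void) { return true;  }

static int demo_request_irq(unsigned int *msi_flags)
{
    if (*msi_flags & DEMO_MSI_X_CAPABLE) {
        if (try_msix()) {
            *msi_flags |= DEMO_MSI_X_ENABLED;
            return 0;
        }
        /* on failure the driver disables MSI-X and clears the enable bit */
        *msi_flags &= ~DEMO_MSI_X_ENABLED;
    }
    if (*msi_flags & DEMO_MSI_CAPABLE) {
        if (try_msi()) {
            *msi_flags |= DEMO_MSI_ENABLED;
            return 0;
        }
        *msi_flags &= ~DEMO_MSI_ENABLED;
    }
    return try_legacy() ? 0 : -1;    /* last resort: the shared legacy line */
}

int main(void)
{
    unsigned int flags = DEMO_MSI_X_CAPABLE | DEMO_MSI_CAPABLE;
    if (demo_request_irq(&flags) == 0)
        printf("irq set up, flags now 0x%04x\n", flags);
    return 0;
}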
4056 struct fe_priv *np = get_nvpriv(dev); in nv_free_irq() local
4059 if (np->msi_flags & NV_MSI_X_ENABLED) { in nv_free_irq()
4060 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) in nv_free_irq()
4061 free_irq(np->msi_x_entry[i].vector, dev); in nv_free_irq()
4062 pci_disable_msix(np->pci_dev); in nv_free_irq()
4063 np->msi_flags &= ~NV_MSI_X_ENABLED; in nv_free_irq()
4065 free_irq(np->pci_dev->irq, dev); in nv_free_irq()
4066 if (np->msi_flags & NV_MSI_ENABLED) { in nv_free_irq()
4067 pci_disable_msi(np->pci_dev); in nv_free_irq()
4068 np->msi_flags &= ~NV_MSI_ENABLED; in nv_free_irq()
4076 struct fe_priv *np = netdev_priv(dev); in nv_do_nic_poll() local
4089 if (np->msi_flags & NV_MSI_X_ENABLED) in nv_do_nic_poll()
4090 irq = np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector; in nv_do_nic_poll()
4092 irq = np->pci_dev->irq; in nv_do_nic_poll()
4093 mask = np->irqmask; in nv_do_nic_poll()
4095 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4096 irq = np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector; in nv_do_nic_poll()
4099 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4100 irq = np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector; in nv_do_nic_poll()
4103 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4104 irq = np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector; in nv_do_nic_poll()
4112 if (np->recover_error) { in nv_do_nic_poll()
4113 np->recover_error = 0; in nv_do_nic_poll()
4118 spin_lock(&np->lock); in nv_do_nic_poll()
4121 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_do_nic_poll()
4129 if (!np->in_shutdown) in nv_do_nic_poll()
4130 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_do_nic_poll()
4133 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_do_nic_poll()
4135 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_do_nic_poll()
4138 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_do_nic_poll()
4141 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_do_nic_poll()
4148 spin_unlock(&np->lock); in nv_do_nic_poll()
4158 np->nic_poll_irq = 0; in nv_do_nic_poll()
4159 if (nv_optimized(np)) in nv_do_nic_poll()
4164 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { in nv_do_nic_poll()
4165 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL; in nv_do_nic_poll()
4168 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) { in nv_do_nic_poll()
4169 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL; in nv_do_nic_poll()
4172 if (np->nic_poll_irq & NVREG_IRQ_OTHER) { in nv_do_nic_poll()
4173 np->nic_poll_irq &= ~NVREG_IRQ_OTHER; in nv_do_nic_poll()
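nv_do_nic_poll() works out which interrupt source it is servicing from the bits left in np->nic_poll_irq: with several MSI-X vectors in use it selects the rx, tx or "other" vector and its mask, otherwise it falls back to the single "all" vector or the legacy PCI irq. A loose decision sketch follows; the bit values and vector numbering are placeholders, and the real routine can service more than one pending source per run.

/* Sketch of mapping pending nic_poll_irq bits to a vector, loosely
 * following nv_do_nic_poll().  All numeric values are placeholders. */
#include <stdio.h>

#define DEMO_IRQ_RX_ALL 0x0003
#define DEMO_IRQ_TX_ALL 0x000c
#define DEMO_IRQ_OTHER  0x0010

enum demo_vector { VEC_ALL = 0, VEC_RX = 1, VEC_TX = 2, VEC_OTHER = 3 };

static enum demo_vector pick_vector(unsigned int pending, int multi_irqs)
{
    if (!multi_irqs)
        return VEC_ALL;              /* single MSI-X vector or legacy line */
    if (pending & DEMO_IRQ_RX_ALL)
        return VEC_RX;
    if (pending & DEMO_IRQ_TX_ALL)
        return VEC_TX;
    if (pending & DEMO_IRQ_OTHER)
        return VEC_OTHER;
    return VEC_ALL;
}

int main(void)
{
    printf("pending rx -> vector %d\n", pick_vector(DEMO_IRQ_RX_ALL, 1));
    printf("single-irq mode -> vector %d\n", pick_vector(DEMO_IRQ_OTHER, 0));
    return 0;
}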
4193 struct fe_priv *np = netdev_priv(dev); in nv_do_stats_poll() local
4197 if (spin_trylock(&np->hwstats_lock)) { in nv_do_stats_poll()
4199 spin_unlock(&np->hwstats_lock); in nv_do_stats_poll()
4202 if (!np->in_shutdown) in nv_do_stats_poll()
4203 mod_timer(&np->stats_poll, in nv_do_stats_poll()
4209 struct fe_priv *np = netdev_priv(dev); in nv_get_drvinfo() local
4212 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in nv_get_drvinfo()
4217 struct fe_priv *np = netdev_priv(dev); in nv_get_wol() local
4220 spin_lock_irq(&np->lock); in nv_get_wol()
4221 if (np->wolenabled) in nv_get_wol()
4223 spin_unlock_irq(&np->lock); in nv_get_wol()
4228 struct fe_priv *np = netdev_priv(dev); in nv_set_wol() local
4233 np->wolenabled = 0; in nv_set_wol()
4235 np->wolenabled = 1; in nv_set_wol()
4239 spin_lock_irq(&np->lock); in nv_set_wol()
4241 spin_unlock_irq(&np->lock); in nv_set_wol()
4243 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled); in nv_set_wol()
4249 struct fe_priv *np = netdev_priv(dev); in nv_get_settings() local
4253 spin_lock_irq(&np->lock); in nv_get_settings()
4268 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) { in nv_get_settings()
4283 if (np->duplex) in nv_get_settings()
4290 ecmd->autoneg = np->autoneg; in nv_get_settings()
4293 if (np->autoneg) { in nv_get_settings()
4295 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_get_settings()
4304 if (np->gigabit == PHY_GIGABIT) { in nv_get_settings()
4305 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_get_settings()
4314 if (np->gigabit == PHY_GIGABIT) in nv_get_settings()
4317 ecmd->phy_address = np->phyaddr; in nv_get_settings()
4321 spin_unlock_irq(&np->lock); in nv_get_settings()
4327 struct fe_priv *np = netdev_priv(dev); in nv_set_settings() local
4334 if (ecmd->phy_address != np->phyaddr) { in nv_set_settings()
4344 if (np->gigabit == PHY_GIGABIT) in nv_set_settings()
4370 spin_lock_irqsave(&np->lock, flags); in nv_set_settings()
4381 spin_unlock_irqrestore(&np->lock, flags); in nv_set_settings()
4389 np->autoneg = 1; in nv_set_settings()
4392 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_settings()
4402 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx p… in nv_set_settings()
4404 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_settings()
4406 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_settings()
4408 if (np->gigabit == PHY_GIGABIT) { in nv_set_settings()
4409 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_settings()
4413 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_settings()
4418 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_settings()
4419 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_set_settings()
4429 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_settings()
4434 np->autoneg = 0; in nv_set_settings()
4436 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_settings()
4446 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_settings()
4447 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx p… in nv_set_settings()
4449 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_settings()
4451 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) { in nv_set_settings()
4453 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_settings()
4455 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_settings()
4456 np->fixed_mode = adv; in nv_set_settings()
4458 if (np->gigabit == PHY_GIGABIT) { in nv_set_settings()
4459 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); in nv_set_settings()
4461 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv); in nv_set_settings()
4464 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_settings()
4466 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL)) in nv_set_settings()
4468 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) in nv_set_settings()
4470 if (np->phy_oui == PHY_OUI_MARVELL) { in nv_set_settings()
4477 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_settings()
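In both the autoneg and fixed-speed branches of nv_set_settings() the pause request flags are folded into the MII advertisement word: an rx pause request advertises both symmetric and asymmetric pause, while a tx-only request advertises asymmetric pause alone. The helper below reproduces just that mapping; the ADVERTISE_PAUSE_CAP/ADVERTISE_PAUSE_ASYM bit values match linux/mii.h, but the NV_PAUSEFRAME_* request values are stand-ins for the demo.

/* Sketch of how pause requests become MII advertisement bits, as in the
 * autoneg path of nv_set_settings().  The request flag values are
 * placeholders; the advertisement bits are the standard mii.h ones. */
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400  /* symmetric pause */
#define ADVERTISE_PAUSE_ASYM 0x0800  /* asymmetric pause */

#define DEMO_PAUSE_RX_REQ 0x01       /* stand-in for NV_PAUSEFRAME_RX_REQ */
#define DEMO_PAUSE_TX_REQ 0x02       /* stand-in for NV_PAUSEFRAME_TX_REQ */

static unsigned int add_pause_bits(unsigned int adv, unsigned int pause_flags)
{
    /* rx pause: advertise both, and let autoneg sort out tx pause */
    if (pause_flags & DEMO_PAUSE_RX_REQ)
        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
    /* tx-only pause: asymmetric advertisement only */
    if (pause_flags & DEMO_PAUSE_TX_REQ)
        adv |= ADVERTISE_PAUSE_ASYM;
    return adv;
}

int main(void)
{
    printf("rx+tx request -> adv bits 0x%04x\n",
           add_pause_bits(0, DEMO_PAUSE_RX_REQ | DEMO_PAUSE_TX_REQ));
    return 0;
}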
4498 struct fe_priv *np = netdev_priv(dev); in nv_get_regs_len() local
4499 return np->register_size; in nv_get_regs_len()
4504 struct fe_priv *np = netdev_priv(dev); in nv_get_regs() local
4510 spin_lock_irq(&np->lock); in nv_get_regs()
4511 for (i = 0; i < np->register_size/sizeof(u32); i++) in nv_get_regs()
4513 spin_unlock_irq(&np->lock); in nv_get_regs()
4518 struct fe_priv *np = netdev_priv(dev); in nv_nway_reset() local
4521 if (np->autoneg) { in nv_nway_reset()
4529 spin_lock(&np->lock); in nv_nway_reset()
4532 spin_unlock(&np->lock); in nv_nway_reset()
4538 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_nway_reset()
4539 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { in nv_nway_reset()
4548 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_nway_reset()
4565 struct fe_priv *np = netdev_priv(dev); in nv_get_ringparam() local
4567 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4568 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3; in nv_get_ringparam()
4570 ring->rx_pending = np->rx_ring_size; in nv_get_ringparam()
4571 ring->tx_pending = np->tx_ring_size; in nv_get_ringparam()
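nv_get_ringparam() reports different maximum ring sizes depending on the descriptor version: the original DESC_VER_1 layout supports a smaller ring than the later formats. The sketch below only illustrates the selection; the two numeric limits are assumptions for the demo, not the driver's RING_MAX_DESC_VER_* constants.

/* Ring-limit selection by descriptor version, after nv_get_ringparam().
 * The numeric limits are demo values, not the real RING_MAX_DESC_VER_1 /
 * RING_MAX_DESC_VER_2_3 constants. */
#include <stdio.h>

enum demo_desc_ver { DEMO_DESC_VER_1 = 1, DEMO_DESC_VER_2 = 2, DEMO_DESC_VER_3 = 3 };

static unsigned int max_ring_entries(enum demo_desc_ver ver)
{
    return (ver == DEMO_DESC_VER_1) ? 1024 : 16384;  /* assumed limits */
}

int main(void)
{
    printf("ver1 max %u, ver3 max %u\n",
           max_ring_entries(DEMO_DESC_VER_1), max_ring_entries(DEMO_DESC_VER_3));
    return 0;
}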
4576 struct fe_priv *np = netdev_priv(dev); in nv_set_ringparam() local
4585 (np->desc_ver == DESC_VER_1 && in nv_set_ringparam()
4588 (np->desc_ver != DESC_VER_1 && in nv_set_ringparam()
4595 if (!nv_optimized(np)) { in nv_set_ringparam()
4596 rxtx_ring = pci_alloc_consistent(np->pci_dev, in nv_set_ringparam()
4600 rxtx_ring = pci_alloc_consistent(np->pci_dev, in nv_set_ringparam()
4608 if (!nv_optimized(np)) { in nv_set_ringparam()
4610 … pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), in nv_set_ringparam()
4614 …pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pendin… in nv_set_ringparam()
4628 spin_lock(&np->lock); in nv_set_ringparam()
4639 np->rx_ring_size = ring->rx_pending; in nv_set_ringparam()
4640 np->tx_ring_size = ring->tx_pending; in nv_set_ringparam()
4642 if (!nv_optimized(np)) { in nv_set_ringparam()
4643 np->rx_ring.orig = (struct ring_desc *)rxtx_ring; in nv_set_ringparam()
4644 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_set_ringparam()
4646 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring; in nv_set_ringparam()
4647 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_set_ringparam()
4649 np->rx_skb = (struct nv_skb_map *)rx_skbuff; in nv_set_ringparam()
4650 np->tx_skb = (struct nv_skb_map *)tx_skbuff; in nv_set_ringparam()
4651 np->ring_addr = ring_addr; in nv_set_ringparam()
4653 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); in nv_set_ringparam()
4654 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size); in nv_set_ringparam()
4660 if (!np->in_shutdown) in nv_set_ringparam()
4661 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_set_ringparam()
4665 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_set_ringparam()
4667 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_set_ringparam()
4670 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_set_ringparam()
4675 spin_unlock(&np->lock); in nv_set_ringparam()
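The resize path above allocates the rx and tx descriptors as one contiguous DMA block and then points the tx ring at the element just past the rx area (np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]). The userspace sketch below uses calloc in place of pci_alloc_consistent to show that single-allocation split; struct demo_desc is a stand-in for struct ring_desc.

/* One allocation, two rings: the rx/tx split used by nv_set_ringparam()
 * and nv_probe(), modelled with calloc instead of pci_alloc_consistent. */
#include <stdio.h>
#include <stdlib.h>

struct demo_desc {
    unsigned int buf;
    unsigned int flaglen;
};

int main(void)
{
    unsigned int rx_ring_size = 128, tx_ring_size = 256;
    struct demo_desc *rx_ring, *tx_ring;

    /* one contiguous block covering both rings */
    rx_ring = calloc(rx_ring_size + tx_ring_size, sizeof(*rx_ring));
    if (!rx_ring)
        return 1;

    /* the tx ring simply starts right after the rx descriptors */
    tx_ring = &rx_ring[rx_ring_size];

    printf("rx at %p, tx at %p (offset %zu bytes)\n",
           (void *)rx_ring, (void *)tx_ring,
           (size_t)((char *)tx_ring - (char *)rx_ring));

    free(rx_ring);   /* freeing the base pointer releases both rings */
    return 0;
}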
4688 struct fe_priv *np = netdev_priv(dev); in nv_get_pauseparam() local
4690 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0; in nv_get_pauseparam()
4691 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0; in nv_get_pauseparam()
4692 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0; in nv_get_pauseparam()
4697 struct fe_priv *np = netdev_priv(dev); in nv_set_pauseparam() local
4700 if ((!np->autoneg && np->duplex == 0) || in nv_set_pauseparam()
4701 (np->autoneg && !pause->autoneg && np->duplex == 0)) { in nv_set_pauseparam()
4705 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { in nv_set_pauseparam()
4715 spin_lock(&np->lock); in nv_set_pauseparam()
4718 spin_unlock(&np->lock); in nv_set_pauseparam()
4723 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ); in nv_set_pauseparam()
4725 np->pause_flags |= NV_PAUSEFRAME_RX_REQ; in nv_set_pauseparam()
4727 np->pause_flags |= NV_PAUSEFRAME_TX_REQ; in nv_set_pauseparam()
4729 if (np->autoneg && pause->autoneg) { in nv_set_pauseparam()
4730 np->pause_flags |= NV_PAUSEFRAME_AUTONEG; in nv_set_pauseparam()
4732 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); in nv_set_pauseparam()
4734 …if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pa… in nv_set_pauseparam()
4736 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) in nv_set_pauseparam()
4738 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); in nv_set_pauseparam()
4742 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_pauseparam()
4744 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); in nv_set_pauseparam()
4746 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE); in nv_set_pauseparam()
4748 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE; in nv_set_pauseparam()
4750 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; in nv_set_pauseparam()
4755 nv_update_pause(dev, np->pause_flags); in nv_set_pauseparam()
4767 struct fe_priv *np = netdev_priv(dev); in nv_set_loopback() local
4772 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4773 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_set_loopback()
4776 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4783 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol); in nv_set_loopback()
4786 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4796 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4802 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4808 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4813 spin_lock_irqsave(&np->lock, flags); in nv_set_loopback()
4815 spin_unlock_irqrestore(&np->lock, flags); in nv_set_loopback()
4832 struct fe_priv *np = get_nvpriv(dev); in nv_vlan_mode() local
4834 spin_lock_irq(&np->lock); in nv_vlan_mode()
4837 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4839 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP; in nv_vlan_mode()
4842 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4844 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS; in nv_vlan_mode()
4846 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_vlan_mode()
4848 spin_unlock_irq(&np->lock); in nv_vlan_mode()
4853 struct fe_priv *np = netdev_priv(dev); in nv_set_features() local
4865 spin_lock_irq(&np->lock); in nv_set_features()
4868 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4870 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK; in nv_set_features()
4873 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_set_features()
4875 spin_unlock_irq(&np->lock); in nv_set_features()
4886 struct fe_priv *np = netdev_priv(dev); in nv_get_sset_count() local
4890 if (np->driver_data & DEV_HAS_TEST_EXTENDED) in nv_get_sset_count()
4895 if (np->driver_data & DEV_HAS_STATISTICS_V3) in nv_get_sset_count()
4897 else if (np->driver_data & DEV_HAS_STATISTICS_V2) in nv_get_sset_count()
4899 else if (np->driver_data & DEV_HAS_STATISTICS_V1) in nv_get_sset_count()
4913 struct fe_priv *np = netdev_priv(dev); in nv_get_ethtool_stats() local
4915 spin_lock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
4917 memcpy(buffer, &np->estats, in nv_get_ethtool_stats()
4919 spin_unlock_bh(&np->hwstats_lock); in nv_get_ethtool_stats()
4924 struct fe_priv *np = netdev_priv(dev); in nv_link_test() local
4927 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
4928 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_link_test()
4967 struct fe_priv *np = netdev_priv(dev); in nv_interrupt_test() local
4980 np->intr_test = 0; in nv_interrupt_test()
4983 save_msi_flags = np->msi_flags; in nv_interrupt_test()
4984 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK; in nv_interrupt_test()
4985 np->msi_flags |= 0x001; /* setup 1 vector */ in nv_interrupt_test()
4998 spin_lock_irq(&np->lock); in nv_interrupt_test()
5001 testcnt = np->intr_test; in nv_interrupt_test()
5006 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_interrupt_test()
5011 spin_unlock_irq(&np->lock); in nv_interrupt_test()
5015 np->msi_flags = save_msi_flags; in nv_interrupt_test()
5030 struct fe_priv *np = netdev_priv(dev); in nv_loopback_test() local
5034 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); in nv_loopback_test()
5059 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_loopback_test()
5061 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_loopback_test()
5075 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, in nv_loopback_test()
5078 if (pci_dma_mapping_error(np->pci_dev, in nv_loopback_test()
5087 if (!nv_optimized(np)) { in nv_loopback_test()
5088 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); in nv_loopback_test()
5089 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5091 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr)); in nv_loopback_test()
5092 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr)); in nv_loopback_test()
5093 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); in nv_loopback_test()
5095 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_loopback_test()
5101 if (!nv_optimized(np)) { in nv_loopback_test()
5102 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); in nv_loopback_test()
5103 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); in nv_loopback_test()
5106 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen); in nv_loopback_test()
5107 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); in nv_loopback_test()
5112 } else if (np->desc_ver == DESC_VER_1) { in nv_loopback_test()
5124 rx_skb = np->rx_skb[0].skb; in nv_loopback_test()
5134 pci_unmap_single(np->pci_dev, test_dma_addr, in nv_loopback_test()
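For the extended descriptor format the loopback test splits the 64-bit DMA address of the test buffer into high and low 32-bit halves (bufhigh/buflow), whereas the legacy format stores a single 32-bit address. A standalone sketch of that split follows; demo_dma_low()/demo_dma_high() only mirror the spirit of the driver's dma_low()/dma_high() helpers.

/* Splitting a 64-bit DMA address into the bufhigh/buflow words used by
 * the extended descriptors (as in the nv_loopback_test() lines above). */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_dma_low(uint64_t addr)  { return (uint32_t)addr; }
static uint32_t demo_dma_high(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
    uint64_t test_dma_addr = 0x0000000123456789ULL;   /* made-up address */

    printf("buflow  = 0x%08x\n", demo_dma_low(test_dma_addr));
    printf("bufhigh = 0x%08x\n", demo_dma_high(test_dma_addr));
    /* reassembled: high word shifted back up, low word OR'd in */
    printf("rebuilt = 0x%016llx\n",
           (unsigned long long)(((uint64_t)demo_dma_high(test_dma_addr) << 32) |
                                demo_dma_low(test_dma_addr)));
    return 0;
}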
5156 struct fe_priv *np = netdev_priv(dev); in nv_self_test() local
5174 spin_lock_irq(&np->lock); in nv_self_test()
5175 nv_disable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5176 if (!(np->msi_flags & NV_MSI_X_ENABLED)) in nv_self_test()
5185 spin_unlock_irq(&np->lock); in nv_self_test()
5214 if (!np->in_shutdown) in nv_self_test()
5215 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_self_test()
5218 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_self_test()
5220 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_self_test()
5223 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); in nv_self_test()
5229 nv_enable_hw_interrupts(dev, np->irqmask); in nv_self_test()
5270 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_acquire_sema() local
5294 np->mgmt_sema = 1; in nv_mgmt_acquire_sema()
5305 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_release_sema() local
5309 if (np->driver_data & DEV_HAS_MGMT_UNIT) { in nv_mgmt_release_sema()
5310 if (np->mgmt_sema) { in nv_mgmt_release_sema()
5321 struct fe_priv *np = netdev_priv(dev); in nv_mgmt_get_version() local
5343 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION; in nv_mgmt_get_version()
5350 struct fe_priv *np = netdev_priv(dev); in nv_open() local
5357 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_open()
5358 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); in nv_open()
5362 if (np->driver_data & DEV_HAS_POWER_CNTRL) in nv_open()
5375 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) in nv_open()
5387 np->in_shutdown = 0; in nv_open()
5391 …writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSH… in nv_open()
5394 writel(np->linkspeed, base + NvRegLinkSpeed); in nv_open()
5395 if (np->desc_ver == DESC_VER_1) in nv_open()
5399 writel(np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5400 writel(np->vlanctl_bits, base + NvRegVlanControl); in nv_open()
5402 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); in nv_open()
5416 writel(np->rx_buf_sz, base + NvRegOffloadConfig); in nv_open()
5422 if (np->desc_ver == DESC_VER_1) { in nv_open()
5425 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) { in nv_open()
5443 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, in nv_open()
5447 if (np->wolenabled) in nv_open()
5458 nv_disable_hw_interrupts(dev, np->irqmask); in nv_open()
5468 nv_enable_hw_interrupts(dev, np->irqmask); in nv_open()
5470 spin_lock_irq(&np->lock); in nv_open()
5486 np->linkspeed = 0; in nv_open()
5499 mod_timer(&np->oom_kick, jiffies + OOM_REFILL); in nv_open()
5502 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) in nv_open()
5503 mod_timer(&np->stats_poll, in nv_open()
5506 spin_unlock_irq(&np->lock); in nv_open()
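Several of the listings above (nv_do_nic_poll, nv_set_ringparam, nv_loopback_test, nv_self_test, nv_open) program NvRegRingSizes with both ring lengths packed into a single register: each size minus one, shifted into its rx or tx field. The sketch below computes that packed value; the shift constants are assumptions standing in for NVREG_RINGSZ_RXSHIFT/NVREG_RINGSZ_TXSHIFT.

/* Packing both ring sizes into one register value, following the
 * writel(... NvRegRingSizes) pattern in the listings above.  The shift
 * values are assumed, and OR is used since the fields do not overlap. */
#include <stdio.h>

#define DEMO_RINGSZ_RXSHIFT 0
#define DEMO_RINGSZ_TXSHIFT 16

static unsigned int pack_ring_sizes(unsigned int rx, unsigned int tx)
{
    /* hardware wants "entries - 1" in each field */
    return ((rx - 1) << DEMO_RINGSZ_RXSHIFT) | ((tx - 1) << DEMO_RINGSZ_TXSHIFT);
}

int main(void)
{
    printf("RingSizes = 0x%08x\n", pack_ring_sizes(128, 256));
    return 0;
}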
5522 struct fe_priv *np = netdev_priv(dev); in nv_close() local
5525 spin_lock_irq(&np->lock); in nv_close()
5526 np->in_shutdown = 1; in nv_close()
5527 spin_unlock_irq(&np->lock); in nv_close()
5529 synchronize_irq(np->pci_dev->irq); in nv_close()
5531 del_timer_sync(&np->oom_kick); in nv_close()
5532 del_timer_sync(&np->nic_poll); in nv_close()
5533 del_timer_sync(&np->stats_poll); in nv_close()
5536 spin_lock_irq(&np->lock); in nv_close()
5543 nv_disable_hw_interrupts(dev, np->irqmask); in nv_close()
5546 spin_unlock_irq(&np->lock); in nv_close()
5552 if (np->wolenabled || !phy_power_down) { in nv_close()
5558 mii_rw(dev, np->phyaddr, MII_BMCR, in nv_close()
5559 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN); in nv_close()
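nv_close() powers the PHY down (when Wake-on-LAN is off and phy_power_down allows it) with a read-modify-write of the MII BMCR register: read the current value, OR in BMCR_PDOWN, write it back; nv_open() does the inverse with & ~BMCR_PDOWN. The sketch models that pattern against a fake register; demo_mii_rw() is an invented stand-in for the driver's mii_rw(), while the BMCR_PDOWN bit value comes from linux/mii.h.

/* Read-modify-write of the PHY BMCR power-down bit, as in nv_close()
 * and nv_open().  demo_mii_rw() is a fake register accessor. */
#include <stdio.h>

#define BMCR_PDOWN 0x0800
#define MII_READ   (-1)          /* "value" meaning: read instead of write */

static unsigned int fake_bmcr = 0x1140;   /* a plausible BMCR content */

static int demo_mii_rw(int reg_value)
{
    if (reg_value == MII_READ)
        return (int)fake_bmcr;
    fake_bmcr = (unsigned int)reg_value;
    return 0;
}

int main(void)
{
    /* power down: set the bit */
    demo_mii_rw(demo_mii_rw(MII_READ) | BMCR_PDOWN);
    printf("after close: BMCR=0x%04x\n", fake_bmcr);

    /* power up again: clear the bit */
    demo_mii_rw(demo_mii_rw(MII_READ) & ~BMCR_PDOWN);
    printf("after open:  BMCR=0x%04x\n", fake_bmcr);
    return 0;
}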
5605 struct fe_priv *np; in nv_probe() local
5623 np = netdev_priv(dev); in nv_probe()
5624 np->dev = dev; in nv_probe()
5625 np->pci_dev = pci_dev; in nv_probe()
5626 spin_lock_init(&np->lock); in nv_probe()
5627 spin_lock_init(&np->hwstats_lock); in nv_probe()
5629 u64_stats_init(&np->swstats_rx_syncp); in nv_probe()
5630 u64_stats_init(&np->swstats_tx_syncp); in nv_probe()
5632 init_timer(&np->oom_kick); in nv_probe()
5633 np->oom_kick.data = (unsigned long) dev; in nv_probe()
5634 np->oom_kick.function = nv_do_rx_refill; /* timer handler */ in nv_probe()
5635 init_timer(&np->nic_poll); in nv_probe()
5636 np->nic_poll.data = (unsigned long) dev; in nv_probe()
5637 np->nic_poll.function = nv_do_nic_poll; /* timer handler */ in nv_probe()
5638 init_timer_deferrable(&np->stats_poll); in nv_probe()
5639 np->stats_poll.data = (unsigned long) dev; in nv_probe()
5640 np->stats_poll.function = nv_do_stats_poll; /* timer handler */ in nv_probe()
5653 np->register_size = NV_PCI_REGSZ_VER3; in nv_probe()
5655 np->register_size = NV_PCI_REGSZ_VER2; in nv_probe()
5657 np->register_size = NV_PCI_REGSZ_VER1; in nv_probe()
5663 pci_resource_len(pci_dev, i) >= np->register_size) { in nv_probe()
5674 np->driver_data = id->driver_data; in nv_probe()
5676 np->device_id = id->device; in nv_probe()
5681 np->desc_ver = DESC_VER_3; in nv_probe()
5682 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; in nv_probe()
5696 np->desc_ver = DESC_VER_2; in nv_probe()
5697 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; in nv_probe()
5700 np->desc_ver = DESC_VER_1; in nv_probe()
5701 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; in nv_probe()
5704 np->pkt_limit = NV_PKTLIMIT_1; in nv_probe()
5706 np->pkt_limit = NV_PKTLIMIT_2; in nv_probe()
5709 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; in nv_probe()
5714 np->vlanctl_bits = 0; in nv_probe()
5716 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; in nv_probe()
5726 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; in nv_probe()
5730 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; in nv_probe()
5734 np->base = ioremap(addr, np->register_size); in nv_probe()
5735 if (!np->base) in nv_probe()
5738 np->rx_ring_size = RX_RING_DEFAULT; in nv_probe()
5739 np->tx_ring_size = TX_RING_DEFAULT; in nv_probe()
5741 if (!nv_optimized(np)) { in nv_probe()
5742 np->rx_ring.orig = pci_alloc_consistent(pci_dev, in nv_probe()
5743 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), in nv_probe()
5744 &np->ring_addr); in nv_probe()
5745 if (!np->rx_ring.orig) in nv_probe()
5747 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; in nv_probe()
5749 np->rx_ring.ex = pci_alloc_consistent(pci_dev, in nv_probe()
5750 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), in nv_probe()
5751 &np->ring_addr); in nv_probe()
5752 if (!np->rx_ring.ex) in nv_probe()
5754 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; in nv_probe()
5756 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5757 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL); in nv_probe()
5758 if (!np->rx_skb || !np->tx_skb) in nv_probe()
5761 if (!nv_optimized(np)) in nv_probe()
5766 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); in nv_probe()
5774 np->orig_mac[0] = readl(base + NvRegMacAddrA); in nv_probe()
5775 np->orig_mac[1] = readl(base + NvRegMacAddrB); in nv_probe()
5781 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5782 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5783 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5784 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5785 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5786 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5789 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5790 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5791 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5792 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5793 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5794 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5800 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) + in nv_probe()
5802 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8); in nv_probe()
5805 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; in nv_probe()
5806 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; in nv_probe()
5807 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; in nv_probe()
5808 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; in nv_probe()
5809 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; in nv_probe()
5810 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; in nv_probe()
5835 np->wolenabled = 0; in nv_probe()
5849 if (np->desc_ver == DESC_VER_1) in nv_probe()
5850 np->tx_flags = NV_TX_VALID; in nv_probe()
5852 np->tx_flags = NV_TX2_VALID; in nv_probe()
5854 np->msi_flags = 0; in nv_probe()
5856 np->msi_flags |= NV_MSI_CAPABLE; in nv_probe()
5863 np->msi_flags |= NV_MSI_X_CAPABLE; in nv_probe()
5868 np->irqmask = NVREG_IRQMASK_CPU; in nv_probe()
5869 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5870 np->msi_flags |= 0x0001; in nv_probe()
5874 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5876 np->msi_flags &= ~NV_MSI_X_CAPABLE; in nv_probe()
5879 np->irqmask = NVREG_IRQMASK_THROUGHPUT; in nv_probe()
5880 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */ in nv_probe()
5881 np->msi_flags |= 0x0003; in nv_probe()
5885 np->irqmask |= NVREG_IRQ_TIMER; in nv_probe()
5887 np->need_linktimer = 1; in nv_probe()
5888 np->link_timeout = jiffies + LINK_TIMEOUT; in nv_probe()
5890 np->need_linktimer = 0; in nv_probe()
5895 np->tx_limit = 1; in nv_probe()
5898 np->tx_limit = 0; in nv_probe()
5917 np->mac_in_use = 1; in nv_probe()
5918 if (np->mgmt_version > 0) in nv_probe()
5919 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; in nv_probe()
5921 if (np->mac_in_use && in nv_probe()
5937 spin_lock_irq(&np->lock); in nv_probe()
5939 spin_unlock_irq(&np->lock); in nv_probe()
5942 spin_lock_irq(&np->lock); in nv_probe()
5944 spin_unlock_irq(&np->lock); in nv_probe()
5948 np->phy_model = id2 & PHYID2_MODEL_MASK; in nv_probe()
5951 np->phyaddr = phyaddr; in nv_probe()
5952 np->phy_oui = id1 | id2; in nv_probe()
5955 if (np->phy_oui == PHY_OUI_REALTEK2) in nv_probe()
5956 np->phy_oui = PHY_OUI_REALTEK; in nv_probe()
5958 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211) in nv_probe()
5959 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK; in nv_probe()
5973 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); in nv_probe()
5975 np->gigabit = PHY_GIGABIT; in nv_probe()
5979 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; in nv_probe()
5980 np->duplex = 0; in nv_probe()
5981 np->autoneg = 1; in nv_probe()
6003 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); in nv_probe()
6017 np->gigabit == PHY_GIGABIT ? "gbit " : "", in nv_probe()
6018 np->need_linktimer ? "lnktim " : "", in nv_probe()
6019 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", in nv_probe()
6020 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", in nv_probe()
6021 np->desc_ver); in nv_probe()
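nv_probe() reads the station address from the NvRegMacAddrA/B registers and unpacks it byte by byte; on newer parts the bytes are stored in the expected order, while on older ones the address sits reversed and the driver flips it (the second dev_addr block in the listing). The sketch below shows both unpackings for a made-up register pair.

/* Unpacking a 6-byte MAC address from the two 32-bit MAC registers,
 * in both the straight and the reversed layouts handled by nv_probe().
 * The register contents are made up for the demo. */
#include <stdio.h>
#include <stdint.h>

static void print_mac(const char *tag, const uint8_t m[6])
{
    printf("%s %02x:%02x:%02x:%02x:%02x:%02x\n",
           tag, m[0], m[1], m[2], m[3], m[4], m[5]);
}

int main(void)
{
    uint32_t mac_a = 0x44332211;   /* stand-in for readl(base + NvRegMacAddrA) */
    uint32_t mac_b = 0x00006655;   /* stand-in for readl(base + NvRegMacAddrB) */
    uint8_t straight[6], flipped[6];

    /* straight layout: low byte of MacAddrA is dev_addr[0] */
    straight[0] = (mac_a >>  0) & 0xff;
    straight[1] = (mac_a >>  8) & 0xff;
    straight[2] = (mac_a >> 16) & 0xff;
    straight[3] = (mac_a >> 24) & 0xff;
    straight[4] = (mac_b >>  0) & 0xff;
    straight[5] = (mac_b >>  8) & 0xff;

    /* reversed layout: the address is stored back to front */
    flipped[0] = (mac_b >>  8) & 0xff;
    flipped[1] = (mac_b >>  0) & 0xff;
    flipped[2] = (mac_a >> 24) & 0xff;
    flipped[3] = (mac_a >> 16) & 0xff;
    flipped[4] = (mac_a >>  8) & 0xff;
    flipped[5] = (mac_a >>  0) & 0xff;

    print_mac("straight:", straight);
    print_mac("flipped: ", flipped);
    return 0;
}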
6044 struct fe_priv *np = netdev_priv(dev); in nv_restore_phy() local
6047 if (np->phy_oui == PHY_OUI_REALTEK && in nv_restore_phy()
6048 np->phy_model == PHY_MODEL_REALTEK_8201 && in nv_restore_phy()
6050 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3); in nv_restore_phy()
6051 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ); in nv_restore_phy()
6054 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved); in nv_restore_phy()
6055 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1); in nv_restore_phy()
6058 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); in nv_restore_phy()
6060 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control); in nv_restore_phy()
6067 struct fe_priv *np = netdev_priv(dev); in nv_restore_mac_addr() local
6073 writel(np->orig_mac[0], base + NvRegMacAddrA); in nv_restore_mac_addr()
6074 writel(np->orig_mac[1], base + NvRegMacAddrB); in nv_restore_mac_addr()
6105 struct fe_priv *np = netdev_priv(dev); in nv_suspend() local
6116 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_suspend()
6117 np->saved_config_space[i] = readl(base + i*sizeof(u32)); in nv_suspend()
6126 struct fe_priv *np = netdev_priv(dev); in nv_resume() local
6131 for (i = 0; i <= np->register_size/sizeof(u32); i++) in nv_resume()
6132 writel(np->saved_config_space[i], base+i*sizeof(u32)); in nv_resume()
6134 if (np->driver_data & DEV_NEED_MSI_FIX) in nv_resume()
6159 struct fe_priv *np = netdev_priv(dev); in nv_shutdown() local
6178 pci_wake_from_d3(pdev, np->wolenabled); in nv_shutdown()