Lines Matching refs:rp
529 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) in rhine_wait_bit() argument
531 void __iomem *ioaddr = rp->base; in rhine_wait_bit()
542 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " in rhine_wait_bit()
547 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_high() argument
549 rhine_wait_bit(rp, reg, mask, false); in rhine_wait_bit_high()
552 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_low() argument
554 rhine_wait_bit(rp, reg, mask, true); in rhine_wait_bit_low()
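
The matched lines only show this helper's entry points and its debug message. For context, a minimal sketch of how a bit-polling helper of this shape fits together, reconstructed around the visible fragments; the loop bound (1024), the 10 µs delay, and the 64-cycle reporting threshold are assumptions, not taken from the listing:

    static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
    {
            void __iomem *ioaddr = rp->base;
            int i;

            /* Poll until the masked bit reaches the requested level.
             * The bound and delay below are assumed values. */
            for (i = 0; i < 1024; i++) {
                    bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

                    if (low ^ has_mask_bits)
                            break;
                    udelay(10);
            }
            if (i > 64)
                    netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
                              "count: %04d\n", low ? "low" : "high", reg, mask, i);
    }

    /* Thin wrappers, exactly as the matched lines show. */
    static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
    {
            rhine_wait_bit(rp, reg, mask, false);
    }

    static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
    {
            rhine_wait_bit(rp, reg, mask, true);
    }
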
557 static u32 rhine_get_events(struct rhine_private *rp) in rhine_get_events() argument
559 void __iomem *ioaddr = rp->base; in rhine_get_events()
564 if (rp->quirks & rqStatusWBRace) in rhine_get_events()
569 static void rhine_ack_events(struct rhine_private *rp, u32 mask) in rhine_ack_events() argument
571 void __iomem *ioaddr = rp->base; in rhine_ack_events()
573 if (rp->quirks & rqStatusWBRace) in rhine_ack_events()
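
These two helpers are a pair: rhine_get_events() reads the 16-bit interrupt status and, on chips with the rqStatusWBRace quirk, widens it with one extra byte from a second register; rhine_ack_events() writes the handled bits back in the same layout. A sketch of plausible bodies; the IntrStatus/IntrStatus2 offsets are assumed from the quirk name and the 16-bit/8-bit access pattern:

    static u32 rhine_get_events(struct rhine_private *rp)
    {
            void __iomem *ioaddr = rp->base;
            u32 intr_status;

            intr_status = ioread16(ioaddr + IntrStatus);
            /* Write-back-race chips report one extra status byte. */
            if (rp->quirks & rqStatusWBRace)
                    intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
            return intr_status;
    }

    static void rhine_ack_events(struct rhine_private *rp, u32 mask)
    {
            void __iomem *ioaddr = rp->base;

            if (rp->quirks & rqStatusWBRace)
                    iowrite8(mask >> 16, ioaddr + IntrStatus2);
            /* Acknowledge by writing the handled bits back. */
            iowrite16(mask, ioaddr + IntrStatus);
    }
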
585 struct rhine_private *rp = netdev_priv(dev); in rhine_power_init() local
586 void __iomem *ioaddr = rp->base; in rhine_power_init()
589 if (rp->quirks & rqWOL) { in rhine_power_init()
599 if (rp->quirks & rq6patterns) in rhine_power_init()
604 if (rp->quirks & rq6patterns) in rhine_power_init()
609 if (rp->quirks & rq6patterns) in rhine_power_init()
641 struct rhine_private *rp = netdev_priv(dev); in rhine_chip_reset() local
642 void __iomem *ioaddr = rp->base; in rhine_chip_reset()
652 if (rp->quirks & rqForceReset) in rhine_chip_reset()
656 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); in rhine_chip_reset()
660 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? in rhine_chip_reset()
711 struct rhine_private *rp = netdev_priv(dev); in rhine_reload_eeprom() local
712 void __iomem *ioaddr = rp->base; in rhine_reload_eeprom()
728 enable_mmio(pioaddr, rp->quirks); in rhine_reload_eeprom()
731 if (rp->quirks & rqWOL) in rhine_reload_eeprom()
739 struct rhine_private *rp = netdev_priv(dev); in rhine_poll() local
740 const int irq = rp->irq; in rhine_poll()
748 static void rhine_kick_tx_threshold(struct rhine_private *rp) in rhine_kick_tx_threshold() argument
750 if (rp->tx_thresh < 0xe0) { in rhine_kick_tx_threshold()
751 void __iomem *ioaddr = rp->base; in rhine_kick_tx_threshold()
753 rp->tx_thresh += 0x20; in rhine_kick_tx_threshold()
754 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); in rhine_kick_tx_threshold()
758 static void rhine_tx_err(struct rhine_private *rp, u32 status) in rhine_tx_err() argument
760 struct net_device *dev = rp->dev; in rhine_tx_err()
763 netif_info(rp, tx_err, dev, in rhine_tx_err()
768 rhine_kick_tx_threshold(rp); in rhine_tx_err()
769 netif_info(rp, tx_err, dev, "Transmitter underrun, " in rhine_tx_err()
770 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
774 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); in rhine_tx_err()
778 rhine_kick_tx_threshold(rp); in rhine_tx_err()
779 netif_info(rp, tx_err, dev, "Unspecified error. " in rhine_tx_err()
780 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
786 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) in rhine_update_rx_crc_and_missed_errord() argument
788 void __iomem *ioaddr = rp->base; in rhine_update_rx_crc_and_missed_errord()
789 struct net_device_stats *stats = &rp->dev->stats; in rhine_update_rx_crc_and_missed_errord()
827 struct rhine_private *rp = container_of(napi, struct rhine_private, napi); in rhine_napipoll() local
828 struct net_device *dev = rp->dev; in rhine_napipoll()
829 void __iomem *ioaddr = rp->base; in rhine_napipoll()
834 status = rhine_get_events(rp); in rhine_napipoll()
835 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); in rhine_napipoll()
843 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); in rhine_napipoll()
845 netif_warn(rp, tx_err, dev, "Tx still on\n"); in rhine_napipoll()
851 rhine_tx_err(rp, status); in rhine_napipoll()
855 spin_lock(&rp->lock); in rhine_napipoll()
856 rhine_update_rx_crc_and_missed_errord(rp); in rhine_napipoll()
857 spin_unlock(&rp->lock); in rhine_napipoll()
862 schedule_work(&rp->slow_event_task); in rhine_napipoll()
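
Assembled, the poll function follows the standard NAPI shape: read and acknowledge the fast events, run Rx and Tx processing, punt slow events to the workqueue, then complete and re-enable interrupts once under budget. A condensed sketch; the RHINE_EVENT_NAPI_* masks, the budget bookkeeping, and the final IntrEnable write are assumptions consistent with the fragments:

    static int rhine_napipoll(struct napi_struct *napi, int budget)
    {
            struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
            struct net_device *dev = rp->dev;
            void __iomem *ioaddr = rp->base;
            u16 enable_mask = RHINE_EVENT & 0xffff;
            int work_done = 0;
            u32 status;

            status = rhine_get_events(rp);
            rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

            if (status & RHINE_EVENT_NAPI_RX)
                    work_done += rhine_rx(dev, budget);

            if (status & RHINE_EVENT_NAPI_TX) {
                    if (status & RHINE_EVENT_NAPI_TX_ERR) {
                            /* Avoid scavenging before the Tx engine stops. */
                            rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
                            if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
                                    netif_warn(rp, tx_err, dev, "Tx still on\n");
                    }
                    rhine_tx(dev);
                    if (status & RHINE_EVENT_NAPI_TX_ERR)
                            rhine_tx_err(rp, status);
            }

            if (status & IntrStatsMax) {
                    spin_lock(&rp->lock);
                    rhine_update_rx_crc_and_missed_errord(rp);
                    spin_unlock(&rp->lock);
            }

            if (status & RHINE_EVENT_SLOW) {
                    /* Slow events need sleeping context; keep them masked
                     * until the work item has run. */
                    enable_mask &= ~RHINE_EVENT_SLOW;
                    schedule_work(&rp->slow_event_task);
            }

            if (work_done < budget) {
                    napi_complete_done(napi, work_done);
                    iowrite16(enable_mask, ioaddr + IntrEnable);
            }
            return work_done;
    }
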
875 struct rhine_private *rp = netdev_priv(dev); in rhine_hw_init() local
881 if (rp->quirks & rqRhineI) in rhine_hw_init()
911 struct rhine_private *rp; in rhine_init_one_common() local
929 rp = netdev_priv(dev); in rhine_init_one_common()
930 rp->dev = dev; in rhine_init_one_common()
931 rp->quirks = quirks; in rhine_init_one_common()
932 rp->pioaddr = pioaddr; in rhine_init_one_common()
933 rp->base = ioaddr; in rhine_init_one_common()
934 rp->irq = irq; in rhine_init_one_common()
935 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); in rhine_init_one_common()
937 phy_id = rp->quirks & rqIntPHY ? 1 : 0; in rhine_init_one_common()
939 u64_stats_init(&rp->tx_stats.syncp); in rhine_init_one_common()
940 u64_stats_init(&rp->rx_stats.syncp); in rhine_init_one_common()
961 spin_lock_init(&rp->lock); in rhine_init_one_common()
962 mutex_init(&rp->task_lock); in rhine_init_one_common()
963 INIT_WORK(&rp->reset_task, rhine_reset_task); in rhine_init_one_common()
964 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); in rhine_init_one_common()
966 rp->mii_if.dev = dev; in rhine_init_one_common()
967 rp->mii_if.mdio_read = mdio_read; in rhine_init_one_common()
968 rp->mii_if.mdio_write = mdio_write; in rhine_init_one_common()
969 rp->mii_if.phy_id_mask = 0x1f; in rhine_init_one_common()
970 rp->mii_if.reg_num_mask = 0x1f; in rhine_init_one_common()
977 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); in rhine_init_one_common()
979 if (rp->quirks & rqRhineI) in rhine_init_one_common()
982 if (rp->quirks & rqMgmt) in rhine_init_one_common()
992 if (rp->quirks & rqRhineI) in rhine_init_one_common()
994 else if (rp->quirks & rqStatusWBRace) in rhine_init_one_common()
996 else if (rp->quirks & rqMgmt) in rhine_init_one_common()
1002 name, (long)ioaddr, dev->dev_addr, rp->irq); in rhine_init_one_common()
1012 rp->mii_if.advertising = mdio_read(dev, phy_id, 4); in rhine_init_one_common()
1016 mii_status, rp->mii_if.advertising, in rhine_init_one_common()
1027 rp->mii_if.phy_id = phy_id; in rhine_init_one_common()
1029 netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); in rhine_init_one_common()
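
The quirk tests near the end of probe gate both the advertised offload features and the reported chip name. A sketch of what these branches plausibly select, folded into two hypothetical helpers for readability; the feature flags and name strings are assumptions inferred from the quirk names (Rhine I has the Tx bounce-buffer path, rqMgmt implies the VLAN-capable Rhine III):

    /* Hypothetical helper: feature flags gated by chip quirks. */
    static void rhine_set_features(struct net_device *dev, struct rhine_private *rp)
    {
            if (rp->quirks & rqRhineI)
                    dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
            if (rp->quirks & rqMgmt)
                    dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
                                     NETIF_F_HW_VLAN_CTAG_RX |
                                     NETIF_F_HW_VLAN_CTAG_FILTER;
    }

    /* Hypothetical helper: chip name selection mirroring the quirk chain. */
    static const char *rhine_chip_name(struct rhine_private *rp)
    {
            if (rp->quirks & rqRhineI)
                    return "Rhine";
            if (rp->quirks & rqStatusWBRace)
                    return "Rhine II";
            if (rp->quirks & rqMgmt)
                    return "Rhine III (Management Adapter)";
            return "Rhine III";
    }
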
1161 struct rhine_private *rp = netdev_priv(dev); in alloc_ring() local
1175 if (rp->quirks & rqRhineI) { in alloc_ring()
1176 rp->tx_bufs = dma_alloc_coherent(hwdev, in alloc_ring()
1178 &rp->tx_bufs_dma, in alloc_ring()
1180 if (rp->tx_bufs == NULL) { in alloc_ring()
1189 rp->rx_ring = ring; in alloc_ring()
1190 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1191 rp->rx_ring_dma = ring_dma; in alloc_ring()
1192 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
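
Both descriptor rings come out of a single coherent allocation: the Tx ring simply starts RX_RING_SIZE rx_desc entries into the block, as the pointer arithmetic above shows. A sketch of the allocation itself; the GFP flag and error message are assumptions:

    void *ring;
    dma_addr_t ring_dma;

    ring = dma_alloc_coherent(hwdev,
                              RX_RING_SIZE * sizeof(struct rx_desc) +
                              TX_RING_SIZE * sizeof(struct tx_desc),
                              &ring_dma, GFP_ATOMIC);
    if (!ring) {
            netdev_err(dev, "Could not allocate DMA memory\n");
            return -ENOMEM;
    }

    rp->rx_ring = ring;
    rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
    rp->rx_ring_dma = ring_dma;
    rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
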
1199 struct rhine_private *rp = netdev_priv(dev); in free_ring() local
1205 rp->rx_ring, rp->rx_ring_dma); in free_ring()
1206 rp->tx_ring = NULL; in free_ring()
1208 if (rp->tx_bufs) in free_ring()
1210 rp->tx_bufs, rp->tx_bufs_dma); in free_ring()
1212 rp->tx_bufs = NULL; in free_ring()
1218 struct rhine_private *rp = netdev_priv(dev); in alloc_rbufs() local
1223 rp->dirty_rx = rp->cur_rx = 0; in alloc_rbufs()
1225 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in alloc_rbufs()
1226 rp->rx_head_desc = &rp->rx_ring[0]; in alloc_rbufs()
1227 next = rp->rx_ring_dma; in alloc_rbufs()
1231 rp->rx_ring[i].rx_status = 0; in alloc_rbufs()
1232 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); in alloc_rbufs()
1234 rp->rx_ring[i].next_desc = cpu_to_le32(next); in alloc_rbufs()
1235 rp->rx_skbuff[i] = NULL; in alloc_rbufs()
1238 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); in alloc_rbufs()
1242 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz); in alloc_rbufs()
1243 rp->rx_skbuff[i] = skb; in alloc_rbufs()
1247 rp->rx_skbuff_dma[i] = in alloc_rbufs()
1248 dma_map_single(hwdev, skb->data, rp->rx_buf_sz, in alloc_rbufs()
1250 if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) { in alloc_rbufs()
1251 rp->rx_skbuff_dma[i] = 0; in alloc_rbufs()
1255 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); in alloc_rbufs()
1256 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); in alloc_rbufs()
1258 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in alloc_rbufs()
1263 struct rhine_private *rp = netdev_priv(dev); in free_rbufs() local
1269 rp->rx_ring[i].rx_status = 0; in free_rbufs()
1270 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_rbufs()
1271 if (rp->rx_skbuff[i]) { in free_rbufs()
1273 rp->rx_skbuff_dma[i], in free_rbufs()
1274 rp->rx_buf_sz, DMA_FROM_DEVICE); in free_rbufs()
1275 dev_kfree_skb(rp->rx_skbuff[i]); in free_rbufs()
1277 rp->rx_skbuff[i] = NULL; in free_rbufs()
1283 struct rhine_private *rp = netdev_priv(dev); in alloc_tbufs() local
1287 rp->dirty_tx = rp->cur_tx = 0; in alloc_tbufs()
1288 next = rp->tx_ring_dma; in alloc_tbufs()
1290 rp->tx_skbuff[i] = NULL; in alloc_tbufs()
1291 rp->tx_ring[i].tx_status = 0; in alloc_tbufs()
1292 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in alloc_tbufs()
1294 rp->tx_ring[i].next_desc = cpu_to_le32(next); in alloc_tbufs()
1295 if (rp->quirks & rqRhineI) in alloc_tbufs()
1296 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; in alloc_tbufs()
1298 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); in alloc_tbufs()
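
The advance of the next_desc chain pointer carries no rp reference, so it is absent from the matched lines; reassembled with that inferred step, the Tx ring initialization reads as below. The last descriptor points back at the ring base, closing the loop:

    dma_addr_t next = rp->tx_ring_dma;
    int i;

    for (i = 0; i < TX_RING_SIZE; i++) {
            rp->tx_skbuff[i] = NULL;
            rp->tx_ring[i].tx_status = 0;
            rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
            next += sizeof(struct tx_desc);   /* inferred: DMA address of the next slot */
            rp->tx_ring[i].next_desc = cpu_to_le32(next);
            if (rp->quirks & rqRhineI)        /* Rhine I transmits from bounce buffers */
                    rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
    }
    /* Close the ring: the last descriptor chains back to the first. */
    rp->tx_ring[i - 1].next_desc = cpu_to_le32(rp->tx_ring_dma);
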
1305 struct rhine_private *rp = netdev_priv(dev); in free_tbufs() local
1310 rp->tx_ring[i].tx_status = 0; in free_tbufs()
1311 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in free_tbufs()
1312 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_tbufs()
1313 if (rp->tx_skbuff[i]) { in free_tbufs()
1314 if (rp->tx_skbuff_dma[i]) { in free_tbufs()
1316 rp->tx_skbuff_dma[i], in free_tbufs()
1317 rp->tx_skbuff[i]->len, in free_tbufs()
1320 dev_kfree_skb(rp->tx_skbuff[i]); in free_tbufs()
1322 rp->tx_skbuff[i] = NULL; in free_tbufs()
1323 rp->tx_buf[i] = NULL; in free_tbufs()
1329 struct rhine_private *rp = netdev_priv(dev); in rhine_check_media() local
1330 void __iomem *ioaddr = rp->base; in rhine_check_media()
1332 if (!rp->mii_if.force_media) in rhine_check_media()
1333 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); in rhine_check_media()
1335 if (rp->mii_if.full_duplex) in rhine_check_media()
1342 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_check_media()
1343 rp->mii_if.force_media, netif_carrier_ok(dev)); in rhine_check_media()
1350 struct rhine_private *rp = netdev_priv(dev); in rhine_set_carrier() local
1360 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_set_carrier()
1470 struct rhine_private *rp = netdev_priv(dev); in rhine_init_cam_filter() local
1471 void __iomem *ioaddr = rp->base; in rhine_init_cam_filter()
1490 struct rhine_private *rp = netdev_priv(dev); in rhine_update_vcam() local
1491 void __iomem *ioaddr = rp->base; in rhine_update_vcam()
1496 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { in rhine_update_vcam()
1507 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_add_vid() local
1509 spin_lock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1510 set_bit(vid, rp->active_vlans); in rhine_vlan_rx_add_vid()
1512 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1518 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_kill_vid() local
1520 spin_lock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
1521 clear_bit(vid, rp->active_vlans); in rhine_vlan_rx_kill_vid()
1523 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
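
Between set_bit()/clear_bit() and the unlock, the hardware VLAN CAM has to be rewritten for the filter change to take effect; that call takes dev rather than rp and therefore does not appear in the matched lines. A sketch of the reconstructed pair; the signatures and the rhine_update_vcam(dev) calls are assumptions filled in from context:

    static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
    {
            struct rhine_private *rp = netdev_priv(dev);

            spin_lock_bh(&rp->lock);
            set_bit(vid, rp->active_vlans);
            rhine_update_vcam(dev);         /* inferred: push the bitmap to the CAM */
            spin_unlock_bh(&rp->lock);
            return 0;
    }

    static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
    {
            struct rhine_private *rp = netdev_priv(dev);

            spin_lock_bh(&rp->lock);
            clear_bit(vid, rp->active_vlans);
            rhine_update_vcam(dev);         /* inferred */
            spin_unlock_bh(&rp->lock);
            return 0;
    }
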
1529 struct rhine_private *rp = netdev_priv(dev); in init_registers() local
1530 void __iomem *ioaddr = rp->base; in init_registers()
1540 rp->tx_thresh = 0x20; in init_registers()
1541 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ in init_registers()
1543 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); in init_registers()
1544 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); in init_registers()
1548 if (rp->quirks & rqMgmt) in init_registers()
1551 napi_enable(&rp->napi); in init_registers()
1561 static void rhine_enable_linkmon(struct rhine_private *rp) in rhine_enable_linkmon() argument
1563 void __iomem *ioaddr = rp->base; in rhine_enable_linkmon()
1569 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_enable_linkmon()
1575 static void rhine_disable_linkmon(struct rhine_private *rp) in rhine_disable_linkmon() argument
1577 void __iomem *ioaddr = rp->base; in rhine_disable_linkmon()
1581 if (rp->quirks & rqRhineI) { in rhine_disable_linkmon()
1590 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_disable_linkmon()
1596 rhine_wait_bit_high(rp, MIIRegAddr, 0x80); in rhine_disable_linkmon()
1603 struct rhine_private *rp = netdev_priv(dev); in mdio_read() local
1604 void __iomem *ioaddr = rp->base; in mdio_read()
1607 rhine_disable_linkmon(rp); in mdio_read()
1613 rhine_wait_bit_low(rp, MIICmd, 0x40); in mdio_read()
1616 rhine_enable_linkmon(rp); in mdio_read()
1622 struct rhine_private *rp = netdev_priv(dev); in mdio_write() local
1623 void __iomem *ioaddr = rp->base; in mdio_write()
1625 rhine_disable_linkmon(rp); in mdio_write()
1632 rhine_wait_bit_low(rp, MIICmd, 0x20); in mdio_write()
1634 rhine_enable_linkmon(rp); in mdio_write()
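
MII management access races with the chip's autonomous link monitor, hence the disable/enable bracket around every transfer. A sketch of both accessors with the register writes (which carry no rp reference) filled in; the MIIPhyAddr/MIIRegAddr/MIIData offsets and the 0x40/0x20 trigger bits are assumptions matching the wait masks above:

    static int mdio_read(struct net_device *dev, int phy_id, int regnum)
    {
            struct rhine_private *rp = netdev_priv(dev);
            void __iomem *ioaddr = rp->base;
            int result;

            rhine_disable_linkmon(rp);

            iowrite8(phy_id, ioaddr + MIIPhyAddr);
            iowrite8(regnum, ioaddr + MIIRegAddr);
            iowrite8(0x40, ioaddr + MIICmd);        /* trigger read */
            rhine_wait_bit_low(rp, MIICmd, 0x40);   /* wait for completion */
            result = ioread16(ioaddr + MIIData);

            rhine_enable_linkmon(rp);
            return result;
    }

    static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
    {
            struct rhine_private *rp = netdev_priv(dev);
            void __iomem *ioaddr = rp->base;

            rhine_disable_linkmon(rp);

            iowrite8(phy_id, ioaddr + MIIPhyAddr);
            iowrite8(regnum, ioaddr + MIIRegAddr);
            iowrite16(value, ioaddr + MIIData);
            iowrite8(0x20, ioaddr + MIICmd);        /* trigger write */
            rhine_wait_bit_low(rp, MIICmd, 0x20);

            rhine_enable_linkmon(rp);
    }
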
1637 static void rhine_task_disable(struct rhine_private *rp) in rhine_task_disable() argument
1639 mutex_lock(&rp->task_lock); in rhine_task_disable()
1640 rp->task_enable = false; in rhine_task_disable()
1641 mutex_unlock(&rp->task_lock); in rhine_task_disable()
1643 cancel_work_sync(&rp->slow_event_task); in rhine_task_disable()
1644 cancel_work_sync(&rp->reset_task); in rhine_task_disable()
1647 static void rhine_task_enable(struct rhine_private *rp) in rhine_task_enable() argument
1649 mutex_lock(&rp->task_lock); in rhine_task_enable()
1650 rp->task_enable = true; in rhine_task_enable()
1651 mutex_unlock(&rp->task_lock); in rhine_task_enable()
1656 struct rhine_private *rp = netdev_priv(dev); in rhine_open() local
1657 void __iomem *ioaddr = rp->base; in rhine_open()
1660 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); in rhine_open()
1664 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); in rhine_open()
1668 free_irq(rp->irq, dev); in rhine_open()
1674 rhine_task_enable(rp); in rhine_open()
1677 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", in rhine_open()
1679 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_open()
1688 struct rhine_private *rp = container_of(work, struct rhine_private, in rhine_reset_task() local
1690 struct net_device *dev = rp->dev; in rhine_reset_task()
1692 mutex_lock(&rp->task_lock); in rhine_reset_task()
1694 if (!rp->task_enable) in rhine_reset_task()
1697 napi_disable(&rp->napi); in rhine_reset_task()
1699 spin_lock_bh(&rp->lock); in rhine_reset_task()
1711 spin_unlock_bh(&rp->lock); in rhine_reset_task()
1718 mutex_unlock(&rp->task_lock); in rhine_reset_task()
1723 struct rhine_private *rp = netdev_priv(dev); in rhine_tx_timeout() local
1724 void __iomem *ioaddr = rp->base; in rhine_tx_timeout()
1728 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_tx_timeout()
1730 schedule_work(&rp->reset_task); in rhine_tx_timeout()
1736 struct rhine_private *rp = netdev_priv(dev); in rhine_start_tx() local
1738 void __iomem *ioaddr = rp->base; in rhine_start_tx()
1745 entry = rp->cur_tx % TX_RING_SIZE; in rhine_start_tx()
1750 rp->tx_skbuff[entry] = skb; in rhine_start_tx()
1752 if ((rp->quirks & rqRhineI) && in rhine_start_tx()
1758 rp->tx_skbuff[entry] = NULL; in rhine_start_tx()
1764 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); in rhine_start_tx()
1766 memset(rp->tx_buf[entry] + skb->len, 0, in rhine_start_tx()
1768 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1769 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + in rhine_start_tx()
1770 (rp->tx_buf[entry] - in rhine_start_tx()
1771 rp->tx_bufs)); in rhine_start_tx()
1773 rp->tx_skbuff_dma[entry] = in rhine_start_tx()
1776 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { in rhine_start_tx()
1778 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1782 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); in rhine_start_tx()
1785 rp->tx_ring[entry].desc_length = in rhine_start_tx()
1794 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); in rhine_start_tx()
1796 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); in rhine_start_tx()
1799 rp->tx_ring[entry].tx_status = 0; in rhine_start_tx()
1804 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); in rhine_start_tx()
1807 rp->cur_tx++; in rhine_start_tx()
1820 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) in rhine_start_tx()
1823 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", in rhine_start_tx()
1824 rp->cur_tx - 1, entry); in rhine_start_tx()
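
The detail that matters most in this path is ordering: every descriptor field is written first, a write barrier follows, and only then is DescOwn handed to the chip. A condensed sketch of the tail of the function; the barrier placement and the ChipCmd1/Cmd1TxDemand demand-poll kick are assumptions consistent with the fragments:

    /* All descriptor fields are in place before ownership transfers. */
    rp->tx_ring[entry].desc_length =
            cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

    wmb();  /* make the descriptor visible before flipping DescOwn */
    rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
    wmb();

    rp->cur_tx++;

    /* Tell the chip to poll the Tx descriptors now (assumed bit name). */
    iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, ioaddr + ChipCmd1);

    if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
            netif_stop_queue(dev);
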
1829 static void rhine_irq_disable(struct rhine_private *rp) in rhine_irq_disable() argument
1831 iowrite16(0x0000, rp->base + IntrEnable); in rhine_irq_disable()
1840 struct rhine_private *rp = netdev_priv(dev); in rhine_interrupt() local
1844 status = rhine_get_events(rp); in rhine_interrupt()
1846 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); in rhine_interrupt()
1851 rhine_irq_disable(rp); in rhine_interrupt()
1852 napi_schedule(&rp->napi); in rhine_interrupt()
1856 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", in rhine_interrupt()
1867 struct rhine_private *rp = netdev_priv(dev); in rhine_tx() local
1869 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE; in rhine_tx()
1874 while (rp->dirty_tx != rp->cur_tx) { in rhine_tx()
1875 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); in rhine_tx()
1876 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", in rhine_tx()
1880 skb = rp->tx_skbuff[entry]; in rhine_tx()
1882 netif_dbg(rp, tx_done, dev, in rhine_tx()
1893 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || in rhine_tx()
1896 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); in rhine_tx()
1901 if (rp->quirks & rqRhineI) in rhine_tx()
1905 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", in rhine_tx()
1908 u64_stats_update_begin(&rp->tx_stats.syncp); in rhine_tx()
1909 rp->tx_stats.bytes += skb->len; in rhine_tx()
1910 rp->tx_stats.packets++; in rhine_tx()
1911 u64_stats_update_end(&rp->tx_stats.syncp); in rhine_tx()
1914 if (rp->tx_skbuff_dma[entry]) { in rhine_tx()
1916 rp->tx_skbuff_dma[entry], in rhine_tx()
1923 rp->tx_skbuff[entry] = NULL; in rhine_tx()
1924 entry = (++rp->dirty_tx) % TX_RING_SIZE; in rhine_tx()
1928 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4) in rhine_tx()
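
The scavenge loop's stop condition is not visible among the matched lines: a descriptor still owned by the chip ends the pass. A sketch of the per-descriptor completion step; the DescOwn early-exit, the 0x8000 error bit, and the dev_consume_skb_any() choice are assumptions:

    while (rp->dirty_tx != rp->cur_tx) {
            txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
            if (txstatus & DescOwn)                 /* chip still owns it */
                    break;

            skb = rp->tx_skbuff[entry];
            if (txstatus & 0x8000) {
                    /* Error completion: per-cause counters as in the
                     * fragments above. */
                    dev->stats.tx_errors++;
            } else {
                    u64_stats_update_begin(&rp->tx_stats.syncp);
                    rp->tx_stats.bytes += skb->len;
                    rp->tx_stats.packets++;
                    u64_stats_update_end(&rp->tx_stats.syncp);
            }

            /* Rhine I frames sent from the bounce buffer were never
             * mapped, hence the zero check. */
            if (rp->tx_skbuff_dma[entry])
                    dma_unmap_single(hwdev, rp->tx_skbuff_dma[entry],
                                     skb->len, DMA_TO_DEVICE);

            dev_consume_skb_any(skb);
            rp->tx_skbuff[entry] = NULL;
            entry = (++rp->dirty_tx) % TX_RING_SIZE;
    }

    if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
            netif_wake_queue(dev);
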
1950 struct rhine_private *rp = netdev_priv(dev); in rhine_rx() local
1953 int entry = rp->cur_rx % RX_RING_SIZE; in rhine_rx()
1955 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, in rhine_rx()
1956 entry, le32_to_cpu(rp->rx_head_desc->rx_status)); in rhine_rx()
1960 struct rx_desc *desc = rp->rx_head_desc; in rhine_rx()
1968 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, in rhine_rx()
1980 rp->rx_head_desc, in rhine_rx()
1981 &rp->rx_ring[entry]); in rhine_rx()
1985 netif_dbg(rp, rx_err, dev, in rhine_rx()
1997 spin_lock(&rp->lock); in rhine_rx()
1999 spin_unlock(&rp->lock); in rhine_rx()
2014 rp->rx_skbuff_dma[entry], in rhine_rx()
2015 rp->rx_buf_sz, in rhine_rx()
2019 rp->rx_skbuff[entry]->data, in rhine_rx()
2023 rp->rx_skbuff_dma[entry], in rhine_rx()
2024 rp->rx_buf_sz, in rhine_rx()
2027 skb = rp->rx_skbuff[entry]; in rhine_rx()
2032 rp->rx_skbuff[entry] = NULL; in rhine_rx()
2035 rp->rx_skbuff_dma[entry], in rhine_rx()
2036 rp->rx_buf_sz, in rhine_rx()
2049 u64_stats_update_begin(&rp->rx_stats.syncp); in rhine_rx()
2050 rp->rx_stats.bytes += pkt_len; in rhine_rx()
2051 rp->rx_stats.packets++; in rhine_rx()
2052 u64_stats_update_end(&rp->rx_stats.syncp); in rhine_rx()
2054 entry = (++rp->cur_rx) % RX_RING_SIZE; in rhine_rx()
2055 rp->rx_head_desc = &rp->rx_ring[entry]; in rhine_rx()
2059 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) { in rhine_rx()
2061 entry = rp->dirty_rx % RX_RING_SIZE; in rhine_rx()
2062 if (rp->rx_skbuff[entry] == NULL) { in rhine_rx()
2063 skb = netdev_alloc_skb(dev, rp->rx_buf_sz); in rhine_rx()
2064 rp->rx_skbuff[entry] = skb; in rhine_rx()
2067 rp->rx_skbuff_dma[entry] = in rhine_rx()
2069 rp->rx_buf_sz, in rhine_rx()
2072 rp->rx_skbuff_dma[entry])) { in rhine_rx()
2074 rp->rx_skbuff_dma[entry] = 0; in rhine_rx()
2077 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); in rhine_rx()
2079 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); in rhine_rx()
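
The refill loop applies the same ownership discipline on the Rx side: allocate and map a fresh buffer, write its address into the descriptor, and only then return DescOwn to the chip. A sketch of the loop body; the allocation-failure bail-out and the cleanup on a failed mapping are assumptions:

    for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
            struct sk_buff *skb;

            entry = rp->dirty_rx % RX_RING_SIZE;
            if (rp->rx_skbuff[entry] == NULL) {
                    skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
                    rp->rx_skbuff[entry] = skb;
                    if (skb == NULL)
                            break;          /* retry on the next pass */

                    rp->rx_skbuff_dma[entry] =
                            dma_map_single(hwdev, skb->data,
                                           rp->rx_buf_sz, DMA_FROM_DEVICE);
                    if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[entry])) {
                            dev_kfree_skb(skb);
                            rp->rx_skbuff_dma[entry] = 0;
                            rp->rx_skbuff[entry] = NULL;    /* assumed cleanup */
                            break;
                    }
                    rp->rx_ring[entry].addr =
                            cpu_to_le32(rp->rx_skbuff_dma[entry]);
            }
            /* Hand the slot back to the chip last. */
            rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
    }
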
2086 struct rhine_private *rp = netdev_priv(dev); in rhine_restart_tx() local
2087 void __iomem *ioaddr = rp->base; in rhine_restart_tx()
2088 int entry = rp->dirty_tx % TX_RING_SIZE; in rhine_restart_tx()
2095 intr_status = rhine_get_events(rp); in rhine_restart_tx()
2100 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), in rhine_restart_tx()
2106 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) in rhine_restart_tx()
2116 netif_warn(rp, tx_err, dev, "another error occurred %08x\n", in rhine_restart_tx()
2124 struct rhine_private *rp = in rhine_slow_event_task() local
2126 struct net_device *dev = rp->dev; in rhine_slow_event_task()
2129 mutex_lock(&rp->task_lock); in rhine_slow_event_task()
2131 if (!rp->task_enable) in rhine_slow_event_task()
2134 intr_status = rhine_get_events(rp); in rhine_slow_event_task()
2135 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); in rhine_slow_event_task()
2141 netif_warn(rp, hw, dev, "PCI error\n"); in rhine_slow_event_task()
2143 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); in rhine_slow_event_task()
2146 mutex_unlock(&rp->task_lock); in rhine_slow_event_task()
2152 struct rhine_private *rp = netdev_priv(dev); in rhine_get_stats64() local
2155 spin_lock_bh(&rp->lock); in rhine_get_stats64()
2156 rhine_update_rx_crc_and_missed_errord(rp); in rhine_get_stats64()
2157 spin_unlock_bh(&rp->lock); in rhine_get_stats64()
2162 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); in rhine_get_stats64()
2163 stats->rx_packets = rp->rx_stats.packets; in rhine_get_stats64()
2164 stats->rx_bytes = rp->rx_stats.bytes; in rhine_get_stats64()
2165 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); in rhine_get_stats64()
2168 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); in rhine_get_stats64()
2169 stats->tx_packets = rp->tx_stats.packets; in rhine_get_stats64()
2170 stats->tx_bytes = rp->tx_stats.bytes; in rhine_get_stats64()
2171 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); in rhine_get_stats64()
2178 struct rhine_private *rp = netdev_priv(dev); in rhine_set_rx_mode() local
2179 void __iomem *ioaddr = rp->base; in rhine_set_rx_mode()
2193 } else if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2215 if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2235 struct rhine_private *rp = netdev_priv(dev); in netdev_get_settings() local
2238 mutex_lock(&rp->task_lock); in netdev_get_settings()
2239 rc = mii_ethtool_gset(&rp->mii_if, cmd); in netdev_get_settings()
2240 mutex_unlock(&rp->task_lock); in netdev_get_settings()
2247 struct rhine_private *rp = netdev_priv(dev); in netdev_set_settings() local
2250 mutex_lock(&rp->task_lock); in netdev_set_settings()
2251 rc = mii_ethtool_sset(&rp->mii_if, cmd); in netdev_set_settings()
2252 rhine_set_carrier(&rp->mii_if); in netdev_set_settings()
2253 mutex_unlock(&rp->task_lock); in netdev_set_settings()
2260 struct rhine_private *rp = netdev_priv(dev); in netdev_nway_reset() local
2262 return mii_nway_restart(&rp->mii_if); in netdev_nway_reset()
2267 struct rhine_private *rp = netdev_priv(dev); in netdev_get_link() local
2269 return mii_link_ok(&rp->mii_if); in netdev_get_link()
2274 struct rhine_private *rp = netdev_priv(dev); in netdev_get_msglevel() local
2276 return rp->msg_enable; in netdev_get_msglevel()
2281 struct rhine_private *rp = netdev_priv(dev); in netdev_set_msglevel() local
2283 rp->msg_enable = value; in netdev_set_msglevel()
2288 struct rhine_private *rp = netdev_priv(dev); in rhine_get_wol() local
2290 if (!(rp->quirks & rqWOL)) in rhine_get_wol()
2293 spin_lock_irq(&rp->lock); in rhine_get_wol()
2296 wol->wolopts = rp->wolopts; in rhine_get_wol()
2297 spin_unlock_irq(&rp->lock); in rhine_get_wol()
2302 struct rhine_private *rp = netdev_priv(dev); in rhine_set_wol() local
2306 if (!(rp->quirks & rqWOL)) in rhine_set_wol()
2312 spin_lock_irq(&rp->lock); in rhine_set_wol()
2313 rp->wolopts = wol->wolopts; in rhine_set_wol()
2314 spin_unlock_irq(&rp->lock); in rhine_set_wol()
2333 struct rhine_private *rp = netdev_priv(dev); in netdev_ioctl() local
2339 mutex_lock(&rp->task_lock); in netdev_ioctl()
2340 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
2341 rhine_set_carrier(&rp->mii_if); in netdev_ioctl()
2342 mutex_unlock(&rp->task_lock); in netdev_ioctl()
2349 struct rhine_private *rp = netdev_priv(dev); in rhine_close() local
2350 void __iomem *ioaddr = rp->base; in rhine_close()
2352 rhine_task_disable(rp); in rhine_close()
2353 napi_disable(&rp->napi); in rhine_close()
2356 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", in rhine_close()
2360 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); in rhine_close()
2362 rhine_irq_disable(rp); in rhine_close()
2367 free_irq(rp->irq, dev); in rhine_close()
2379 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_pci() local
2383 pci_iounmap(pdev, rp->base); in rhine_remove_one_pci()
2393 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_platform() local
2397 iounmap(rp->base); in rhine_remove_one_platform()
2407 struct rhine_private *rp = netdev_priv(dev); in rhine_shutdown_pci() local
2408 void __iomem *ioaddr = rp->base; in rhine_shutdown_pci()
2410 if (!(rp->quirks & rqWOL)) in rhine_shutdown_pci()
2416 if (rp->quirks & rq6patterns) in rhine_shutdown_pci()
2419 spin_lock(&rp->lock); in rhine_shutdown_pci()
2421 if (rp->wolopts & WAKE_MAGIC) { in rhine_shutdown_pci()
2430 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) in rhine_shutdown_pci()
2433 if (rp->wolopts & WAKE_PHY) in rhine_shutdown_pci()
2436 if (rp->wolopts & WAKE_UCAST) in rhine_shutdown_pci()
2439 if (rp->wolopts) { in rhine_shutdown_pci()
2445 spin_unlock(&rp->lock); in rhine_shutdown_pci()
2459 struct rhine_private *rp = netdev_priv(dev); in rhine_suspend() local
2464 rhine_task_disable(rp); in rhine_suspend()
2465 rhine_irq_disable(rp); in rhine_suspend()
2466 napi_disable(&rp->napi); in rhine_suspend()
2479 struct rhine_private *rp = netdev_priv(dev); in rhine_resume() local
2484 enable_mmio(rp->pioaddr, rp->quirks); in rhine_resume()
2490 rhine_task_enable(rp); in rhine_resume()
2491 spin_lock_bh(&rp->lock); in rhine_resume()
2493 spin_unlock_bh(&rp->lock); in rhine_resume()