Lines Matching refs:rp

527 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)  in rhine_wait_bit()  argument
529 void __iomem *ioaddr = rp->base; in rhine_wait_bit()
540 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " in rhine_wait_bit()
545 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_high() argument
547 rhine_wait_bit(rp, reg, mask, false); in rhine_wait_bit_high()
550 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_low() argument
552 rhine_wait_bit(rp, reg, mask, true); in rhine_wait_bit_low()
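
The three wait helpers above share one polled-register loop: read the register, stop as soon as the masked bit has reached the requested state, otherwise delay and retry. A minimal sketch reconstructed from these fragments, assuming the driver's struct rhine_private and register offsets; the retry count (1024) and the debug threshold (64) are illustrative, not taken from the listing:

    static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
    {
            void __iomem *ioaddr = rp->base;
            int i;

            for (i = 0; i < 1024; i++) {
                    bool has_mask = ioread8(ioaddr + reg) & mask;

                    /* Done once the bit is low (or high) as requested. */
                    if (low ^ has_mask)
                            break;
                    udelay(10);
            }
            if (i > 64)
                    netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
                              "count: %04d\n", low ? "low" : "high", reg, mask, i);
    }
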
555 static u32 rhine_get_events(struct rhine_private *rp) in rhine_get_events() argument
557 void __iomem *ioaddr = rp->base; in rhine_get_events()
562 if (rp->quirks & rqStatusWBRace) in rhine_get_events()
567 static void rhine_ack_events(struct rhine_private *rp, u32 mask) in rhine_ack_events() argument
569 void __iomem *ioaddr = rp->base; in rhine_ack_events()
571 if (rp->quirks & rqStatusWBRace) in rhine_ack_events()
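
rhine_get_events() and rhine_ack_events() fold the chip's interrupt status into a single 32-bit event word; on chips with the rqStatusWBRace quirk a second 8-bit status register is carried in the upper half. A sketch, where the IntrStatus and IntrStatus2 register offsets are assumptions (they do not appear in this listing):

    static u32 rhine_get_events(struct rhine_private *rp)
    {
            void __iomem *ioaddr = rp->base;
            u32 intr_status;

            intr_status = ioread16(ioaddr + IntrStatus);
            /* The Tx descriptor write-back race bit lives in the second register. */
            if (rp->quirks & rqStatusWBRace)
                    intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
            return intr_status;
    }

    static void rhine_ack_events(struct rhine_private *rp, u32 mask)
    {
            void __iomem *ioaddr = rp->base;

            if (rp->quirks & rqStatusWBRace)
                    iowrite8(mask >> 16, ioaddr + IntrStatus2);
            iowrite16(mask, ioaddr + IntrStatus);
    }
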
583 struct rhine_private *rp = netdev_priv(dev); in rhine_power_init() local
584 void __iomem *ioaddr = rp->base; in rhine_power_init()
587 if (rp->quirks & rqWOL) { in rhine_power_init()
597 if (rp->quirks & rq6patterns) in rhine_power_init()
602 if (rp->quirks & rq6patterns) in rhine_power_init()
607 if (rp->quirks & rq6patterns) in rhine_power_init()
639 struct rhine_private *rp = netdev_priv(dev); in rhine_chip_reset() local
640 void __iomem *ioaddr = rp->base; in rhine_chip_reset()
650 if (rp->quirks & rqForceReset) in rhine_chip_reset()
654 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); in rhine_chip_reset()
658 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? in rhine_chip_reset()
709 struct rhine_private *rp = netdev_priv(dev); in rhine_reload_eeprom() local
710 void __iomem *ioaddr = rp->base; in rhine_reload_eeprom()
726 enable_mmio(pioaddr, rp->quirks); in rhine_reload_eeprom()
729 if (rp->quirks & rqWOL) in rhine_reload_eeprom()
737 struct rhine_private *rp = netdev_priv(dev); in rhine_poll() local
738 const int irq = rp->irq; in rhine_poll()
746 static void rhine_kick_tx_threshold(struct rhine_private *rp) in rhine_kick_tx_threshold() argument
748 if (rp->tx_thresh < 0xe0) { in rhine_kick_tx_threshold()
749 void __iomem *ioaddr = rp->base; in rhine_kick_tx_threshold()
751 rp->tx_thresh += 0x20; in rhine_kick_tx_threshold()
752 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); in rhine_kick_tx_threshold()
756 static void rhine_tx_err(struct rhine_private *rp, u32 status) in rhine_tx_err() argument
758 struct net_device *dev = rp->dev; in rhine_tx_err()
761 netif_info(rp, tx_err, dev, in rhine_tx_err()
766 rhine_kick_tx_threshold(rp); in rhine_tx_err()
767 netif_info(rp, tx_err, dev, "Transmitter underrun, " in rhine_tx_err()
768 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
772 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); in rhine_tx_err()
776 rhine_kick_tx_threshold(rp); in rhine_tx_err()
777 netif_info(rp, tx_err, dev, "Unspecified error. " in rhine_tx_err()
778 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
784 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) in rhine_update_rx_crc_and_missed_errord() argument
786 void __iomem *ioaddr = rp->base; in rhine_update_rx_crc_and_missed_errord()
787 struct net_device_stats *stats = &rp->dev->stats; in rhine_update_rx_crc_and_missed_errord()
825 struct rhine_private *rp = container_of(napi, struct rhine_private, napi); in rhine_napipoll() local
826 struct net_device *dev = rp->dev; in rhine_napipoll()
827 void __iomem *ioaddr = rp->base; in rhine_napipoll()
832 status = rhine_get_events(rp); in rhine_napipoll()
833 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); in rhine_napipoll()
841 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); in rhine_napipoll()
843 netif_warn(rp, tx_err, dev, "Tx still on\n"); in rhine_napipoll()
849 rhine_tx_err(rp, status); in rhine_napipoll()
853 spin_lock(&rp->lock); in rhine_napipoll()
854 rhine_update_rx_crc_and_missed_errord(rp); in rhine_napipoll()
855 spin_unlock(&rp->lock); in rhine_napipoll()
860 schedule_work(&rp->slow_event_task); in rhine_napipoll()
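
rhine_napipoll() follows the usual NAPI contract: fast events are acknowledged and handled inline, slow events (link change, PCI error) are pushed to slow_event_task, and the interrupt is re-enabled only when the budget was not exhausted. A sketch of that tail, where work_done and enable_mask are assumed locals of the poll function:

    if (status & RHINE_EVENT_SLOW) {
            /* Link changes and PCI errors are handled in process context. */
            schedule_work(&rp->slow_event_task);
    }

    if (work_done < budget) {
            napi_complete_done(napi, work_done);
            /* Re-arm the chip's interrupt only after polling has finished. */
            iowrite16(enable_mask, ioaddr + IntrEnable);
    }
    return work_done;
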
873 struct rhine_private *rp = netdev_priv(dev); in rhine_hw_init() local
879 if (rp->quirks & rqRhineI) in rhine_hw_init()
909 struct rhine_private *rp; in rhine_init_one_common() local
927 rp = netdev_priv(dev); in rhine_init_one_common()
928 rp->dev = dev; in rhine_init_one_common()
929 rp->quirks = quirks; in rhine_init_one_common()
930 rp->pioaddr = pioaddr; in rhine_init_one_common()
931 rp->base = ioaddr; in rhine_init_one_common()
932 rp->irq = irq; in rhine_init_one_common()
933 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); in rhine_init_one_common()
935 phy_id = rp->quirks & rqIntPHY ? 1 : 0; in rhine_init_one_common()
937 u64_stats_init(&rp->tx_stats.syncp); in rhine_init_one_common()
938 u64_stats_init(&rp->rx_stats.syncp); in rhine_init_one_common()
959 spin_lock_init(&rp->lock); in rhine_init_one_common()
960 mutex_init(&rp->task_lock); in rhine_init_one_common()
961 INIT_WORK(&rp->reset_task, rhine_reset_task); in rhine_init_one_common()
962 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); in rhine_init_one_common()
964 rp->mii_if.dev = dev; in rhine_init_one_common()
965 rp->mii_if.mdio_read = mdio_read; in rhine_init_one_common()
966 rp->mii_if.mdio_write = mdio_write; in rhine_init_one_common()
967 rp->mii_if.phy_id_mask = 0x1f; in rhine_init_one_common()
968 rp->mii_if.reg_num_mask = 0x1f; in rhine_init_one_common()
975 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); in rhine_init_one_common()
977 if (rp->quirks & rqRhineI) in rhine_init_one_common()
980 if (rp->quirks & rqMgmt) in rhine_init_one_common()
990 if (rp->quirks & rqRhineI) in rhine_init_one_common()
992 else if (rp->quirks & rqStatusWBRace) in rhine_init_one_common()
994 else if (rp->quirks & rqMgmt) in rhine_init_one_common()
1000 name, (long)ioaddr, dev->dev_addr, rp->irq); in rhine_init_one_common()
1010 rp->mii_if.advertising = mdio_read(dev, phy_id, 4); in rhine_init_one_common()
1014 mii_status, rp->mii_if.advertising, in rhine_init_one_common()
1025 rp->mii_if.phy_id = phy_id; in rhine_init_one_common()
1027 netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); in rhine_init_one_common()
1159 struct rhine_private *rp = netdev_priv(dev); in alloc_ring() local
1173 if (rp->quirks & rqRhineI) { in alloc_ring()
1174 rp->tx_bufs = dma_alloc_coherent(hwdev, in alloc_ring()
1176 &rp->tx_bufs_dma, in alloc_ring()
1178 if (rp->tx_bufs == NULL) { in alloc_ring()
1187 rp->rx_ring = ring; in alloc_ring()
1188 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1189 rp->rx_ring_dma = ring_dma; in alloc_ring()
1190 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
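
alloc_ring() places both descriptor rings in one coherent DMA allocation: the Tx ring starts where the Rx ring ends, at the same offset in the CPU mapping and in the DMA address space. A sketch of that layout, assuming the hwdev local of the surrounding function:

    void *ring;
    dma_addr_t ring_dma;

    ring = dma_alloc_coherent(hwdev,
                              RX_RING_SIZE * sizeof(struct rx_desc) +
                              TX_RING_SIZE * sizeof(struct tx_desc),
                              &ring_dma, GFP_ATOMIC);
    if (!ring)
            return -ENOMEM;

    rp->rx_ring = ring;
    rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
    rp->rx_ring_dma = ring_dma;
    rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
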
1197 struct rhine_private *rp = netdev_priv(dev); in free_ring() local
1203 rp->rx_ring, rp->rx_ring_dma); in free_ring()
1204 rp->tx_ring = NULL; in free_ring()
1206 if (rp->tx_bufs) in free_ring()
1208 rp->tx_bufs, rp->tx_bufs_dma); in free_ring()
1210 rp->tx_bufs = NULL; in free_ring()
1222 struct rhine_private *rp = netdev_priv(dev); in rhine_skb_dma_init() local
1224 const int size = rp->rx_buf_sz; in rhine_skb_dma_init()
1232 netif_err(rp, drv, dev, "Rx DMA mapping failure\n"); in rhine_skb_dma_init()
1240 static void rhine_reset_rbufs(struct rhine_private *rp) in rhine_reset_rbufs() argument
1244 rp->cur_rx = 0; in rhine_reset_rbufs()
1247 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); in rhine_reset_rbufs()
1250 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp, in rhine_skb_dma_nic_store() argument
1253 rp->rx_skbuff_dma[entry] = sd->dma; in rhine_skb_dma_nic_store()
1254 rp->rx_skbuff[entry] = sd->skb; in rhine_skb_dma_nic_store()
1256 rp->rx_ring[entry].addr = cpu_to_le32(sd->dma); in rhine_skb_dma_nic_store()
1264 struct rhine_private *rp = netdev_priv(dev); in alloc_rbufs() local
1268 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in alloc_rbufs()
1269 next = rp->rx_ring_dma; in alloc_rbufs()
1273 rp->rx_ring[i].rx_status = 0; in alloc_rbufs()
1274 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); in alloc_rbufs()
1276 rp->rx_ring[i].next_desc = cpu_to_le32(next); in alloc_rbufs()
1277 rp->rx_skbuff[i] = NULL; in alloc_rbufs()
1280 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); in alloc_rbufs()
1292 rhine_skb_dma_nic_store(rp, &sd, i); in alloc_rbufs()
1295 rhine_reset_rbufs(rp); in alloc_rbufs()
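
The alloc_rbufs() fragments above describe a circular descriptor chain: each next_desc holds the DMA address of the following slot, the last one wraps back to rx_ring_dma, and a mapped skb is then attached to every slot. Reassembled as a sketch; the loop index i, next, rc and the out label are assumed locals, and the struct rhine_skb_dma type is inferred from the &sd argument passed to rhine_skb_dma_init():

    next = rp->rx_ring_dma;
    for (i = 0; i < RX_RING_SIZE; i++) {
            rp->rx_ring[i].rx_status = 0;
            rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
            next += sizeof(struct rx_desc);
            rp->rx_ring[i].next_desc = cpu_to_le32(next);
            rp->rx_skbuff[i] = NULL;
    }
    /* Close the ring: the last descriptor points back at the first. */
    rp->rx_ring[i - 1].next_desc = cpu_to_le32(rp->rx_ring_dma);

    for (i = 0; i < RX_RING_SIZE; i++) {
            struct rhine_skb_dma sd;

            rc = rhine_skb_dma_init(dev, &sd);
            if (rc < 0) {
                    free_rbufs(dev);
                    goto out;
            }
            rhine_skb_dma_nic_store(rp, &sd, i);
    }

    rhine_reset_rbufs(rp);
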
1302 struct rhine_private *rp = netdev_priv(dev); in free_rbufs() local
1308 rp->rx_ring[i].rx_status = 0; in free_rbufs()
1309 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_rbufs()
1310 if (rp->rx_skbuff[i]) { in free_rbufs()
1312 rp->rx_skbuff_dma[i], in free_rbufs()
1313 rp->rx_buf_sz, DMA_FROM_DEVICE); in free_rbufs()
1314 dev_kfree_skb(rp->rx_skbuff[i]); in free_rbufs()
1316 rp->rx_skbuff[i] = NULL; in free_rbufs()
1322 struct rhine_private *rp = netdev_priv(dev); in alloc_tbufs() local
1326 rp->dirty_tx = rp->cur_tx = 0; in alloc_tbufs()
1327 next = rp->tx_ring_dma; in alloc_tbufs()
1329 rp->tx_skbuff[i] = NULL; in alloc_tbufs()
1330 rp->tx_ring[i].tx_status = 0; in alloc_tbufs()
1331 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in alloc_tbufs()
1333 rp->tx_ring[i].next_desc = cpu_to_le32(next); in alloc_tbufs()
1334 if (rp->quirks & rqRhineI) in alloc_tbufs()
1335 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; in alloc_tbufs()
1337 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); in alloc_tbufs()
1344 struct rhine_private *rp = netdev_priv(dev); in free_tbufs() local
1349 rp->tx_ring[i].tx_status = 0; in free_tbufs()
1350 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in free_tbufs()
1351 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_tbufs()
1352 if (rp->tx_skbuff[i]) { in free_tbufs()
1353 if (rp->tx_skbuff_dma[i]) { in free_tbufs()
1355 rp->tx_skbuff_dma[i], in free_tbufs()
1356 rp->tx_skbuff[i]->len, in free_tbufs()
1359 dev_kfree_skb(rp->tx_skbuff[i]); in free_tbufs()
1361 rp->tx_skbuff[i] = NULL; in free_tbufs()
1362 rp->tx_buf[i] = NULL; in free_tbufs()
1368 struct rhine_private *rp = netdev_priv(dev); in rhine_check_media() local
1369 void __iomem *ioaddr = rp->base; in rhine_check_media()
1371 if (!rp->mii_if.force_media) in rhine_check_media()
1372 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); in rhine_check_media()
1374 if (rp->mii_if.full_duplex) in rhine_check_media()
1381 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_check_media()
1382 rp->mii_if.force_media, netif_carrier_ok(dev)); in rhine_check_media()
1389 struct rhine_private *rp = netdev_priv(dev); in rhine_set_carrier() local
1399 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_set_carrier()
1509 struct rhine_private *rp = netdev_priv(dev); in rhine_init_cam_filter() local
1510 void __iomem *ioaddr = rp->base; in rhine_init_cam_filter()
1529 struct rhine_private *rp = netdev_priv(dev); in rhine_update_vcam() local
1530 void __iomem *ioaddr = rp->base; in rhine_update_vcam()
1535 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { in rhine_update_vcam()
1546 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_add_vid() local
1548 spin_lock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1549 set_bit(vid, rp->active_vlans); in rhine_vlan_rx_add_vid()
1551 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1557 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_kill_vid() local
1559 spin_lock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
1560 clear_bit(vid, rp->active_vlans); in rhine_vlan_rx_kill_vid()
1562 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
1568 struct rhine_private *rp = netdev_priv(dev); in init_registers() local
1569 void __iomem *ioaddr = rp->base; in init_registers()
1579 rp->tx_thresh = 0x20; in init_registers()
1580 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ in init_registers()
1582 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); in init_registers()
1583 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); in init_registers()
1587 if (rp->quirks & rqMgmt) in init_registers()
1590 napi_enable(&rp->napi); in init_registers()
1600 static void rhine_enable_linkmon(struct rhine_private *rp) in rhine_enable_linkmon() argument
1602 void __iomem *ioaddr = rp->base; in rhine_enable_linkmon()
1608 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_enable_linkmon()
1614 static void rhine_disable_linkmon(struct rhine_private *rp) in rhine_disable_linkmon() argument
1616 void __iomem *ioaddr = rp->base; in rhine_disable_linkmon()
1620 if (rp->quirks & rqRhineI) { in rhine_disable_linkmon()
1629 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_disable_linkmon()
1635 rhine_wait_bit_high(rp, MIIRegAddr, 0x80); in rhine_disable_linkmon()
1642 struct rhine_private *rp = netdev_priv(dev); in mdio_read() local
1643 void __iomem *ioaddr = rp->base; in mdio_read()
1646 rhine_disable_linkmon(rp); in mdio_read()
1652 rhine_wait_bit_low(rp, MIICmd, 0x40); in mdio_read()
1655 rhine_enable_linkmon(rp); in mdio_read()
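
mdio_read() (and mdio_write() below) wrap an MII register access in the link-monitor disable/enable pair: program the PHY and register addresses, set the command bit, and poll with rhine_wait_bit_low() until the chip clears it. A sketch assuming the MIIPhyAddr and MIIData register offsets, which do not appear in this listing:

    static int mdio_read(struct net_device *dev, int phy_id, int regnum)
    {
            struct rhine_private *rp = netdev_priv(dev);
            void __iomem *ioaddr = rp->base;
            int result;

            rhine_disable_linkmon(rp);

            iowrite8(phy_id, ioaddr + MIIPhyAddr);
            iowrite8(regnum, ioaddr + MIIRegAddr);
            iowrite8(0x40, ioaddr + MIICmd);        /* Trigger the read. */
            rhine_wait_bit_low(rp, MIICmd, 0x40);
            result = ioread16(ioaddr + MIIData);

            rhine_enable_linkmon(rp);
            return result;
    }
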
1661 struct rhine_private *rp = netdev_priv(dev); in mdio_write() local
1662 void __iomem *ioaddr = rp->base; in mdio_write()
1664 rhine_disable_linkmon(rp); in mdio_write()
1671 rhine_wait_bit_low(rp, MIICmd, 0x20); in mdio_write()
1673 rhine_enable_linkmon(rp); in mdio_write()
1676 static void rhine_task_disable(struct rhine_private *rp) in rhine_task_disable() argument
1678 mutex_lock(&rp->task_lock); in rhine_task_disable()
1679 rp->task_enable = false; in rhine_task_disable()
1680 mutex_unlock(&rp->task_lock); in rhine_task_disable()
1682 cancel_work_sync(&rp->slow_event_task); in rhine_task_disable()
1683 cancel_work_sync(&rp->reset_task); in rhine_task_disable()
1686 static void rhine_task_enable(struct rhine_private *rp) in rhine_task_enable() argument
1688 mutex_lock(&rp->task_lock); in rhine_task_enable()
1689 rp->task_enable = true; in rhine_task_enable()
1690 mutex_unlock(&rp->task_lock); in rhine_task_enable()
1695 struct rhine_private *rp = netdev_priv(dev); in rhine_open() local
1696 void __iomem *ioaddr = rp->base; in rhine_open()
1699 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); in rhine_open()
1703 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); in rhine_open()
1715 rhine_task_enable(rp); in rhine_open()
1718 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", in rhine_open()
1720 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_open()
1730 free_irq(rp->irq, dev); in rhine_open()
1736 struct rhine_private *rp = container_of(work, struct rhine_private, in rhine_reset_task() local
1738 struct net_device *dev = rp->dev; in rhine_reset_task()
1740 mutex_lock(&rp->task_lock); in rhine_reset_task()
1742 if (!rp->task_enable) in rhine_reset_task()
1745 napi_disable(&rp->napi); in rhine_reset_task()
1747 spin_lock_bh(&rp->lock); in rhine_reset_task()
1753 rhine_reset_rbufs(rp); in rhine_reset_task()
1759 spin_unlock_bh(&rp->lock); in rhine_reset_task()
1766 mutex_unlock(&rp->task_lock); in rhine_reset_task()
1771 struct rhine_private *rp = netdev_priv(dev); in rhine_tx_timeout() local
1772 void __iomem *ioaddr = rp->base; in rhine_tx_timeout()
1776 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_tx_timeout()
1778 schedule_work(&rp->reset_task); in rhine_tx_timeout()
1781 static inline bool rhine_tx_queue_full(struct rhine_private *rp) in rhine_tx_queue_full() argument
1783 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN; in rhine_tx_queue_full()
1789 struct rhine_private *rp = netdev_priv(dev); in rhine_start_tx() local
1791 void __iomem *ioaddr = rp->base; in rhine_start_tx()
1798 entry = rp->cur_tx % TX_RING_SIZE; in rhine_start_tx()
1803 rp->tx_skbuff[entry] = skb; in rhine_start_tx()
1805 if ((rp->quirks & rqRhineI) && in rhine_start_tx()
1811 rp->tx_skbuff[entry] = NULL; in rhine_start_tx()
1817 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); in rhine_start_tx()
1819 memset(rp->tx_buf[entry] + skb->len, 0, in rhine_start_tx()
1821 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1822 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + in rhine_start_tx()
1823 (rp->tx_buf[entry] - in rhine_start_tx()
1824 rp->tx_bufs)); in rhine_start_tx()
1826 rp->tx_skbuff_dma[entry] = in rhine_start_tx()
1829 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { in rhine_start_tx()
1831 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1835 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); in rhine_start_tx()
1838 rp->tx_ring[entry].desc_length = in rhine_start_tx()
1847 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); in rhine_start_tx()
1849 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); in rhine_start_tx()
1852 rp->tx_ring[entry].tx_status = 0; in rhine_start_tx()
1857 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); in rhine_start_tx()
1860 rp->cur_tx++; in rhine_start_tx()
1880 if (rhine_tx_queue_full(rp)) { in rhine_start_tx()
1884 if (!rhine_tx_queue_full(rp)) in rhine_start_tx()
1888 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", in rhine_start_tx()
1889 rp->cur_tx - 1, entry); in rhine_start_tx()
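
The end of rhine_start_tx() is a lock-free stop/wake handshake with the completion path in rhine_tx(): publish cur_tx, stop the queue if the ring looks full, then re-check after a read barrier in case rhine_tx() freed slots in the meantime. A sketch of that handshake:

    rp->cur_tx++;
    /* Make the new cur_tx visible before the queue-full check. */
    smp_wmb();

    /* dirty_tx may be pessimistically out of sync; see rhine_tx(). */
    if (rhine_tx_queue_full(rp)) {
            netif_stop_queue(dev);
            smp_rmb();
            /* The completion path may have advanced dirty_tx meanwhile. */
            if (!rhine_tx_queue_full(rp))
                    netif_wake_queue(dev);
    }
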
1894 static void rhine_irq_disable(struct rhine_private *rp) in rhine_irq_disable() argument
1896 iowrite16(0x0000, rp->base + IntrEnable); in rhine_irq_disable()
1905 struct rhine_private *rp = netdev_priv(dev); in rhine_interrupt() local
1909 status = rhine_get_events(rp); in rhine_interrupt()
1911 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); in rhine_interrupt()
1916 rhine_irq_disable(rp); in rhine_interrupt()
1917 napi_schedule(&rp->napi); in rhine_interrupt()
1921 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", in rhine_interrupt()
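
The hard interrupt handler does the minimum: snapshot the event word, mask the chip with rhine_irq_disable(), and let the NAPI poll do the rest. A sketch assuming the driver's RHINE_EVENT mask:

    static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
    {
            struct net_device *dev = dev_instance;
            struct rhine_private *rp = netdev_priv(dev);
            u32 status;
            int handled = 0;

            status = rhine_get_events(rp);

            netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);

            if (status & RHINE_EVENT) {
                    handled = 1;

                    rhine_irq_disable(rp);  /* Stays masked until the NAPI poll completes. */
                    napi_schedule(&rp->napi);
            }

            return IRQ_RETVAL(handled);
    }
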
1932 struct rhine_private *rp = netdev_priv(dev); in rhine_tx() local
1935 unsigned int dirty_tx = rp->dirty_tx; in rhine_tx()
1946 cur_tx = rp->cur_tx; in rhine_tx()
1950 u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); in rhine_tx()
1952 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", in rhine_tx()
1956 skb = rp->tx_skbuff[entry]; in rhine_tx()
1958 netif_dbg(rp, tx_done, dev, in rhine_tx()
1969 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || in rhine_tx()
1972 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); in rhine_tx()
1977 if (rp->quirks & rqRhineI) in rhine_tx()
1981 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", in rhine_tx()
1984 u64_stats_update_begin(&rp->tx_stats.syncp); in rhine_tx()
1985 rp->tx_stats.bytes += skb->len; in rhine_tx()
1986 rp->tx_stats.packets++; in rhine_tx()
1987 u64_stats_update_end(&rp->tx_stats.syncp); in rhine_tx()
1990 if (rp->tx_skbuff_dma[entry]) { in rhine_tx()
1992 rp->tx_skbuff_dma[entry], in rhine_tx()
1999 rp->tx_skbuff[entry] = NULL; in rhine_tx()
2003 rp->dirty_tx = dirty_tx; in rhine_tx()
2010 if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) { in rhine_tx()
2014 if (rhine_tx_queue_full(rp)) in rhine_tx()
2049 struct rhine_private *rp = netdev_priv(dev); in rhine_rx() local
2051 int entry = rp->cur_rx % RX_RING_SIZE; in rhine_rx()
2054 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, in rhine_rx()
2055 entry, le32_to_cpu(rp->rx_ring[entry].rx_status)); in rhine_rx()
2059 struct rx_desc *desc = rp->rx_ring + entry; in rhine_rx()
2066 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, in rhine_rx()
2079 netif_dbg(rp, rx_err, dev, in rhine_rx()
2091 spin_lock(&rp->lock); in rhine_rx()
2093 spin_unlock(&rp->lock); in rhine_rx()
2109 rp->rx_skbuff_dma[entry], in rhine_rx()
2110 rp->rx_buf_sz, in rhine_rx()
2114 rp->rx_skbuff[entry]->data, in rhine_rx()
2118 rp->rx_skbuff_dma[entry], in rhine_rx()
2119 rp->rx_buf_sz, in rhine_rx()
2127 skb = rp->rx_skbuff[entry]; in rhine_rx()
2130 rp->rx_skbuff_dma[entry], in rhine_rx()
2131 rp->rx_buf_sz, in rhine_rx()
2133 rhine_skb_dma_nic_store(rp, &sd, entry); in rhine_rx()
2144 u64_stats_update_begin(&rp->rx_stats.syncp); in rhine_rx()
2145 rp->rx_stats.bytes += pkt_len; in rhine_rx()
2146 rp->rx_stats.packets++; in rhine_rx()
2147 u64_stats_update_end(&rp->rx_stats.syncp); in rhine_rx()
2151 entry = (++rp->cur_rx) % RX_RING_SIZE; in rhine_rx()
2162 struct rhine_private *rp = netdev_priv(dev); in rhine_restart_tx() local
2163 void __iomem *ioaddr = rp->base; in rhine_restart_tx()
2164 int entry = rp->dirty_tx % TX_RING_SIZE; in rhine_restart_tx()
2171 intr_status = rhine_get_events(rp); in rhine_restart_tx()
2176 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), in rhine_restart_tx()
2182 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) in rhine_restart_tx()
2192 netif_warn(rp, tx_err, dev, "another error occurred %08x\n", in rhine_restart_tx()
2200 struct rhine_private *rp = in rhine_slow_event_task() local
2202 struct net_device *dev = rp->dev; in rhine_slow_event_task()
2205 mutex_lock(&rp->task_lock); in rhine_slow_event_task()
2207 if (!rp->task_enable) in rhine_slow_event_task()
2210 intr_status = rhine_get_events(rp); in rhine_slow_event_task()
2211 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); in rhine_slow_event_task()
2217 netif_warn(rp, hw, dev, "PCI error\n"); in rhine_slow_event_task()
2219 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); in rhine_slow_event_task()
2222 mutex_unlock(&rp->task_lock); in rhine_slow_event_task()
2228 struct rhine_private *rp = netdev_priv(dev); in rhine_get_stats64() local
2231 spin_lock_bh(&rp->lock); in rhine_get_stats64()
2232 rhine_update_rx_crc_and_missed_errord(rp); in rhine_get_stats64()
2233 spin_unlock_bh(&rp->lock); in rhine_get_stats64()
2238 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); in rhine_get_stats64()
2239 stats->rx_packets = rp->rx_stats.packets; in rhine_get_stats64()
2240 stats->rx_bytes = rp->rx_stats.bytes; in rhine_get_stats64()
2241 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); in rhine_get_stats64()
2244 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); in rhine_get_stats64()
2245 stats->tx_packets = rp->tx_stats.packets; in rhine_get_stats64()
2246 stats->tx_bytes = rp->tx_stats.bytes; in rhine_get_stats64()
2247 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); in rhine_get_stats64()
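
The byte and packet counters are read under the u64_stats seqcount so a 32-bit reader never observes a torn 64-bit value: the fetch is simply retried if a writer updated the counters in between. A sketch of the Rx half (the Tx half is identical against tx_stats):

    unsigned int start;

    do {
            start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
            stats->rx_packets = rp->rx_stats.packets;
            stats->rx_bytes = rp->rx_stats.bytes;
    } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
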
2254 struct rhine_private *rp = netdev_priv(dev); in rhine_set_rx_mode() local
2255 void __iomem *ioaddr = rp->base; in rhine_set_rx_mode()
2269 } else if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2291 if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2311 struct rhine_private *rp = netdev_priv(dev); in netdev_get_settings() local
2314 mutex_lock(&rp->task_lock); in netdev_get_settings()
2315 rc = mii_ethtool_gset(&rp->mii_if, cmd); in netdev_get_settings()
2316 mutex_unlock(&rp->task_lock); in netdev_get_settings()
2323 struct rhine_private *rp = netdev_priv(dev); in netdev_set_settings() local
2326 mutex_lock(&rp->task_lock); in netdev_set_settings()
2327 rc = mii_ethtool_sset(&rp->mii_if, cmd); in netdev_set_settings()
2328 rhine_set_carrier(&rp->mii_if); in netdev_set_settings()
2329 mutex_unlock(&rp->task_lock); in netdev_set_settings()
2336 struct rhine_private *rp = netdev_priv(dev); in netdev_nway_reset() local
2338 return mii_nway_restart(&rp->mii_if); in netdev_nway_reset()
2343 struct rhine_private *rp = netdev_priv(dev); in netdev_get_link() local
2345 return mii_link_ok(&rp->mii_if); in netdev_get_link()
2350 struct rhine_private *rp = netdev_priv(dev); in netdev_get_msglevel() local
2352 return rp->msg_enable; in netdev_get_msglevel()
2357 struct rhine_private *rp = netdev_priv(dev); in netdev_set_msglevel() local
2359 rp->msg_enable = value; in netdev_set_msglevel()
2364 struct rhine_private *rp = netdev_priv(dev); in rhine_get_wol() local
2366 if (!(rp->quirks & rqWOL)) in rhine_get_wol()
2369 spin_lock_irq(&rp->lock); in rhine_get_wol()
2372 wol->wolopts = rp->wolopts; in rhine_get_wol()
2373 spin_unlock_irq(&rp->lock); in rhine_get_wol()
2378 struct rhine_private *rp = netdev_priv(dev); in rhine_set_wol() local
2382 if (!(rp->quirks & rqWOL)) in rhine_set_wol()
2388 spin_lock_irq(&rp->lock); in rhine_set_wol()
2389 rp->wolopts = wol->wolopts; in rhine_set_wol()
2390 spin_unlock_irq(&rp->lock); in rhine_set_wol()
2409 struct rhine_private *rp = netdev_priv(dev); in netdev_ioctl() local
2415 mutex_lock(&rp->task_lock); in netdev_ioctl()
2416 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
2417 rhine_set_carrier(&rp->mii_if); in netdev_ioctl()
2418 mutex_unlock(&rp->task_lock); in netdev_ioctl()
2425 struct rhine_private *rp = netdev_priv(dev); in rhine_close() local
2426 void __iomem *ioaddr = rp->base; in rhine_close()
2428 rhine_task_disable(rp); in rhine_close()
2429 napi_disable(&rp->napi); in rhine_close()
2432 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", in rhine_close()
2436 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); in rhine_close()
2438 rhine_irq_disable(rp); in rhine_close()
2443 free_irq(rp->irq, dev); in rhine_close()
2455 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_pci() local
2459 pci_iounmap(pdev, rp->base); in rhine_remove_one_pci()
2469 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_platform() local
2473 iounmap(rp->base); in rhine_remove_one_platform()
2483 struct rhine_private *rp = netdev_priv(dev); in rhine_shutdown_pci() local
2484 void __iomem *ioaddr = rp->base; in rhine_shutdown_pci()
2486 if (!(rp->quirks & rqWOL)) in rhine_shutdown_pci()
2492 if (rp->quirks & rq6patterns) in rhine_shutdown_pci()
2495 spin_lock(&rp->lock); in rhine_shutdown_pci()
2497 if (rp->wolopts & WAKE_MAGIC) { in rhine_shutdown_pci()
2506 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) in rhine_shutdown_pci()
2509 if (rp->wolopts & WAKE_PHY) in rhine_shutdown_pci()
2512 if (rp->wolopts & WAKE_UCAST) in rhine_shutdown_pci()
2515 if (rp->wolopts) { in rhine_shutdown_pci()
2521 spin_unlock(&rp->lock); in rhine_shutdown_pci()
2535 struct rhine_private *rp = netdev_priv(dev); in rhine_suspend() local
2540 rhine_task_disable(rp); in rhine_suspend()
2541 rhine_irq_disable(rp); in rhine_suspend()
2542 napi_disable(&rp->napi); in rhine_suspend()
2555 struct rhine_private *rp = netdev_priv(dev); in rhine_resume() local
2560 enable_mmio(rp->pioaddr, rp->quirks); in rhine_resume()
2564 rhine_reset_rbufs(rp); in rhine_resume()
2565 rhine_task_enable(rp); in rhine_resume()
2566 spin_lock_bh(&rp->lock); in rhine_resume()
2568 spin_unlock_bh(&rp->lock); in rhine_resume()