Lines Matching refs:dev
464 static inline struct ns83820 *PRIV(struct net_device *dev) in PRIV() argument
466 return netdev_priv(dev); in PRIV()
469 #define __kick_rx(dev) writel(CR_RXE, dev->base + CR) argument
473 struct ns83820 *dev = PRIV(ndev); in kick_rx() local
475 if (test_and_clear_bit(0, &dev->rx_info.idle)) { in kick_rx()
477 writel(dev->rx_info.phy_descs + in kick_rx()
478 (4 * DESC_SIZE * dev->rx_info.next_rx), in kick_rx()
479 dev->base + RXDP); in kick_rx()
480 if (dev->rx_info.next_rx == dev->rx_info.next_empty) in kick_rx()
483 __kick_rx(dev); in kick_rx()
488 #define start_tx_okay(dev) \ argument
489 (((NR_TX_DESC-2 + dev->tx_done_idx - dev->tx_free_idx) % NR_TX_DESC) > MIN_TX_DESC_FREE)
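The start_tx_okay() macro above is the driver's ring-occupancy test: free slots are counted as (NR_TX_DESC-2 + tx_done_idx - tx_free_idx) % NR_TX_DESC, where the constant 2 keeps a gap between producer and consumer so a full ring is never mistaken for an empty one. A minimal userspace sketch of the same arithmetic (the NR_TX_DESC and MIN_TX_DESC_FREE values here are illustrative, not taken from the driver):

    #include <stdio.h>

    #define NR_TX_DESC       128   /* illustrative ring size */
    #define MIN_TX_DESC_FREE 8     /* illustrative low-water mark */

    /* Free slots between the consumer (done) and producer (free) index.
     * The "-2" gap keeps a full ring distinguishable from an empty one. */
    static int tx_slots_free(unsigned done_idx, unsigned free_idx)
    {
        return (NR_TX_DESC - 2 + done_idx - free_idx) % NR_TX_DESC;
    }

    int main(void)
    {
        printf("idle ring: %d free\n", tx_slots_free(0, 0));    /* 126 */
        printf("busy ring: %d free\n", tx_slots_free(0, 120));  /* 6: stop queue */
        printf("ok to tx:  %d\n", tx_slots_free(0, 0) > MIN_TX_DESC_FREE);
        return 0;
    }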
504 static inline void build_rx_desc(struct ns83820 *dev, __le32 *desc, dma_addr_t link, dma_addr_t buf… in build_rx_desc() argument
513 #define nr_rx_empty(dev) ((NR_RX_DESC-2 + dev->rx_info.next_rx - dev->rx_info.next_empty) % NR_RX_D… argument
514 static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb) in ns83820_add_rx_skb() argument
521 next_empty = dev->rx_info.next_empty; in ns83820_add_rx_skb()
524 if (unlikely(nr_rx_empty(dev) <= 2)) { in ns83820_add_rx_skb()
531 dev->rx_info.next_empty, in ns83820_add_rx_skb()
532 dev->rx_info.nr_used, in ns83820_add_rx_skb()
533 dev->rx_info.next_rx in ns83820_add_rx_skb()
537 sg = dev->rx_info.descs + (next_empty * DESC_SIZE); in ns83820_add_rx_skb()
538 BUG_ON(NULL != dev->rx_info.skbs[next_empty]); in ns83820_add_rx_skb()
539 dev->rx_info.skbs[next_empty] = skb; in ns83820_add_rx_skb()
541 dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC; in ns83820_add_rx_skb()
543 buf = pci_map_single(dev->pci_dev, skb->data, in ns83820_add_rx_skb()
545 build_rx_desc(dev, sg, 0, buf, cmdsts, 0); in ns83820_add_rx_skb()
547 if (likely(next_empty != dev->rx_info.next_rx)) in ns83820_add_rx_skb()
548 …dev->rx_info.descs[((NR_RX_DESC + next_empty - 1) % NR_RX_DESC) * DESC_SIZE] = cpu_to_le32(dev->rx… in ns83820_add_rx_skb()
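ns83820_add_rx_skb() (lines 514-548) claims the next empty slot, records the skb, DMA-maps its data, fills the new descriptor via build_rx_desc(), and only then stores the new descriptor's address into the previous descriptor's link word, so the NIC can never chain into a half-built entry. A sketch of that publish-last ordering, assuming an invented four-word descriptor layout (the real driver works on raw __le32 arrays and bus addresses):

    #include <stdint.h>

    #define NR_RX_DESC 64           /* illustrative ring size */
    #define CMDSTS_OWN 0x80000000u  /* illustrative "hardware owns it" flag */

    struct rx_desc { uint32_t link, bufptr, cmdsts, extsts; };
    static struct rx_desc ring[NR_RX_DESC];

    static void add_rx_buffer(unsigned *next_empty, uint32_t dma_addr, uint32_t len)
    {
        unsigned i = *next_empty;
        unsigned prev = (i + NR_RX_DESC - 1) % NR_RX_DESC;

        /* 1. initialize the new descriptor completely ... */
        ring[i].link   = 0;
        ring[i].bufptr = dma_addr;
        ring[i].extsts = 0;
        ring[i].cmdsts = CMDSTS_OWN | len;

        /* 2. ... then publish it by pointing the predecessor at it,
         *    mirroring the descs[prev * DESC_SIZE] = ... store above. */
        ring[prev].link = (uint32_t)(uintptr_t)&ring[i];

        *next_empty = (i + 1) % NR_RX_DESC;
    }

    int main(void)
    {
        unsigned next_empty = 1;
        add_rx_buffer(&next_empty, 0x1000, 2048);  /* hypothetical DMA address */
        return 0;
    }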
555 struct ns83820 *dev = PRIV(ndev); in rx_refill() local
559 if (unlikely(nr_rx_empty(dev) <= 2)) in rx_refill()
564 spin_lock_irqsave(&dev->rx_info.lock, flags); in rx_refill()
576 spin_lock_irqsave(&dev->rx_info.lock, flags); in rx_refill()
577 res = ns83820_add_rx_skb(dev, skb); in rx_refill()
579 spin_unlock_irqrestore(&dev->rx_info.lock, flags); in rx_refill()
586 spin_unlock_irqrestore(&dev->rx_info.lock, flags); in rx_refill()
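rx_refill() (lines 555-586) keeps the allocation out of the critical section: buffers are allocated first, and rx_info.lock is held only around ns83820_add_rx_skb() itself, so the allocator never runs under the spinlock. A userspace analogue of that shape, using a pthread mutex and an invented linked-list stand-in for the ring:

    #include <pthread.h>
    #include <stdlib.h>

    struct buf { struct buf *next; char data[2048]; };

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct buf *ring_head;            /* stand-in for the RX ring */

    static int refill_one(void)
    {
        struct buf *b = malloc(sizeof(*b));  /* slow path, runs lock-free */
        if (!b)
            return -1;
        pthread_mutex_lock(&ring_lock);      /* short critical section:  */
        b->next = ring_head;                 /* just the ring insertion  */
        ring_head = b;
        pthread_mutex_unlock(&ring_lock);
        return 0;
    }

    int main(void) { return refill_one(); }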
599 struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); in queue_refill() local
600 struct net_device *ndev = dev->ndev; in queue_refill()
603 if (dev->rx_info.up) in queue_refill()
607 static inline void clear_rx_desc(struct ns83820 *dev, unsigned i) in clear_rx_desc() argument
609 build_rx_desc(dev, dev->rx_info.descs + (DESC_SIZE * i), 0, 0, CMDSTS_OWN, 0); in clear_rx_desc()
614 struct ns83820 *dev = PRIV(ndev); in phy_intr() local
620 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY; in phy_intr()
622 if (dev->CFG_cache & CFG_TBI_EN) { in phy_intr()
624 tbisr = readl(dev->base + TBISR); in phy_intr()
625 tanar = readl(dev->base + TANAR); in phy_intr()
626 tanlpar = readl(dev->base + TANLPAR); in phy_intr()
634 writel(readl(dev->base + TXCFG) in phy_intr()
636 dev->base + TXCFG); in phy_intr()
637 writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, in phy_intr()
638 dev->base + RXCFG); in phy_intr()
640 writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT, in phy_intr()
641 dev->base + GPIOR); in phy_intr()
651 writel((readl(dev->base + TXCFG) in phy_intr()
653 dev->base + TXCFG); in phy_intr()
654 writel(readl(dev->base + RXCFG) & ~RXCFG_RX_FD, in phy_intr()
655 dev->base + RXCFG); in phy_intr()
657 writel(readl(dev->base + GPIOR) & ~GPIOR_GP1_OUT, in phy_intr()
658 dev->base + GPIOR); in phy_intr()
665 new_cfg = dev->CFG_cache & ~(CFG_SB | CFG_MODE_1000 | CFG_SPDSTS); in phy_intr()
677 writel(readl(dev->base + TXCFG) in phy_intr()
679 dev->base + TXCFG); in phy_intr()
680 writel(readl(dev->base + RXCFG) | RXCFG_RX_FD, in phy_intr()
681 dev->base + RXCFG); in phy_intr()
683 writel(readl(dev->base + TXCFG) in phy_intr()
685 dev->base + TXCFG); in phy_intr()
686 writel(readl(dev->base + RXCFG) & ~(RXCFG_RX_FD), in phy_intr()
687 dev->base + RXCFG); in phy_intr()
691 ((new_cfg ^ dev->CFG_cache) != 0)) { in phy_intr()
692 writel(new_cfg, dev->base + CFG); in phy_intr()
693 dev->CFG_cache = new_cfg; in phy_intr()
696 dev->CFG_cache &= ~CFG_SPDSTS; in phy_intr()
697 dev->CFG_cache |= cfg & CFG_SPDSTS; in phy_intr()
703 dev->linkstate != newlinkstate) { in phy_intr()
711 dev->linkstate != newlinkstate) { in phy_intr()
716 dev->linkstate = newlinkstate; in phy_intr()
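phy_intr() recomputes the desired CFG value from the negotiated link mode, but writes the register only when new_cfg ^ dev->CFG_cache is nonzero (lines 691-693), then folds the live speed-status bits back into the cache. The shadow-register idiom in isolation, with the MMIO write stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cfg_cache;       /* driver-side shadow of the CFG register */

    static void hw_write_cfg(uint32_t v)
    {
        printf("CFG <- %08x\n", v);  /* stands in for writel(v, base + CFG) */
    }

    /* Touch the hardware only when the desired value actually changed. */
    static void update_cfg(uint32_t new_cfg)
    {
        if ((new_cfg ^ cfg_cache) != 0) {
            hw_write_cfg(new_cfg);
            cfg_cache = new_cfg;
        }
    }

    int main(void)
    {
        update_cfg(0x40);  /* first call writes */
        update_cfg(0x40);  /* identical value: no MMIO access */
        return 0;
    }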
721 struct ns83820 *dev = PRIV(ndev); in ns83820_setup_rx() local
727 dev->rx_info.idle = 1; in ns83820_setup_rx()
728 dev->rx_info.next_rx = 0; in ns83820_setup_rx()
729 dev->rx_info.next_rx_desc = dev->rx_info.descs; in ns83820_setup_rx()
730 dev->rx_info.next_empty = 0; in ns83820_setup_rx()
733 clear_rx_desc(dev, i); in ns83820_setup_rx()
735 writel(0, dev->base + RXDP_HI); in ns83820_setup_rx()
736 writel(dev->rx_info.phy_descs, dev->base + RXDP); in ns83820_setup_rx()
742 spin_lock_irq(&dev->rx_info.lock); in ns83820_setup_rx()
744 writel(0x0001, dev->base + CCSR); in ns83820_setup_rx()
745 writel(0, dev->base + RFCR); in ns83820_setup_rx()
746 writel(0x7fc00000, dev->base + RFCR); in ns83820_setup_rx()
747 writel(0xffc00000, dev->base + RFCR); in ns83820_setup_rx()
749 dev->rx_info.up = 1; in ns83820_setup_rx()
754 spin_lock(&dev->misc_lock); in ns83820_setup_rx()
755 dev->IMR_cache |= ISR_PHY; in ns83820_setup_rx()
756 dev->IMR_cache |= ISR_RXRCMP; in ns83820_setup_rx()
759 dev->IMR_cache |= ISR_RXORN; in ns83820_setup_rx()
760 dev->IMR_cache |= ISR_RXSOVR; in ns83820_setup_rx()
761 dev->IMR_cache |= ISR_RXDESC; in ns83820_setup_rx()
762 dev->IMR_cache |= ISR_RXIDLE; in ns83820_setup_rx()
763 dev->IMR_cache |= ISR_TXDESC; in ns83820_setup_rx()
764 dev->IMR_cache |= ISR_TXIDLE; in ns83820_setup_rx()
766 writel(dev->IMR_cache, dev->base + IMR); in ns83820_setup_rx()
767 writel(1, dev->base + IER); in ns83820_setup_rx()
768 spin_unlock(&dev->misc_lock); in ns83820_setup_rx()
772 spin_unlock_irq(&dev->rx_info.lock); in ns83820_setup_rx()
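ns83820_setup_rx() assembles the interrupt mask bit by bit in dev->IMR_cache under misc_lock and pushes the whole word to IMR once (lines 754-768); every other enable/disable site follows the same edit-the-shadow-then-write pattern, so the write-only mask register and its software copy cannot drift apart. A sketch, assuming invented bit names and a pthread mutex in place of the spinlock:

    #include <pthread.h>
    #include <stdint.h>

    #define IRQ_RXDESC (1u << 1)          /* illustrative bit assignments */
    #define IRQ_TXDESC (1u << 2)

    static pthread_mutex_t misc_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t imr_cache;            /* shadow of the write-only mask */
    static volatile uint32_t imr_reg;     /* stand-in for the IMR register */

    static void irq_enable(uint32_t bits)
    {
        pthread_mutex_lock(&misc_lock);
        imr_cache |= bits;                /* edit the shadow ...          */
        imr_reg = imr_cache;              /* ... then push the whole mask */
        pthread_mutex_unlock(&misc_lock);
    }

    static void irq_disable(uint32_t bits)
    {
        pthread_mutex_lock(&misc_lock);
        imr_cache &= ~bits;
        imr_reg = imr_cache;
        pthread_mutex_unlock(&misc_lock);
    }

    int main(void)
    {
        irq_enable(IRQ_RXDESC | IRQ_TXDESC);
        irq_disable(IRQ_TXDESC);
        return (int)imr_reg;              /* IRQ_RXDESC remains set */
    }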
777 static void ns83820_cleanup_rx(struct ns83820 *dev) in ns83820_cleanup_rx() argument
782 dprintk("ns83820_cleanup_rx(%p)\n", dev); in ns83820_cleanup_rx()
785 spin_lock_irqsave(&dev->misc_lock, flags); in ns83820_cleanup_rx()
786 dev->IMR_cache &= ~(ISR_RXOK | ISR_RXDESC | ISR_RXERR | ISR_RXEARLY | ISR_RXIDLE); in ns83820_cleanup_rx()
787 writel(dev->IMR_cache, dev->base + IMR); in ns83820_cleanup_rx()
788 spin_unlock_irqrestore(&dev->misc_lock, flags); in ns83820_cleanup_rx()
791 dev->rx_info.up = 0; in ns83820_cleanup_rx()
792 synchronize_irq(dev->pci_dev->irq); in ns83820_cleanup_rx()
795 readl(dev->base + IMR); in ns83820_cleanup_rx()
798 writel(0, dev->base + RXDP_HI); in ns83820_cleanup_rx()
799 writel(0, dev->base + RXDP); in ns83820_cleanup_rx()
802 struct sk_buff *skb = dev->rx_info.skbs[i]; in ns83820_cleanup_rx()
803 dev->rx_info.skbs[i] = NULL; in ns83820_cleanup_rx()
804 clear_rx_desc(dev, i); in ns83820_cleanup_rx()
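ns83820_cleanup_rx() (lines 777-804) tears down in a strict order: mask the RX interrupt sources, mark the ring down, synchronize_irq() so no handler is mid-flight, zero RXDP so the NIC stops following the ring, and only then free the buffers. The ordering in outline form, every step stubbed:

    #include <stdio.h>

    static void mask_rx_interrupts(void)  { puts("1. clear RX bits in the mask"); }
    static void mark_ring_down(void)      { puts("2. rx_info.up = 0"); }
    static void drain_irq_handlers(void)  { puts("3. synchronize_irq()"); }
    static void detach_hw_from_ring(void) { puts("4. zero RXDP/RXDP_HI"); }
    static void free_all_buffers(void)    { puts("5. unmap and free each skb"); }

    /* Reversing any two of these steps risks the handler or the NIC
     * touching memory that has already been freed. */
    int main(void)
    {
        mask_rx_interrupts();
        mark_ring_down();
        drain_irq_handlers();
        detach_hw_from_ring();
        free_all_buffers();
        return 0;
    }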
811 struct ns83820 *dev = PRIV(ndev); in ns83820_rx_kick() local
813 if (dev->rx_info.up) { in ns83820_rx_kick()
819 if (dev->rx_info.up && nr_rx_empty(dev) > NR_RX_DESC*3/4) in ns83820_rx_kick()
820 schedule_work(&dev->tq_refill); in ns83820_rx_kick()
823 if (dev->rx_info.idle) in ns83820_rx_kick()
832 struct ns83820 *dev = PRIV(ndev); local
833 struct rx_info *info = &dev->rx_info;
843 readl(dev->base + RXDP),
844 (long)(dev->rx_info.phy_descs),
845 (int)dev->rx_info.next_rx,
846 (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_rx)),
847 (int)dev->rx_info.next_empty,
848 (dev->rx_info.descs + (DESC_SIZE * dev->rx_info.next_empty))
873 clear_rx_desc(dev, next_rx);
875 pci_unmap_single(dev->pci_dev, bufptr,
943 struct ns83820 *dev = PRIV(ndev); local
945 writel(ihr, dev->base + IHR);
947 spin_lock_irq(&dev->misc_lock);
948 dev->IMR_cache |= ISR_RXDESC;
949 writel(dev->IMR_cache, dev->base + IMR);
950 spin_unlock_irq(&dev->misc_lock);
958 static inline void kick_tx(struct ns83820 *dev) in kick_tx() argument
961 dev, dev->tx_idx, dev->tx_free_idx);
962 writel(CR_TXE, dev->base + CR);
970 struct ns83820 *dev = PRIV(ndev); local
975 tx_done_idx = dev->tx_done_idx;
976 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
979 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
980 while ((tx_done_idx != dev->tx_free_idx) &&
994 tx_done_idx, dev->tx_free_idx, cmdsts);
995 skb = dev->tx_skbs[tx_done_idx];
996 dev->tx_skbs[tx_done_idx] = NULL;
1002 pci_unmap_single(dev->pci_dev,
1007 atomic_dec(&dev->nr_tx_skbs);
1009 pci_unmap_page(dev->pci_dev,
1015 dev->tx_done_idx = tx_done_idx;
1018 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
1024 if (netif_queue_stopped(ndev) && start_tx_okay(dev)) {
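The TX-completion path (lines 970-1024) walks forward from tx_done_idx, reclaiming descriptors as long as the ring is non-empty and the ownership bit has been released by the hardware, then restarts the queue once start_tx_okay() holds again. The reclaim loop in miniature (the OWN value and ring size are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_TX_DESC 128
    #define OWN 0x80000000u              /* illustrative "hardware owns" bit */

    static uint32_t cmdsts[NR_TX_DESC];  /* stand-in for the DESC_CMDSTS words */
    static unsigned tx_done_idx, tx_free_idx;

    static void tx_done(void)
    {
        /* Stop at the producer index or at the first still-owned slot. */
        while (tx_done_idx != tx_free_idx && !(cmdsts[tx_done_idx] & OWN)) {
            /* real driver: pci_unmap_single/page + free the skb here */
            printf("reclaimed slot %u\n", tx_done_idx);
            tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
        }
    }

    int main(void)
    {
        tx_free_idx = 3;        /* three packets queued ...           */
        cmdsts[2] = OWN;        /* ... the third still owned by hw    */
        tx_done();              /* reclaims slots 0 and 1, stops at 2 */
        return 0;
    }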
1031 static void ns83820_cleanup_tx(struct ns83820 *dev) in ns83820_cleanup_tx() argument
1036 struct sk_buff *skb = dev->tx_skbs[i];
1037 dev->tx_skbs[i] = NULL;
1039 __le32 *desc = dev->tx_descs + (i * DESC_SIZE);
1040 pci_unmap_single(dev->pci_dev,
1045 atomic_dec(&dev->nr_tx_skbs);
1049 memset(dev->tx_descs, 0, NR_TX_DESC * DESC_SIZE * 4);
1061 struct ns83820 *dev = PRIV(ndev); local
1076 if (unlikely(dev->CFG_cache & CFG_LNKSTS)) {
1078 if (unlikely(dev->CFG_cache & CFG_LNKSTS))
1083 last_idx = free_idx = dev->tx_free_idx;
1084 tx_done_idx = dev->tx_done_idx;
1092 if (dev->tx_done_idx != tx_done_idx) {
1100 if (free_idx == dev->tx_intr_idx) {
1102 dev->tx_intr_idx = (dev->tx_intr_idx + NR_TX_DESC/4) % NR_TX_DESC;
1138 buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
1140 first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
1143 volatile __le32 *desc = dev->tx_descs + (free_idx * DESC_SIZE);
1149 desc[DESC_LINK] = cpu_to_le32(dev->tx_phy_descs + (free_idx * DESC_SIZE * 4));
1161 buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0,
1172 spin_lock_irq(&dev->tx_lock);
1173 dev->tx_skbs[last_idx] = skb;
1175 dev->tx_free_idx = free_idx;
1176 atomic_inc(&dev->nr_tx_skbs);
1177 spin_unlock_irq(&dev->tx_lock);
1179 kick_tx(dev);
1182 if (stopped && (dev->tx_done_idx != tx_done_idx) && start_tx_okay(dev))
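The transmit path (lines 1061-1182) maps the skb head and each fragment into a chained descriptor list, and by the logic around first_desc the head descriptor's ownership bit is set only after the whole chain is written, so the NIC cannot start DMA on a partially built chain. The publish-last ordering, sketched with a GCC full barrier standing in for the driver's real ordering guarantees:

    #include <stdint.h>

    #define OWN 0x80000000u              /* illustrative ownership bit */

    struct desc { uint32_t link, bufptr, cmdsts; };

    static void queue_chain(struct desc *d, const uint32_t *bufs,
                            const uint32_t *lens, int n)
    {
        for (int i = 0; i < n; i++) {
            d[i].bufptr = bufs[i];
            d[i].cmdsts = lens[i];       /* head's OWN stays clear for now */
            if (i > 0)
                d[i].cmdsts |= OWN;      /* non-head slots owned up front  */
        }
        __sync_synchronize();            /* all stores visible ...           */
        d[0].cmdsts |= OWN;              /* ... before the head is published */
    }

    int main(void)
    {
        struct desc ring[3];
        uint32_t bufs[3] = { 0x1000, 0x2000, 0x3000 };  /* hypothetical addresses */
        uint32_t lens[3] = { 1500, 1500, 60 };
        queue_chain(ring, bufs, lens, 3);
        return 0;
    }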
1188 static void ns83820_update_stats(struct ns83820 *dev) in ns83820_update_stats() argument
1190 struct net_device *ndev = dev->ndev;
1191 u8 __iomem *base = dev->base;
1209 struct ns83820 *dev = PRIV(ndev); local
1212 spin_lock_irq(&dev->misc_lock);
1213 ns83820_update_stats(dev);
1214 spin_unlock_irq(&dev->misc_lock);
1223 struct ns83820 *dev = PRIV(ndev); local
1241 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
1242 tanar = readl(dev->base + TANAR);
1243 tbicr = readl(dev->base + TBICR);
1249 if (dev->CFG_cache & CFG_TBI_EN) {
1286 struct ns83820 *dev = PRIV(ndev); local
1292 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
1293 tanar = readl(dev->base + TANAR);
1295 if (dev->CFG_cache & CFG_TBI_EN) {
1305 spin_lock_irq(&dev->misc_lock);
1306 spin_lock(&dev->tx_lock);
1314 writel(readl(dev->base + TXCFG)
1316 dev->base + TXCFG);
1317 writel(readl(dev->base + RXCFG) | RXCFG_RX_FD,
1318 dev->base + RXCFG);
1320 writel(readl(dev->base + GPIOR) | GPIOR_GP1_OUT,
1321 dev->base + GPIOR);
1339 dev->base + TBICR);
1340 writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
1341 dev->linkstate = LINK_AUTONEGOTIATE;
1347 writel(0x00000000, dev->base + TBICR);
1355 spin_unlock(&dev->tx_lock);
1356 spin_unlock_irq(&dev->misc_lock);
1364 struct ns83820 *dev = PRIV(ndev); local
1367 strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
1372 struct ns83820 *dev = PRIV(ndev); local
1373 u32 cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
1384 static inline void ns83820_disable_interrupts(struct ns83820 *dev) in ns83820_disable_interrupts() argument
1386 writel(0, dev->base + IMR);
1387 writel(0, dev->base + IER);
1388 readl(dev->base + IER);
1392 static void ns83820_mib_isr(struct ns83820 *dev) in ns83820_mib_isr() argument
1395 spin_lock_irqsave(&dev->misc_lock, flags);
1396 ns83820_update_stats(dev);
1397 spin_unlock_irqrestore(&dev->misc_lock, flags);
1404 struct ns83820 *dev = PRIV(ndev); local
1408 dev->ihr = 0;
1410 isr = readl(dev->base + ISR);
1418 struct ns83820 *dev = PRIV(ndev); local
1427 dev->rx_info.idle = 1;
1433 prefetch(dev->rx_info.next_rx_desc);
1435 spin_lock_irqsave(&dev->misc_lock, flags);
1436 dev->IMR_cache &= ~(ISR_RXDESC | ISR_RXOK);
1437 writel(dev->IMR_cache, dev->base + IMR);
1438 spin_unlock_irqrestore(&dev->misc_lock, flags);
1440 tasklet_schedule(&dev->rx_tasklet);
1458 if ((ISR_RXRCMP & isr) && dev->rx_info.up)
1459 writel(CR_RXE, dev->base + CR);
1463 txdp = readl(dev->base + TXDP);
1465 txdp -= dev->tx_phy_descs;
1466 dev->tx_idx = txdp / (DESC_SIZE * 4);
1467 if (dev->tx_idx >= NR_TX_DESC) {
1469 dev->tx_idx = 0;
1476 if (dev->tx_idx != dev->tx_free_idx)
1477 kick_tx(dev);
1484 spin_lock_irqsave(&dev->tx_lock, flags);
1486 spin_unlock_irqrestore(&dev->tx_lock, flags);
1490 if ((dev->tx_done_idx == dev->tx_free_idx) &&
1491 (dev->IMR_cache & ISR_TXOK)) {
1492 spin_lock_irqsave(&dev->misc_lock, flags);
1493 dev->IMR_cache &= ~ISR_TXOK;
1494 writel(dev->IMR_cache, dev->base + IMR);
1495 spin_unlock_irqrestore(&dev->misc_lock, flags);
1505 if ((ISR_TXIDLE & isr) && (dev->tx_done_idx != dev->tx_free_idx)) {
1506 spin_lock_irqsave(&dev->misc_lock, flags);
1507 dev->IMR_cache |= ISR_TXOK;
1508 writel(dev->IMR_cache, dev->base + IMR);
1509 spin_unlock_irqrestore(&dev->misc_lock, flags);
1514 ns83820_mib_isr(dev);
1521 if (dev->ihr)
1522 writel(dev->ihr, dev->base + IHR);
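The interrupt service routine (lines 1418-1522) receives one snapshot of ISR and tests each source bit in turn: RXRCMP re-enables the receiver, TXDP is read back to locate the hardware's position in the TX ring, TXIDLE re-arms ISR_TXOK in the cached mask, and so on. The single-read, per-bit dispatch shape, with invented bit values:

    #include <stdint.h>
    #include <stdio.h>

    #define ISR_RXDESC (1u << 0)  /* illustrative bit assignments */
    #define ISR_TXDESC (1u << 1)
    #define ISR_MIB    (1u << 2)

    static void handle_rx(void)  { puts("rx descriptors ready"); }
    static void handle_tx(void)  { puts("tx descriptors done"); }
    static void handle_mib(void) { puts("stats counter overflow"); }

    /* Reading ISR typically acknowledges the sources, so it is read
     * exactly once and the snapshot handed around, as the driver does. */
    static void do_isr(uint32_t isr)
    {
        if (isr & ISR_RXDESC) handle_rx();
        if (isr & ISR_TXDESC) handle_tx();
        if (isr & ISR_MIB)    handle_mib();
    }

    int main(void)
    {
        do_isr(ISR_RXDESC | ISR_MIB);
        return 0;
    }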
1526 static void ns83820_do_reset(struct ns83820 *dev, u32 which) in ns83820_do_reset() argument
1529 writel(which, dev->base + CR);
1532 } while (readl(dev->base + CR) & which);
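ns83820_do_reset() writes the command bit into CR and sleeps until the hardware clears it, with no upper bound. A hedged variant of the same poll-until-clear loop with an explicit retry budget, so a wedged device fails the call instead of hanging it:

    #include <stdint.h>
    #include <unistd.h>

    static volatile uint32_t cr_reg;  /* stand-in for the CR register */

    /* Issue a self-clearing command and poll for completion, giving up
     * after max_polls iterations rather than spinning forever. */
    static int do_reset(uint32_t which, int max_polls)
    {
        cr_reg = which;               /* in the driver: writel(which, base + CR) */
        while (cr_reg & which) {
            if (--max_polls <= 0)
                return -1;            /* device never acknowledged */
            usleep(1000);             /* give the hardware time to settle */
        }
        return 0;
    }

    int main(void)
    {
        return do_reset(1u << 8, 100) ? 1 : 0;  /* hypothetical CR_RST value */
    }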
1538 struct ns83820 *dev = PRIV(ndev); local
1541 del_timer_sync(&dev->tx_watchdog);
1543 ns83820_disable_interrupts(dev);
1545 dev->rx_info.up = 0;
1546 synchronize_irq(dev->pci_dev->irq);
1548 ns83820_do_reset(dev, CR_RST);
1550 synchronize_irq(dev->pci_dev->irq);
1552 spin_lock_irq(&dev->misc_lock);
1553 dev->IMR_cache &= ~(ISR_TXURN | ISR_TXIDLE | ISR_TXERR | ISR_TXDESC | ISR_TXOK);
1554 spin_unlock_irq(&dev->misc_lock);
1556 ns83820_cleanup_rx(dev);
1557 ns83820_cleanup_tx(dev);
1564 struct ns83820 *dev = PRIV(ndev); local
1569 spin_lock_irqsave(&dev->tx_lock, flags);
1571 tx_done_idx = dev->tx_done_idx;
1572 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
1576 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
1581 isr = readl(dev->base + ISR);
1582 printk("irq: %08x imr: %08x\n", isr, dev->IMR_cache);
1589 tx_done_idx = dev->tx_done_idx;
1590 desc = dev->tx_descs + (tx_done_idx * DESC_SIZE);
1594 tx_done_idx, dev->tx_free_idx, le32_to_cpu(desc[DESC_CMDSTS]));
1596 spin_unlock_irqrestore(&dev->tx_lock, flags);
1602 struct ns83820 *dev = PRIV(ndev); local
1606 dev->tx_done_idx, dev->tx_free_idx, atomic_read(&dev->nr_tx_skbs)
1611 dev->tx_done_idx != dev->tx_free_idx) {
1614 dev->tx_done_idx, dev->tx_free_idx,
1615 atomic_read(&dev->nr_tx_skbs));
1619 mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
1624 struct ns83820 *dev = PRIV(ndev); local
1631 writel(0, dev->base + PQCR);
1637 memset(dev->tx_descs, 0, 4 * NR_TX_DESC * DESC_SIZE);
1639 dev->tx_descs[(i * DESC_SIZE) + DESC_LINK]
1641 dev->tx_phy_descs
1645 dev->tx_idx = 0;
1646 dev->tx_done_idx = 0;
1647 desc = dev->tx_phy_descs;
1648 writel(0, dev->base + TXDP_HI);
1649 writel(desc, dev->base + TXDP);
1651 init_timer(&dev->tx_watchdog);
1652 dev->tx_watchdog.data = (unsigned long)ndev;
1653 dev->tx_watchdog.function = ns83820_tx_watch;
1654 mod_timer(&dev->tx_watchdog, jiffies + 2*HZ);
1665 static void ns83820_getmac(struct ns83820 *dev, u8 *mac) in ns83820_getmac() argument
1674 writel(i*2, dev->base + RFCR);
1675 data = readl(dev->base + RFDR);
1692 struct ns83820 *dev = PRIV(ndev); local
1693 u8 __iomem *rfcr = dev->base + RFCR;
1708 spin_lock_irq(&dev->misc_lock);
1713 spin_unlock_irq(&dev->misc_lock);
1718 struct ns83820 *dev = PRIV(ndev); local
1728 writel(enable, dev->base + PTSCR);
1731 status = readl(dev->base + PTSCR);
1756 static void ns83820_mii_write_bit(struct ns83820 *dev, int bit) in ns83820_mii_write_bit() argument
1759 dev->MEAR_cache &= ~MEAR_MDC;
1760 writel(dev->MEAR_cache, dev->base + MEAR);
1761 readl(dev->base + MEAR);
1764 dev->MEAR_cache |= MEAR_MDDIR;
1766 dev->MEAR_cache |= MEAR_MDIO;
1768 dev->MEAR_cache &= ~MEAR_MDIO;
1771 writel(dev->MEAR_cache, dev->base + MEAR);
1772 readl(dev->base + MEAR);
1778 dev->MEAR_cache |= MEAR_MDC;
1779 writel(dev->MEAR_cache, dev->base + MEAR);
1780 readl(dev->base + MEAR);
1786 static int ns83820_mii_read_bit(struct ns83820 *dev) in ns83820_mii_read_bit() argument
1791 dev->MEAR_cache &= ~MEAR_MDC;
1792 dev->MEAR_cache &= ~MEAR_MDDIR;
1793 writel(dev->MEAR_cache, dev->base + MEAR);
1794 readl(dev->base + MEAR);
1800 bit = (readl(dev->base + MEAR) & MEAR_MDIO) ? 1 : 0;
1801 dev->MEAR_cache |= MEAR_MDC;
1802 writel(dev->MEAR_cache, dev->base + MEAR);
1810 static unsigned ns83820_mii_read_reg(struct ns83820 *dev, unsigned phy, unsigned reg) in ns83820_mii_read_reg() argument
1817 ns83820_mii_read_bit(dev);
1819 ns83820_mii_write_bit(dev, 0); /* start */
1820 ns83820_mii_write_bit(dev, 1);
1821 ns83820_mii_write_bit(dev, 1); /* opcode read */
1822 ns83820_mii_write_bit(dev, 0);
1826 ns83820_mii_write_bit(dev, phy & (0x10 >> i));
1830 ns83820_mii_write_bit(dev, reg & (0x10 >> i));
1832 ns83820_mii_read_bit(dev); /* turn around cycles */
1833 ns83820_mii_read_bit(dev);
1838 data |= ns83820_mii_read_bit(dev);
1844 static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigned reg, unsigned dat… in ns83820_mii_write_reg() argument
1850 ns83820_mii_read_bit(dev);
1852 ns83820_mii_write_bit(dev, 0); /* start */
1853 ns83820_mii_write_bit(dev, 1);
1854 ns83820_mii_write_bit(dev, 0); /* opcode write */
1855 ns83820_mii_write_bit(dev, 1);
1859 ns83820_mii_write_bit(dev, phy & (0x10 >> i));
1863 ns83820_mii_write_bit(dev, reg & (0x10 >> i));
1865 ns83820_mii_read_bit(dev); /* turn around cycles */
1866 ns83820_mii_read_bit(dev);
1870 ns83820_mii_write_bit(dev, (data >> (15 - i)) & 1);
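The four mii helpers above bit-bang IEEE 802.3 clause-22 management frames through MEAR: idle bits, a 01 start, a two-bit opcode (10 for read, 01 for write), five-bit PHY and register addresses shifted out MSB-first via the x & (0x10 >> i) test, a turnaround, then sixteen data bits. A runnable sketch that prints the bit stream of a read frame's address phase:

    #include <stdio.h>

    static void put_bit(int b) { putchar(b ? '1' : '0'); }

    /* Emit the address phase of a clause-22 MII read frame:
     * start 01, opcode 10, then 5-bit PHY and register addresses. */
    static void mii_read_frame(unsigned phy, unsigned reg)
    {
        int i;
        put_bit(0); put_bit(1);              /* start */
        put_bit(1); put_bit(0);              /* opcode: read */
        for (i = 0; i < 5; i++)
            put_bit(!!(phy & (0x10 >> i)));  /* MSB-first, as in the driver */
        for (i = 0; i < 5; i++)
            put_bit(!!(reg & (0x10 >> i)));
        putchar('\n');                       /* turnaround and 16 data bits follow */
    }

    int main(void)
    {
        mii_read_frame(1, 0x09);             /* PHY 1, register 9, as probed above */
        return 0;
    }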
1877 struct ns83820 *dev = PRIV(ndev); local
1886 ns83820_mii_read_reg(dev, 1, 0x09);
1887 ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e);
1889 tmp = ns83820_mii_read_reg(dev, 1, 0x00);
1890 ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000);
1892 ns83820_mii_read_reg(dev, 1, 0x09);
1900 a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1);
1901 b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2);
1909 ns83820_mii_read_reg(dev, i, 0 + j),
1910 ns83820_mii_read_reg(dev, i, 1 + j),
1911 ns83820_mii_read_reg(dev, i, 2 + j),
1912 ns83820_mii_read_reg(dev, i, 3 + j)
1919 ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
1920 ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
1921 a = ns83820_mii_read_reg(dev, 1, 0x1d);
1923 ns83820_mii_write_reg(dev, 1, 0x16, 0x000d);
1924 ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e);
1925 b = ns83820_mii_read_reg(dev, 1, 0x1d);
1947 struct ns83820 *dev; local
1959 dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
1968 dev = PRIV(ndev);
1969 dev->ndev = ndev;
1971 spin_lock_init(&dev->rx_info.lock);
1972 spin_lock_init(&dev->tx_lock);
1973 spin_lock_init(&dev->misc_lock);
1974 dev->pci_dev = pci_dev;
1976 SET_NETDEV_DEV(ndev, &pci_dev->dev);
1978 INIT_WORK(&dev->tq_refill, queue_refill);
1979 tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
1983 dev_info(&pci_dev->dev, "pci_enable_dev failed: %d\n", err);
1989 dev->base = ioremap_nocache(addr, PAGE_SIZE);
1990 dev->tx_descs = pci_alloc_consistent(pci_dev,
1991 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
1992 dev->rx_info.descs = pci_alloc_consistent(pci_dev,
1993 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
1995 if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
1999 dev->tx_descs, (long)dev->tx_phy_descs,
2000 dev->rx_info.descs, (long)dev->rx_info.phy_descs);
2002 ns83820_disable_interrupts(dev);
2004 dev->IMR_cache = 0;
2009 dev_info(&pci_dev->dev, "unable to register irq %d, err %d\n",
2024 dev_info(&pci_dev->dev, "unable to get netdev name: %d\n", err);
2029 ndev->name, le32_to_cpu(readl(dev->base + 0x22c)),
2037 ns83820_do_reset(dev, CR_RST);
2040 writel(PTSCR_RBIST_RST, dev->base + PTSCR);
2048 dev->CFG_cache = readl(dev->base + CFG);
2050 if ((dev->CFG_cache & CFG_PCI64_DET)) {
2054 if (!(dev->CFG_cache & CFG_DATA64_EN))
2058 dev->CFG_cache &= ~(CFG_DATA64_EN);
2060 dev->CFG_cache &= (CFG_TBI_EN | CFG_MRM_DIS | CFG_MWI_DIS |
2063 dev->CFG_cache |= CFG_PINT_DUPSTS | CFG_PINT_LNKSTS | CFG_PINT_SPDSTS |
2065 dev->CFG_cache |= CFG_REQALG;
2066 dev->CFG_cache |= CFG_POW;
2067 dev->CFG_cache |= CFG_TMRTEST;
2073 dev->CFG_cache |= CFG_M64ADDR;
2075 dev->CFG_cache |= CFG_T64ADDR;
2078 dev->CFG_cache &= ~CFG_BEM;
2081 if (dev->CFG_cache & CFG_TBI_EN) {
2084 writel(readl(dev->base + GPIOR) | 0x3e8, dev->base + GPIOR);
2087 writel(readl(dev->base + TANAR)
2089 dev->base + TANAR);
2093 dev->base + TBICR);
2094 writel(TBICR_MR_AN_ENABLE, dev->base + TBICR);
2095 dev->linkstate = LINK_AUTONEGOTIATE;
2097 dev->CFG_cache |= CFG_MODE_1000;
2100 writel(dev->CFG_cache, dev->base + CFG);
2101 dprintk("CFG: %08x\n", dev->CFG_cache);
2105 writel(dev->CFG_cache | CFG_PHY_RST, dev->base + CFG);
2107 writel(dev->CFG_cache, dev->base + CFG);
2113 if (readl(dev->base + SRR))
2114 writel(readl(dev->base+0x20c) | 0xfe00, dev->base + 0x20c);
2126 dev->base + TXCFG);
2129 writel(0x000, dev->base + IHR);
2130 writel(0x100, dev->base + IHR);
2131 writel(0x000, dev->base + IHR);
2142 | (RXCFG_MXDMA512) | 0, dev->base + RXCFG);
2145 writel(0, dev->base + PQCR);
2165 writel(VRCR_INIT_VALUE, dev->base + VRCR);
2176 writel(VTCR_INIT_VALUE, dev->base + VTCR);
2182 dev->base + PCR);
2185 writel(0, dev->base + WCSR);
2187 ns83820_getmac(dev, ndev->dev_addr);
2206 (unsigned)readl(dev->base + SRR) >> 8,
2207 (unsigned)readl(dev->base + SRR) & 0xff,
2226 ns83820_disable_interrupts(dev); /* paranoia */
2231 if (dev->base)
2232 iounmap(dev->base);
2233 pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
2234 …pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_desc…
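The probe error path above (lines 2226-2234) releases only what was actually acquired, in reverse order: unmap the BAR if ioremap succeeded, then free both descriptor rings. The same goto-unwind pattern in a self-contained form, with malloc standing in for the mapping and allocation calls:

    #include <stdlib.h>

    /* Each failure jumps to the label that releases everything acquired
     * before it, so teardown is always the exact reverse of bring-up. */
    static int init_one(void)
    {
        void *regs, *txd, *rxd;

        regs = malloc(4096);   /* stands in for ioremap()         */
        if (!regs)
            goto out;
        txd = malloc(1024);    /* stands in for the TX ring alloc */
        if (!txd)
            goto out_unmap;
        rxd = malloc(1024);    /* stands in for the RX ring alloc */
        if (!rxd)
            goto out_free_tx;

        /* success: ownership passes to the caller (leaked here for brevity) */
        return 0;

    out_free_tx:
        free(txd);
    out_unmap:
        free(regs);
    out:
        return -1;
    }

    int main(void) { return init_one(); }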
2245 struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */ local
2250 ns83820_disable_interrupts(dev); /* paranoia */
2253 free_irq(dev->pci_dev->irq, ndev);
2254 iounmap(dev->base);
2255 pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
2256 dev->tx_descs, dev->tx_phy_descs);
2257 pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
2258 dev->rx_info.descs, dev->rx_info.phy_descs);
2259 pci_disable_device(dev->pci_dev);