Lines Matching refs:cp (references to the symbol cp, a struct cp_private * driver context)

353 #define cpr8(reg)       readb(cp->regs + (reg))
354 #define cpr16(reg)      readw(cp->regs + (reg))
355 #define cpr32(reg)      readl(cp->regs + (reg))
356 #define cpw8(reg,val)   writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val)  writew((val), cp->regs + (reg))
358 #define cpw32(reg,val)  writel((val), cp->regs + (reg))
360 writeb((val), cp->regs + (reg)); \
361 readb(cp->regs + (reg)); \
364 writew((val), cp->regs + (reg)); \
365 readw(cp->regs + (reg)); \
368 writel((val), cp->regs + (reg)); \
369 readl(cp->regs + (reg)); \
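Only the macro bodies reference cp, so the wrapper lines of the flush variants cpw8_f/cpw16_f/cpw32_f (presumably source lines 359, 362-363, 366-367 and 370) are absent from the listing. A sketch of the full macros, reconstructed from the matched body lines at 360-361, 364-365 and 368-369:

/* Write an MMIO register, then read it back: the dummy read flushes the
 * posted PCI write so the update reaches the chip before the macro returns.
 * (Wrapper lines here are inferred from the matched bodies, not matched.)
 */
#define cpw8_f(reg,val) do {			\
	writeb((val), cp->regs + (reg));	\
	readb(cp->regs + (reg));		\
	} while (0)
#define cpw16_f(reg,val) do {			\
	writew((val), cp->regs + (reg));	\
	readw(cp->regs + (reg));		\
	} while (0)
#define cpw32_f(reg,val) do {			\
	writel((val), cp->regs + (reg));	\
	readl(cp->regs + (reg));		\
	} while (0)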
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
405 static inline void cp_set_rxbufsize (struct cp_private *cp) in cp_set_rxbufsize() argument
407 unsigned int mtu = cp->dev->mtu; in cp_set_rxbufsize()
411 cp->rx_buf_sz = mtu + ETH_HLEN + 8; in cp_set_rxbufsize()
413 cp->rx_buf_sz = PKT_BUF_SZ; in cp_set_rxbufsize()
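The if/else between source lines 407 and 413 carries no cp reference and is dropped by the listing, which makes the two rx_buf_sz assignments look contradictory. A sketch of the whole helper, assuming the usual 8139cp layout where the larger buffer is used only for MTUs above the standard Ethernet payload:

static inline void cp_set_rxbufsize (struct cp_private *cp)
{
	unsigned int mtu = cp->dev->mtu;

	if (mtu > ETH_DATA_LEN)
		/* MTU + ethernet header + FCS + VLAN tag */
		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
	else
		cp->rx_buf_sz = PKT_BUF_SZ;
}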
416 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, in cp_rx_skb() argument
421 skb->protocol = eth_type_trans (skb, cp->dev); in cp_rx_skb()
423 cp->dev->stats.rx_packets++; in cp_rx_skb()
424 cp->dev->stats.rx_bytes += skb->len; in cp_rx_skb()
429 napi_gro_receive(&cp->napi, skb); in cp_rx_skb()
432 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, in cp_rx_err_acct() argument
435 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n", in cp_rx_err_acct()
437 cp->dev->stats.rx_errors++; in cp_rx_err_acct()
439 cp->dev->stats.rx_frame_errors++; in cp_rx_err_acct()
441 cp->dev->stats.rx_crc_errors++; in cp_rx_err_acct()
443 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
445 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
447 cp->dev->stats.rx_fifo_errors++; in cp_rx_err_acct()
463 struct cp_private *cp = container_of(napi, struct cp_private, napi); in cp_rx_poll() local
464 struct net_device *dev = cp->dev; in cp_rx_poll()
465 unsigned int rx_tail = cp->rx_tail; in cp_rx_poll()
477 const unsigned buflen = cp->rx_buf_sz; in cp_rx_poll()
479 skb = cp->rx_skb[rx_tail]; in cp_rx_poll()
482 desc = &cp->rx_ring[rx_tail]; in cp_rx_poll()
496 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
498 cp->cp_stats.rx_frags++; in cp_rx_poll()
503 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n", in cp_rx_poll()
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, in cp_rx_poll()
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { in cp_rx_poll()
524 dma_unmap_single(&cp->pdev->dev, mapping, in cp_rx_poll()
535 cp->rx_skb[rx_tail] = new_skb; in cp_rx_poll()
537 cp_rx_skb(cp, skb, desc); in cp_rx_poll()
542 cp->rx_ring[rx_tail].opts2 = 0; in cp_rx_poll()
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); in cp_rx_poll()
546 cp->rx_buf_sz); in cp_rx_poll()
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_rx_poll()
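Source line 546 is a dangling continuation and line 548 a second assignment to the same field; the if/else around them (lines 544-545 and 547) has no cp reference and is omitted. Presumably the descriptor is handed back to the NIC with RingEnd set only on the last ring slot, along these lines:

		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);

The same pattern recurs in cp_refill_rx() at source lines 1094-1098 further down.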
552 cp->rx_tail = rx_tail; in cp_rx_poll()
564 spin_lock_irqsave(&cp->lock, flags); in cp_rx_poll()
567 spin_unlock_irqrestore(&cp->lock, flags); in cp_rx_poll()
576 struct cp_private *cp; in cp_interrupt() local
582 cp = netdev_priv(dev); in cp_interrupt()
584 spin_lock(&cp->lock); in cp_interrupt()
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", in cp_interrupt()
604 if (napi_schedule_prep(&cp->napi)) { in cp_interrupt()
606 __napi_schedule(&cp->napi); in cp_interrupt()
610 cp_tx(cp); in cp_interrupt()
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_interrupt()
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); in cp_interrupt()
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); in cp_interrupt()
627 spin_unlock(&cp->lock); in cp_interrupt()
639 struct cp_private *cp = netdev_priv(dev); in cp_poll_controller() local
640 const int irq = cp->pdev->irq; in cp_poll_controller()
648 static void cp_tx (struct cp_private *cp) in cp_tx() argument
650 unsigned tx_head = cp->tx_head; in cp_tx()
651 unsigned tx_tail = cp->tx_tail; in cp_tx()
655 struct cp_desc *txd = cp->tx_ring + tx_tail; in cp_tx()
664 skb = cp->tx_skb[tx_tail]; in cp_tx()
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in cp_tx()
673 netif_dbg(cp, tx_err, cp->dev, in cp_tx()
675 cp->dev->stats.tx_errors++; in cp_tx()
677 cp->dev->stats.tx_window_errors++; in cp_tx()
679 cp->dev->stats.tx_aborted_errors++; in cp_tx()
681 cp->dev->stats.tx_carrier_errors++; in cp_tx()
683 cp->dev->stats.tx_fifo_errors++; in cp_tx()
685 cp->dev->stats.collisions += in cp_tx()
687 cp->dev->stats.tx_packets++; in cp_tx()
688 cp->dev->stats.tx_bytes += skb->len; in cp_tx()
689 netif_dbg(cp, tx_done, cp->dev, in cp_tx()
697 cp->tx_skb[tx_tail] = NULL; in cp_tx()
702 cp->tx_tail = tx_tail; in cp_tx()
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); in cp_tx()
705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) in cp_tx()
706 netif_wake_queue(cp->dev); in cp_tx()
715 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, in unwind_tx_frag_mapping() argument
723 cp->tx_skb[index] = NULL; in unwind_tx_frag_mapping()
724 txd = &cp->tx_ring[index]; in unwind_tx_frag_mapping()
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in unwind_tx_frag_mapping()
734 struct cp_private *cp = netdev_priv(dev); in cp_start_xmit() local
741 spin_lock_irqsave(&cp->lock, intr_flags); in cp_start_xmit()
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { in cp_start_xmit()
746 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
751 entry = cp->tx_head; in cp_start_xmit()
758 struct cp_desc *txd = &cp->tx_ring[entry]; in cp_start_xmit()
763 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); in cp_start_xmit()
764 if (dma_mapping_error(&cp->pdev->dev, mapping)) in cp_start_xmit()
788 cp->tx_skb[entry] = skb; in cp_start_xmit()
802 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_start_xmit()
804 if (dma_mapping_error(&cp->pdev->dev, first_mapping)) in cp_start_xmit()
807 cp->tx_skb[entry] = skb; in cp_start_xmit()
817 mapping = dma_map_single(&cp->pdev->dev, in cp_start_xmit()
820 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_start_xmit()
821 unwind_tx_frag_mapping(cp, skb, first_entry, entry); in cp_start_xmit()
844 txd = &cp->tx_ring[entry]; in cp_start_xmit()
852 cp->tx_skb[entry] = skb; in cp_start_xmit()
856 txd = &cp->tx_ring[first_entry]; in cp_start_xmit()
877 cp->tx_head = entry; in cp_start_xmit()
880 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", in cp_start_xmit()
882 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) in cp_start_xmit()
886 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
893 cp->dev->stats.tx_dropped++; in cp_start_xmit()
902 struct cp_private *cp = netdev_priv(dev); in __cp_set_rx_mode() local
931 cp->rx_config = cp_rx_config | rx_mode; in __cp_set_rx_mode()
932 cpw32_f(RxConfig, cp->rx_config); in __cp_set_rx_mode()
941 struct cp_private *cp = netdev_priv(dev); in cp_set_rx_mode() local
943 spin_lock_irqsave (&cp->lock, flags); in cp_set_rx_mode()
945 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_rx_mode()
948 static void __cp_get_stats(struct cp_private *cp) in __cp_get_stats() argument
951 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); in __cp_get_stats()
957 struct cp_private *cp = netdev_priv(dev); in cp_get_stats() local
961 spin_lock_irqsave(&cp->lock, flags); in cp_get_stats()
963 __cp_get_stats(cp); in cp_get_stats()
964 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_stats()
969 static void cp_stop_hw (struct cp_private *cp) in cp_stop_hw() argument
977 cp->rx_tail = 0; in cp_stop_hw()
978 cp->tx_head = cp->tx_tail = 0; in cp_stop_hw()
980 netdev_reset_queue(cp->dev); in cp_stop_hw()
983 static void cp_reset_hw (struct cp_private *cp) in cp_reset_hw() argument
996 netdev_err(cp->dev, "hardware reset timeout\n"); in cp_reset_hw()
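Of cp_reset_hw() only the prototype (line 983) and the timeout message (line 996) reference cp. A sketch of the body, assuming the usual issue-reset-then-poll loop (the work counter and the delay call are assumptions, not taken from the listing):

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);			/* request a soft reset */

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))	/* chip clears the bit when done */
			return;

		schedule_timeout_uninterruptible(10);
	}

	netdev_err(cp->dev, "hardware reset timeout\n");
}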
999 static inline void cp_start_hw (struct cp_private *cp) in cp_start_hw() argument
1003 cpw16(CpCmd, cp->cpcmd); in cp_start_hw()
1016 ring_dma = cp->ring_dma; in cp_start_hw()
1032 netdev_reset_queue(cp->dev); in cp_start_hw()
1035 static void cp_enable_irq(struct cp_private *cp) in cp_enable_irq() argument
1040 static void cp_init_hw (struct cp_private *cp) in cp_init_hw() argument
1042 struct net_device *dev = cp->dev; in cp_init_hw()
1044 cp_reset_hw(cp); in cp_init_hw()
1052 cp_start_hw(cp); in cp_init_hw()
1061 cp->wol_enabled = 0; in cp_init_hw()
1070 static int cp_refill_rx(struct cp_private *cp) in cp_refill_rx() argument
1072 struct net_device *dev = cp->dev; in cp_refill_rx()
1079 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); in cp_refill_rx()
1083 mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_refill_rx()
1084 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_refill_rx()
1085 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_refill_rx()
1089 cp->rx_skb[i] = skb; in cp_refill_rx()
1091 cp->rx_ring[i].opts2 = 0; in cp_refill_rx()
1092 cp->rx_ring[i].addr = cpu_to_le64(mapping); in cp_refill_rx()
1094 cp->rx_ring[i].opts1 = in cp_refill_rx()
1095 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); in cp_refill_rx()
1097 cp->rx_ring[i].opts1 = in cp_refill_rx()
1098 cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_refill_rx()
1104 cp_clean_rings(cp); in cp_refill_rx()
1108 static void cp_init_rings_index (struct cp_private *cp) in cp_init_rings_index() argument
1110 cp->rx_tail = 0; in cp_init_rings_index()
1111 cp->tx_head = cp->tx_tail = 0; in cp_init_rings_index()
1114 static int cp_init_rings (struct cp_private *cp) in cp_init_rings() argument
1116 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_init_rings()
1117 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); in cp_init_rings()
1119 cp_init_rings_index(cp); in cp_init_rings()
1121 return cp_refill_rx (cp); in cp_init_rings()
1124 static int cp_alloc_rings (struct cp_private *cp) in cp_alloc_rings() argument
1126 struct device *d = &cp->pdev->dev; in cp_alloc_rings()
1130 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); in cp_alloc_rings()
1134 cp->rx_ring = mem; in cp_alloc_rings()
1135 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; in cp_alloc_rings()
1137 rc = cp_init_rings(cp); in cp_alloc_rings()
1139 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); in cp_alloc_rings()
1144 static void cp_clean_rings (struct cp_private *cp) in cp_clean_rings() argument
1150 if (cp->rx_skb[i]) { in cp_clean_rings()
1151 desc = cp->rx_ring + i; in cp_clean_rings()
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1153 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_clean_rings()
1154 dev_kfree_skb(cp->rx_skb[i]); in cp_clean_rings()
1159 if (cp->tx_skb[i]) { in cp_clean_rings()
1160 struct sk_buff *skb = cp->tx_skb[i]; in cp_clean_rings()
1162 desc = cp->tx_ring + i; in cp_clean_rings()
1163 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1168 cp->dev->stats.tx_dropped++; in cp_clean_rings()
1171 netdev_reset_queue(cp->dev); in cp_clean_rings()
1173 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); in cp_clean_rings()
1174 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_clean_rings()
1176 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); in cp_clean_rings()
1177 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); in cp_clean_rings()
1180 static void cp_free_rings (struct cp_private *cp) in cp_free_rings() argument
1182 cp_clean_rings(cp); in cp_free_rings()
1183 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, in cp_free_rings()
1184 cp->ring_dma); in cp_free_rings()
1185 cp->rx_ring = NULL; in cp_free_rings()
1186 cp->tx_ring = NULL; in cp_free_rings()
1191 struct cp_private *cp = netdev_priv(dev); in cp_open() local
1192 const int irq = cp->pdev->irq; in cp_open()
1195 netif_dbg(cp, ifup, dev, "enabling interface\n"); in cp_open()
1197 rc = cp_alloc_rings(cp); in cp_open()
1201 napi_enable(&cp->napi); in cp_open()
1203 cp_init_hw(cp); in cp_open()
1209 cp_enable_irq(cp); in cp_open()
1212 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); in cp_open()
1218 napi_disable(&cp->napi); in cp_open()
1219 cp_stop_hw(cp); in cp_open()
1220 cp_free_rings(cp); in cp_open()
1226 struct cp_private *cp = netdev_priv(dev); in cp_close() local
1229 napi_disable(&cp->napi); in cp_close()
1231 netif_dbg(cp, ifdown, dev, "disabling interface\n"); in cp_close()
1233 spin_lock_irqsave(&cp->lock, flags); in cp_close()
1238 cp_stop_hw(cp); in cp_close()
1240 spin_unlock_irqrestore(&cp->lock, flags); in cp_close()
1242 free_irq(cp->pdev->irq, dev); in cp_close()
1244 cp_free_rings(cp); in cp_close()
1250 struct cp_private *cp = netdev_priv(dev); in cp_tx_timeout() local
1258 spin_lock_irqsave(&cp->lock, flags); in cp_tx_timeout()
1260 cp_stop_hw(cp); in cp_tx_timeout()
1261 cp_clean_rings(cp); in cp_tx_timeout()
1262 rc = cp_init_rings(cp); in cp_tx_timeout()
1263 cp_start_hw(cp); in cp_tx_timeout()
1264 cp_enable_irq(cp); in cp_tx_timeout()
1268 spin_unlock_irqrestore(&cp->lock, flags); in cp_tx_timeout()
1273 struct cp_private *cp = netdev_priv(dev); in cp_change_mtu() local
1282 cp_set_rxbufsize(cp); /* set new rx buf size */ in cp_change_mtu()
1289 cp_set_rxbufsize(cp); in cp_change_mtu()
1306 struct cp_private *cp = netdev_priv(dev); in mdio_read() local
1309 readw(cp->regs + mii_2_8139_map[location]) : 0; in mdio_read()
1316 struct cp_private *cp = netdev_priv(dev); in mdio_write() local
1327 static int netdev_set_wol (struct cp_private *cp, in netdev_set_wol() argument
1354 cp->wol_enabled = (wol->wolopts) ? 1 : 0; in netdev_set_wol()
1360 static void netdev_get_wol (struct cp_private *cp, in netdev_get_wol() argument
1369 if (!cp->wol_enabled) return; in netdev_get_wol()
1384 struct cp_private *cp = netdev_priv(dev); in cp_get_drvinfo() local
1388 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cp_get_drvinfo()
1417 struct cp_private *cp = netdev_priv(dev); in cp_get_settings() local
1421 spin_lock_irqsave(&cp->lock, flags); in cp_get_settings()
1422 rc = mii_ethtool_gset(&cp->mii_if, cmd); in cp_get_settings()
1423 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_settings()
1430 struct cp_private *cp = netdev_priv(dev); in cp_set_settings() local
1434 spin_lock_irqsave(&cp->lock, flags); in cp_set_settings()
1435 rc = mii_ethtool_sset(&cp->mii_if, cmd); in cp_set_settings()
1436 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_settings()
1443 struct cp_private *cp = netdev_priv(dev); in cp_nway_reset() local
1444 return mii_nway_restart(&cp->mii_if); in cp_nway_reset()
1449 struct cp_private *cp = netdev_priv(dev); in cp_get_msglevel() local
1450 return cp->msg_enable; in cp_get_msglevel()
1455 struct cp_private *cp = netdev_priv(dev); in cp_set_msglevel() local
1456 cp->msg_enable = value; in cp_set_msglevel()
1461 struct cp_private *cp = netdev_priv(dev); in cp_set_features() local
1467 spin_lock_irqsave(&cp->lock, flags); in cp_set_features()
1470 cp->cpcmd |= RxChkSum; in cp_set_features()
1472 cp->cpcmd &= ~RxChkSum; in cp_set_features()
1475 cp->cpcmd |= RxVlanOn; in cp_set_features()
1477 cp->cpcmd &= ~RxVlanOn; in cp_set_features()
1479 cpw16_f(CpCmd, cp->cpcmd); in cp_set_features()
1480 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_features()
1488 struct cp_private *cp = netdev_priv(dev); in cp_get_regs() local
1496 spin_lock_irqsave(&cp->lock, flags); in cp_get_regs()
1497 memcpy_fromio(p, cp->regs, CP_REGS_SIZE); in cp_get_regs()
1498 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_regs()
1503 struct cp_private *cp = netdev_priv(dev); in cp_get_wol() local
1506 spin_lock_irqsave (&cp->lock, flags); in cp_get_wol()
1507 netdev_get_wol (cp, wol); in cp_get_wol()
1508 spin_unlock_irqrestore (&cp->lock, flags); in cp_get_wol()
1513 struct cp_private *cp = netdev_priv(dev); in cp_set_wol() local
1517 spin_lock_irqsave (&cp->lock, flags); in cp_set_wol()
1518 rc = netdev_set_wol (cp, wol); in cp_set_wol()
1519 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_wol()
1539 struct cp_private *cp = netdev_priv(dev); in cp_get_ethtool_stats() local
1544 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), in cp_get_ethtool_stats()
1577 tmp_stats[i++] = cp->cp_stats.rx_frags; in cp_get_ethtool_stats()
1580 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); in cp_get_ethtool_stats()
1606 struct cp_private *cp = netdev_priv(dev); in cp_ioctl() local
1613 spin_lock_irqsave(&cp->lock, flags); in cp_ioctl()
1614 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); in cp_ioctl()
1615 spin_unlock_irqrestore(&cp->lock, flags); in cp_ioctl()
1621 struct cp_private *cp = netdev_priv(dev); in cp_set_mac_address() local
1629 spin_lock_irq(&cp->lock); in cp_set_mac_address()
1636 spin_unlock_irq(&cp->lock); in cp_set_mac_address()
1760 struct cp_private *cp = netdev_priv(dev); in cp_get_eeprom_len() local
1763 spin_lock_irq(&cp->lock); in cp_get_eeprom_len()
1764 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128; in cp_get_eeprom_len()
1765 spin_unlock_irq(&cp->lock); in cp_get_eeprom_len()
1773 struct cp_private *cp = netdev_priv(dev); in cp_get_eeprom() local
1782 spin_lock_irq(&cp->lock); in cp_get_eeprom()
1784 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_get_eeprom()
1787 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1793 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1800 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1804 spin_unlock_irq(&cp->lock); in cp_get_eeprom()
1811 struct cp_private *cp = netdev_priv(dev); in cp_set_eeprom() local
1821 spin_lock_irq(&cp->lock); in cp_set_eeprom()
1823 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_set_eeprom()
1826 val = read_eeprom(cp->regs, offset, addr_len) & 0xff; in cp_set_eeprom()
1828 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1835 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1840 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00; in cp_set_eeprom()
1842 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1845 spin_unlock_irq(&cp->lock); in cp_set_eeprom()
1850 static void cp_set_d3_state (struct cp_private *cp) in cp_set_d3_state() argument
1852 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ in cp_set_d3_state()
1853 pci_set_power_state (cp->pdev, PCI_D3hot); in cp_set_d3_state()
1877 struct cp_private *cp; in cp_init_one() local
1898 cp = netdev_priv(dev); in cp_init_one()
1899 cp->pdev = pdev; in cp_init_one()
1900 cp->dev = dev; in cp_init_one()
1901 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); in cp_init_one()
1902 spin_lock_init (&cp->lock); in cp_init_one()
1903 cp->mii_if.dev = dev; in cp_init_one()
1904 cp->mii_if.mdio_read = mdio_read; in cp_init_one()
1905 cp->mii_if.mdio_write = mdio_write; in cp_init_one()
1906 cp->mii_if.phy_id = CP_INTERNAL_PHY; in cp_init_one()
1907 cp->mii_if.phy_id_mask = 0x1f; in cp_init_one()
1908 cp->mii_if.reg_num_mask = 0x1f; in cp_init_one()
1909 cp_set_rxbufsize(cp); in cp_init_one()
1958 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | in cp_init_one()
1972 cp->regs = regs; in cp_init_one()
1974 cp_stop_hw(cp); in cp_init_one()
1983 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); in cp_init_one()
2010 if (cp->wol_enabled) in cp_init_one()
2011 cp_set_d3_state (cp); in cp_init_one()
2031 struct cp_private *cp = netdev_priv(dev); in cp_remove_one() local
2034 iounmap(cp->regs); in cp_remove_one()
2035 if (cp->wol_enabled) in cp_remove_one()
2047 struct cp_private *cp = netdev_priv(dev); in cp_suspend() local
2056 spin_lock_irqsave (&cp->lock, flags); in cp_suspend()
2062 spin_unlock_irqrestore (&cp->lock, flags); in cp_suspend()
2065 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); in cp_suspend()
2074 struct cp_private *cp = netdev_priv(dev); in cp_resume() local
2087 cp_init_rings_index (cp); in cp_resume()
2088 cp_init_hw (cp); in cp_resume()
2089 cp_enable_irq(cp); in cp_resume()
2092 spin_lock_irqsave (&cp->lock, flags); in cp_resume()
2094 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_resume()
2096 spin_unlock_irqrestore (&cp->lock, flags); in cp_resume()