Lines matching refs: priv
Cross-reference hits from the cpmac Ethernet driver (cpmac.c). Each entry gives the source line number, the matching code fragment, and the enclosing function; lines that declare priv are tagged local or argument.
228 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_dump_regs() local
234 printk("%s: reg[%p]:", dev->name, priv->regs + i); in cpmac_dump_regs()
236 printk(" %08x", cpmac_read(priv->regs, i)); in cpmac_dump_regs()
253 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_dump_all_desc() local
254 struct cpmac_desc *dump = priv->rx_head; in cpmac_dump_all_desc()
259 } while (dump != priv->rx_head); in cpmac_dump_all_desc()
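The dump helper walks the circular RX ring exactly once: start at rx_head, follow ->next, and stop when the walk wraps back. A minimal sketch of the loop implied by lines 254 and 259 (cpmac_dump_desc() is the driver's own per-descriptor printer):

    struct cpmac_desc *dump = priv->rx_head;

    do {
            cpmac_dump_desc(dev, dump);
            dump = dump->next;
    } while (dump != priv->rx_head);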
282 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) in cpmac_mdio_read()
284 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | in cpmac_mdio_read()
286 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) in cpmac_mdio_read()
295 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) in cpmac_mdio_write()
297 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | in cpmac_mdio_write()
313 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | in cpmac_mdio_reset()
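The three MDIO helpers above share one protocol: spin until the BUSY bit clears, write a command word, and (for reads) spin again before extracting the result. A hedged reconstruction of the read path; MDIO_PHY() and MDIO_DATA() are assumed driver-local macros that do not appear in this listing:

    static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
    {
            u32 val;

            /* wait out any in-flight transaction */
            while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
                    cpu_relax();
            /* issue the read; setting BUSY kicks off the cycle */
            cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY |
                        MDIO_REG(reg) | MDIO_PHY(phy_id)); /* MDIO_PHY() assumed */
            /* poll until the controller drops BUSY, then return the data */
            while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
                    cpu_relax();

            return MDIO_DATA(val); /* MDIO_DATA() assumed */
    }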
328 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_set_multicast_list() local
330 mbp = cpmac_read(priv->regs, CPMAC_MBP); in cpmac_set_multicast_list()
332 cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | in cpmac_set_multicast_list()
335 cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); in cpmac_set_multicast_list()
338 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); in cpmac_set_multicast_list()
339 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); in cpmac_set_multicast_list()
362 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); in cpmac_set_multicast_list()
363 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); in cpmac_set_multicast_list()
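Read together, lines 330 through 363 show three receive-filter modes: promiscuous (MBP_RXPROMISC set), all-multicast (hash registers forced to all-ones), and a computed 64-bit hash filter. A condensed sketch of that flow; the hash[] computation itself is not part of this listing:

    u32 mbp = cpmac_read(priv->regs, CPMAC_MBP);

    if (dev->flags & IFF_PROMISC) {
            cpmac_write(priv->regs, CPMAC_MBP,
                        (mbp & ~MBP_PROMISCCHAN(0)) | MBP_RXPROMISC);
    } else {
            cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
            if (dev->flags & IFF_ALLMULTI) {
                    /* accept all multicast: saturate the hash filter */
                    cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
                    cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
            } else {
                    /* hash[] built elsewhere from the device's multicast list */
                    cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
                    cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
            }
    }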
368 static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, in cpmac_rx_one() argument
373 if (unlikely(netif_msg_hw(priv))) in cpmac_rx_one()
374 cpmac_dump_desc(priv->dev, desc); in cpmac_rx_one()
375 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); in cpmac_rx_one()
377 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_rx_one()
378 netdev_warn(priv->dev, "rx: spurious interrupt\n"); in cpmac_rx_one()
383 skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); in cpmac_rx_one()
386 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); in cpmac_rx_one()
388 priv->dev->stats.rx_packets++; in cpmac_rx_one()
389 priv->dev->stats.rx_bytes += desc->datalen; in cpmac_rx_one()
391 dma_unmap_single(&priv->dev->dev, desc->data_mapping, in cpmac_rx_one()
394 desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, in cpmac_rx_one()
398 if (unlikely(netif_msg_pktdata(priv))) { in cpmac_rx_one()
399 netdev_dbg(priv->dev, "received packet:\n"); in cpmac_rx_one()
400 cpmac_dump_skb(priv->dev, result); in cpmac_rx_one()
403 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_rx_one()
404 netdev_warn(priv->dev, in cpmac_rx_one()
407 priv->dev->stats.rx_dropped++; in cpmac_rx_one()
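cpmac_rx_one() implements the usual refill pattern: allocate a replacement skb first, and only if that succeeds hand the filled one up and remap the fresh buffer into the same descriptor slot; on allocation failure the frame is dropped and the old buffer reused. A sketch under those assumptions (the skb_put() length handling is inferred, not shown in the listing):

    struct sk_buff *skb, *result = NULL;

    skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
    if (likely(skb)) {
            skb_put(desc->skb, desc->datalen);   /* inferred from datalen use */
            desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
            priv->dev->stats.rx_packets++;
            priv->dev->stats.rx_bytes += desc->datalen;
            result = desc->skb;

            /* swap buffers: release the old mapping, install the new one */
            dma_unmap_single(&priv->dev->dev, desc->data_mapping,
                             CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
            desc->skb = skb;
            desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
                                                CPMAC_SKB_SIZE,
                                                DMA_FROM_DEVICE);
    } else {
            /* keep the old skb in the ring, drop this frame */
            priv->dev->stats.rx_dropped++;
    }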
420 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); in cpmac_poll() local
423 spin_lock(&priv->rx_lock); in cpmac_poll()
424 if (unlikely(!priv->rx_head)) { in cpmac_poll()
425 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_poll()
426 netdev_warn(priv->dev, "rx: polling, but no queue\n"); in cpmac_poll()
428 spin_unlock(&priv->rx_lock); in cpmac_poll()
433 desc = priv->rx_head; in cpmac_poll()
445 if (netif_msg_rx_err(priv)) in cpmac_poll()
446 netdev_err(priv->dev, "poll found a" in cpmac_poll()
455 skb = cpmac_rx_one(priv, desc); in cpmac_poll()
463 if (desc != priv->rx_head) { in cpmac_poll()
468 priv->rx_head->prev->hw_next = priv->rx_head->mapping; in cpmac_poll()
480 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) in cpmac_poll()
482 (priv->rx_head->dataflags & CPMAC_OWN) != 0) { in cpmac_poll()
486 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; in cpmac_poll()
487 restart = priv->rx_head; in cpmac_poll()
491 priv->dev->stats.rx_errors++; in cpmac_poll()
492 priv->dev->stats.rx_fifo_errors++; in cpmac_poll()
493 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_poll()
494 netdev_warn(priv->dev, "rx dma ring overrun\n"); in cpmac_poll()
497 if (netif_msg_drv(priv)) in cpmac_poll()
498 netdev_err(priv->dev, "cpmac_poll is trying " in cpmac_poll()
504 cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); in cpmac_poll()
507 priv->rx_head = desc; in cpmac_poll()
508 spin_unlock(&priv->rx_lock); in cpmac_poll()
509 if (unlikely(netif_msg_rx_status(priv))) in cpmac_poll()
510 netdev_dbg(priv->dev, "poll processed %d packets\n", received); in cpmac_poll()
517 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); in cpmac_poll()
527 if (netif_msg_drv(priv)) { in cpmac_poll()
528 netdev_err(priv->dev, "cpmac_poll is confused. " in cpmac_poll()
530 cpmac_dump_all_desc(priv->dev); in cpmac_poll()
531 netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", in cpmac_poll()
532 cpmac_read(priv->regs, CPMAC_RX_PTR(0)), in cpmac_poll()
533 cpmac_read(priv->regs, CPMAC_RX_ACK(0))); in cpmac_poll()
536 spin_unlock(&priv->rx_lock); in cpmac_poll()
538 netif_tx_stop_all_queues(priv->dev); in cpmac_poll()
539 napi_disable(&priv->napi); in cpmac_poll()
541 atomic_inc(&priv->reset_pending); in cpmac_poll()
542 cpmac_hw_stop(priv->dev); in cpmac_poll()
543 if (!schedule_work(&priv->reset_work)) in cpmac_poll()
544 atomic_dec(&priv->reset_pending); in cpmac_poll()
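Stripped of the ring-repair paths (lines 463-504) and the fatal fallback that schedules reset_work (527-544), the poll routine follows the standard NAPI contract: consume up to budget packets under rx_lock, and only once the ring is drained complete NAPI and re-enable the RX interrupt. A deliberately simplified reconstruction:

    static int cpmac_poll(struct napi_struct *napi, int budget)
    {
            struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
            struct cpmac_desc *desc;
            int received = 0;

            spin_lock(&priv->rx_lock);
            desc = priv->rx_head;
            /* CPMAC_OWN clear means the hardware has filled the descriptor */
            while ((received < budget) && !(desc->dataflags & CPMAC_OWN)) {
                    struct sk_buff *skb = cpmac_rx_one(priv, desc);

                    if (likely(skb)) {
                            netif_receive_skb(skb);
                            received++;
                    }
                    desc = desc->next;
            }
            priv->rx_head = desc;
            spin_unlock(&priv->rx_lock);

            if (received < budget) {
                    /* ring drained: back to interrupt mode */
                    napi_complete(napi);
                    cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
            }

            return received;
    }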
554 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_start_xmit() local
556 if (unlikely(atomic_read(&priv->reset_pending))) in cpmac_start_xmit()
566 desc = &priv->desc_ring[queue]; in cpmac_start_xmit()
568 if (netif_msg_tx_err(priv) && net_ratelimit()) in cpmac_start_xmit()
574 spin_lock(&priv->lock); in cpmac_start_xmit()
575 spin_unlock(&priv->lock); in cpmac_start_xmit()
583 if (unlikely(netif_msg_tx_queued(priv))) in cpmac_start_xmit()
585 if (unlikely(netif_msg_hw(priv))) in cpmac_start_xmit()
587 if (unlikely(netif_msg_pktdata(priv))) in cpmac_start_xmit()
589 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); in cpmac_start_xmit()
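The transmit path maps one skb per hardware queue and kicks the channel by writing the descriptor's bus address to TX_PTR (line 589). A sketch of the core; CPMAC_SOP and CPMAC_EOP are assumed descriptor flags alongside the CPMAC_OWN seen elsewhere in this listing, and padding/flag setup is abbreviated:

    desc = &priv->desc_ring[queue];
    desc->skb = skb;
    desc->data_mapping = dma_map_single(&dev->dev, skb->data, skb->len,
                                        DMA_TO_DEVICE);
    /* CPMAC_SOP/CPMAC_EOP assumed: single-buffer frame owned by hardware */
    desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
    desc->datalen = skb->len;
    cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);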
597 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_end_xmit() local
599 desc = &priv->desc_ring[queue]; in cpmac_end_xmit()
600 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); in cpmac_end_xmit()
602 spin_lock(&priv->lock); in cpmac_end_xmit()
605 spin_unlock(&priv->lock); in cpmac_end_xmit()
609 if (unlikely(netif_msg_tx_done(priv))) in cpmac_end_xmit()
618 if (netif_msg_tx_err(priv) && net_ratelimit()) in cpmac_end_xmit()
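Completion mirrors submission: ack the descriptor, account the bytes under priv->lock, free the skb from interrupt context, and wake the matching subqueue so the stack can submit again. A hedged sketch of lines 599-609:

    desc = &priv->desc_ring[queue];
    cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
    if (likely(desc->skb)) {
            spin_lock(&priv->lock);
            dev->stats.tx_packets++;
            dev->stats.tx_bytes += desc->skb->len;
            spin_unlock(&priv->lock);
            dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
                             DMA_TO_DEVICE);
            dev_kfree_skb_irq(desc->skb);
            desc->skb = NULL;
            netif_wake_subqueue(dev, queue);
    }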
628 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_hw_stop() local
629 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); in cpmac_hw_stop()
632 cpmac_write(priv->regs, CPMAC_RX_CONTROL, in cpmac_hw_stop()
633 cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); in cpmac_hw_stop()
634 cpmac_write(priv->regs, CPMAC_TX_CONTROL, in cpmac_hw_stop()
635 cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); in cpmac_hw_stop()
637 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); in cpmac_hw_stop()
638 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); in cpmac_hw_stop()
640 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); in cpmac_hw_stop()
641 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); in cpmac_hw_stop()
642 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); in cpmac_hw_stop()
643 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); in cpmac_hw_stop()
644 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, in cpmac_hw_stop()
645 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); in cpmac_hw_stop()
651 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_hw_start() local
652 struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev); in cpmac_hw_start()
656 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); in cpmac_hw_start()
657 cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); in cpmac_hw_start()
659 cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); in cpmac_hw_start()
661 cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | in cpmac_hw_start()
663 cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); in cpmac_hw_start()
665 cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); in cpmac_hw_start()
666 cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); in cpmac_hw_start()
667 cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | in cpmac_hw_start()
670 cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); in cpmac_hw_start()
671 cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); in cpmac_hw_start()
672 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); in cpmac_hw_start()
673 cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); in cpmac_hw_start()
674 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); in cpmac_hw_start()
675 cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); in cpmac_hw_start()
676 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); in cpmac_hw_start()
677 cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); in cpmac_hw_start()
678 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); in cpmac_hw_start()
680 cpmac_write(priv->regs, CPMAC_RX_CONTROL, in cpmac_hw_start()
681 cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); in cpmac_hw_start()
682 cpmac_write(priv->regs, CPMAC_TX_CONTROL, in cpmac_hw_start()
683 cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); in cpmac_hw_start()
684 cpmac_write(priv->regs, CPMAC_MAC_CONTROL, in cpmac_hw_start()
685 cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | in cpmac_hw_start()
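Lines 665-667 show how the six-byte station address is distributed: byte 5 is replicated into ADDR_LO for each channel, byte 4 goes to ADDR_MID, and bytes 0-3 are packed into ADDR_HI. The 8-channel loop bound and the shift layout completing the truncated line 667 are inferences:

    for (i = 0; i < 8; i++)   /* 8 channels assumed */
            cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
    cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
    /* shift layout inferred from the truncated source line */
    cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI,
                dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
                (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24));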
691 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_clear_rx() local
695 if (unlikely(!priv->rx_head)) in cpmac_clear_rx()
697 desc = priv->rx_head; in cpmac_clear_rx()
698 for (i = 0; i < priv->ring_size; i++) { in cpmac_clear_rx()
700 if (netif_msg_rx_err(priv) && net_ratelimit()) in cpmac_clear_rx()
702 if (unlikely(netif_msg_hw(priv))) in cpmac_clear_rx()
710 priv->rx_head->prev->hw_next = 0; in cpmac_clear_rx()
715 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_clear_tx() local
718 if (unlikely(!priv->desc_ring)) in cpmac_clear_tx()
721 priv->desc_ring[i].dataflags = 0; in cpmac_clear_tx()
722 if (priv->desc_ring[i].skb) { in cpmac_clear_tx()
723 dev_kfree_skb_any(priv->desc_ring[i].skb); in cpmac_clear_tx()
724 priv->desc_ring[i].skb = NULL; in cpmac_clear_tx()
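TX cleanup is the simpler of the two clear routines: drop the ownership flags and free any skb still parked in each of the CPMAC_QUEUES descriptors. Reconstructed from lines 718-724:

    for (i = 0; i < CPMAC_QUEUES; i++) {
            priv->desc_ring[i].dataflags = 0;
            if (priv->desc_ring[i].skb) {
                    dev_kfree_skb_any(priv->desc_ring[i].skb);
                    priv->desc_ring[i].skb = NULL;
            }
    }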
731 struct cpmac_priv *priv = in cpmac_hw_error() local
734 spin_lock(&priv->rx_lock); in cpmac_hw_error()
735 cpmac_clear_rx(priv->dev); in cpmac_hw_error()
736 spin_unlock(&priv->rx_lock); in cpmac_hw_error()
737 cpmac_clear_tx(priv->dev); in cpmac_hw_error()
738 cpmac_hw_start(priv->dev); in cpmac_hw_error()
740 atomic_dec(&priv->reset_pending); in cpmac_hw_error()
742 netif_tx_wake_all_queues(priv->dev); in cpmac_hw_error()
743 cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); in cpmac_hw_error()
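The reset worker serializes ring repair against the receive path: RX is cleared under rx_lock, TX without it, then the MAC is restarted and queued transmitters released. A sketch of lines 734-743; the container_of() completes the truncated declaration on line 731:

    static void cpmac_hw_error(struct work_struct *work)
    {
            struct cpmac_priv *priv =
                    container_of(work, struct cpmac_priv, reset_work);

            spin_lock(&priv->rx_lock);
            cpmac_clear_rx(priv->dev);
            spin_unlock(&priv->rx_lock);
            cpmac_clear_tx(priv->dev);
            cpmac_hw_start(priv->dev);

            atomic_dec(&priv->reset_pending);
            netif_tx_wake_all_queues(priv->dev);
            cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
    }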
748 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_check_status() local
750 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); in cpmac_check_status()
757 if (netif_msg_drv(priv) && net_ratelimit()) { in cpmac_check_status()
773 if (schedule_work(&priv->reset_work)) in cpmac_check_status()
774 atomic_inc(&priv->reset_pending); in cpmac_check_status()
775 if (unlikely(netif_msg_hw(priv))) in cpmac_check_status()
778 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); in cpmac_check_status()
784 struct cpmac_priv *priv; in cpmac_irq() local
788 priv = netdev_priv(dev); in cpmac_irq()
790 status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); in cpmac_irq()
792 if (unlikely(netif_msg_intr(priv))) in cpmac_irq()
800 if (napi_schedule_prep(&priv->napi)) { in cpmac_irq()
801 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); in cpmac_irq()
802 __napi_schedule(&priv->napi); in cpmac_irq()
806 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); in cpmac_irq()
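The interrupt handler follows the classic NAPI handoff in lines 800-806: mask the queue's RX interrupt, schedule the poller, and write the EOI vector so the controller can raise the line again later. The status-decoding constants are assumptions, since only the register accesses appear in this listing:

    status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

    if (status & MAC_INT_RX) {                    /* assumed status bit */
            int queue = (status >> 8) & 7;        /* assumed field layout */

            if (napi_schedule_prep(&priv->napi)) {
                    /* mask this queue until the poll re-enables it */
                    cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
                    __napi_schedule(&priv->napi);
            }
    }
    cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);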
816 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_tx_timeout() local
818 spin_lock(&priv->lock); in cpmac_tx_timeout()
820 spin_unlock(&priv->lock); in cpmac_tx_timeout()
821 if (netif_msg_tx_err(priv) && net_ratelimit()) in cpmac_tx_timeout()
824 atomic_inc(&priv->reset_pending); in cpmac_tx_timeout()
828 atomic_dec(&priv->reset_pending); in cpmac_tx_timeout()
830 netif_tx_wake_all_queues(priv->dev); in cpmac_tx_timeout()
835 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_ioctl() local
839 if (!priv->phy) in cpmac_ioctl()
842 return phy_mii_ioctl(priv->phy, ifr, cmd); in cpmac_ioctl()
847 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_get_settings() local
849 if (priv->phy) in cpmac_get_settings()
850 return phy_ethtool_gset(priv->phy, cmd); in cpmac_get_settings()
857 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_set_settings() local
862 if (priv->phy) in cpmac_set_settings()
863 return phy_ethtool_sset(priv->phy, cmd); in cpmac_set_settings()
871 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_get_ringparam() local
878 ring->rx_pending = priv->ring_size; in cpmac_get_ringparam()
887 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_set_ringparam() local
891 priv->ring_size = ring->rx_pending; in cpmac_set_ringparam()
915 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_adjust_link() local
918 spin_lock(&priv->lock); in cpmac_adjust_link()
919 if (priv->phy->link) { in cpmac_adjust_link()
921 if (priv->phy->duplex != priv->oldduplex) { in cpmac_adjust_link()
923 priv->oldduplex = priv->phy->duplex; in cpmac_adjust_link()
926 if (priv->phy->speed != priv->oldspeed) { in cpmac_adjust_link()
928 priv->oldspeed = priv->phy->speed; in cpmac_adjust_link()
931 if (!priv->oldlink) { in cpmac_adjust_link()
933 priv->oldlink = 1; in cpmac_adjust_link()
935 } else if (priv->oldlink) { in cpmac_adjust_link()
937 priv->oldlink = 0; in cpmac_adjust_link()
938 priv->oldspeed = 0; in cpmac_adjust_link()
939 priv->oldduplex = -1; in cpmac_adjust_link()
942 if (new_state && netif_msg_link(priv) && net_ratelimit()) in cpmac_adjust_link()
943 phy_print_status(priv->phy); in cpmac_adjust_link()
945 spin_unlock(&priv->lock); in cpmac_adjust_link()
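The PHY callback caches three values (oldlink, oldspeed, oldduplex) and logs only on transitions, which is why the listing touches each pair. A reconstruction from lines 918-945; any queue wake/stop calls on link transitions are omitted here:

    spin_lock(&priv->lock);
    if (priv->phy->link) {
            if (priv->phy->duplex != priv->oldduplex) {
                    new_state = 1;
                    priv->oldduplex = priv->phy->duplex;
            }
            if (priv->phy->speed != priv->oldspeed) {
                    new_state = 1;
                    priv->oldspeed = priv->phy->speed;
            }
            if (!priv->oldlink) {
                    new_state = 1;
                    priv->oldlink = 1;
            }
    } else if (priv->oldlink) {
            new_state = 1;
            priv->oldlink = 0;
            priv->oldspeed = 0;
            priv->oldduplex = -1;
    }
    if (new_state && netif_msg_link(priv) && net_ratelimit())
            phy_print_status(priv->phy);
    spin_unlock(&priv->lock);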
951 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_open() local
956 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); in cpmac_open()
958 if (netif_msg_drv(priv)) in cpmac_open()
965 priv->regs = ioremap(mem->start, resource_size(mem)); in cpmac_open()
966 if (!priv->regs) { in cpmac_open()
967 if (netif_msg_drv(priv)) in cpmac_open()
974 size = priv->ring_size + CPMAC_QUEUES; in cpmac_open()
975 priv->desc_ring = dma_alloc_coherent(&dev->dev, in cpmac_open()
977 &priv->dma_ring, in cpmac_open()
979 if (!priv->desc_ring) { in cpmac_open()
985 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; in cpmac_open()
987 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; in cpmac_open()
988 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { in cpmac_open()
1001 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; in cpmac_open()
1006 priv->rx_head->prev->hw_next = (u32)0; in cpmac_open()
1010 if (netif_msg_drv(priv)) in cpmac_open()
1016 atomic_set(&priv->reset_pending, 0); in cpmac_open()
1017 INIT_WORK(&priv->reset_work, cpmac_hw_error); in cpmac_open()
1020 napi_enable(&priv->napi); in cpmac_open()
1021 priv->phy->state = PHY_CHANGELINK; in cpmac_open()
1022 phy_start(priv->phy); in cpmac_open()
1028 for (i = 0; i < priv->ring_size; i++) { in cpmac_open()
1029 if (priv->rx_head[i].skb) { in cpmac_open()
1031 priv->rx_head[i].data_mapping, in cpmac_open()
1034 kfree_skb(priv->rx_head[i].skb); in cpmac_open()
1038 kfree(priv->desc_ring); in cpmac_open()
1039 iounmap(priv->regs); in cpmac_open()
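cpmac_open() carves a single coherent DMA block into CPMAC_QUEUES TX descriptors followed by an RX ring chained circularly through hw_next, with the hardware chain terminated at the wrap point (line 1006). A sketch of the ring setup; the prev linkage is inferred from its use at lines 468 and 1006, and per-descriptor skb allocation is elided:

    int size = priv->ring_size + CPMAC_QUEUES;

    priv->desc_ring = dma_alloc_coherent(&dev->dev,
                                         sizeof(struct cpmac_desc) * size,
                                         &priv->dma_ring, GFP_KERNEL);
    if (!priv->desc_ring)
            return -ENOMEM;

    for (i = 0; i < size; i++)
            priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

    priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
    for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
            desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
            desc->next->prev = desc;          /* prev linkage inferred */
            desc->hw_next = (u32)desc->next->mapping;
    }
    priv->rx_head->prev->hw_next = (u32)0;    /* stop hardware at the wrap */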
1051 struct cpmac_priv *priv = netdev_priv(dev); in cpmac_stop() local
1056 cancel_work_sync(&priv->reset_work); in cpmac_stop()
1057 napi_disable(&priv->napi); in cpmac_stop()
1058 phy_stop(priv->phy); in cpmac_stop()
1063 cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); in cpmac_stop()
1064 cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); in cpmac_stop()
1065 cpmac_write(priv->regs, CPMAC_MBP, 0); in cpmac_stop()
1068 iounmap(priv->regs); in cpmac_stop()
1069 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); in cpmac_stop()
1071 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; in cpmac_stop()
1072 for (i = 0; i < priv->ring_size; i++) { in cpmac_stop()
1073 if (priv->rx_head[i].skb) { in cpmac_stop()
1075 priv->rx_head[i].data_mapping, in cpmac_stop()
1078 kfree_skb(priv->rx_head[i].skb); in cpmac_stop()
1083 (CPMAC_QUEUES + priv->ring_size), in cpmac_stop()
1084 priv->desc_ring, priv->dma_ring); in cpmac_stop()
1108 struct cpmac_priv *priv; in cpmac_probe() local
1136 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); in cpmac_probe()
1141 priv = netdev_priv(dev); in cpmac_probe()
1143 priv->pdev = pdev; in cpmac_probe()
1155 netif_napi_add(dev, &priv->napi, cpmac_poll, 64); in cpmac_probe()
1157 spin_lock_init(&priv->lock); in cpmac_probe()
1158 spin_lock_init(&priv->rx_lock); in cpmac_probe()
1159 priv->dev = dev; in cpmac_probe()
1160 priv->ring_size = 64; in cpmac_probe()
1161 priv->msg_enable = netif_msg_init(debug_level, 0xff); in cpmac_probe()
1164 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, in cpmac_probe()
1167 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, in cpmac_probe()
1170 if (IS_ERR(priv->phy)) { in cpmac_probe()
1171 if (netif_msg_drv(priv)) in cpmac_probe()
1174 rc = PTR_ERR(priv->phy); in cpmac_probe()
1184 if (netif_msg_probe(priv)) { in cpmac_probe()
1187 priv->phy_name, dev->dev_addr); in cpmac_probe()
1231 cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); in cpmac_init()
1233 if (!cpmac_mii->priv) { in cpmac_init()
1249 mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); in cpmac_init()
1279 iounmap(cpmac_mii->priv); in cpmac_init()
1291 iounmap(cpmac_mii->priv); in cpmac_exit()
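Finally, the module-level code treats bus->priv as the raw MDIO register window: mapped once at init (AR7_REGS_MDIO), probed via the ALIVE register to discover attached PHYs, and unmapped on both the init error path (line 1279) and module exit (1291). A sketch of the init side; the error message and label are illustrative only:

    cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
    if (!cpmac_mii->priv) {
            pr_err("Can't ioremap mdio registers\n");  /* hypothetical message */
            res = -ENXIO;
            goto fail;                                 /* hypothetical label */
    }

    /* one set bit per PHY that answered; feeds the bus phy_mask */
    mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);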