Lines Matching refs:fep

69 struct fs_enet_private *fep = netdev_priv(dev); in fs_set_multicast_list() local
71 (*fep->ops->set_multicast_list)(dev); in fs_set_multicast_list()
85 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); in fs_enet_rx_napi() local
86 struct net_device *dev = fep->ndev; in fs_enet_rx_napi()
87 const struct fs_platform_info *fpi = fep->fpi; in fs_enet_rx_napi()
101 bdp = fep->cur_rx; in fs_enet_rx_napi()
104 (*fep->ops->napi_clear_rx_event)(dev); in fs_enet_rx_napi()
107 curidx = bdp - fep->rx_bd_base; in fs_enet_rx_napi()
114 dev_warn(fep->dev, "rcv is not +last\n"); in fs_enet_rx_napi()
121 fep->stats.rx_errors++; in fs_enet_rx_napi()
124 fep->stats.rx_length_errors++; in fs_enet_rx_napi()
127 fep->stats.rx_frame_errors++; in fs_enet_rx_napi()
130 fep->stats.rx_crc_errors++; in fs_enet_rx_napi()
133 fep->stats.rx_crc_errors++; in fs_enet_rx_napi()
135 skb = fep->rx_skbuff[curidx]; in fs_enet_rx_napi()
137 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_rx_napi()
144 skb = fep->rx_skbuff[curidx]; in fs_enet_rx_napi()
146 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_rx_napi()
153 fep->stats.rx_packets++; in fs_enet_rx_napi()
155 fep->stats.rx_bytes += pkt_len + 4; in fs_enet_rx_napi()
179 fep->stats.rx_dropped++; in fs_enet_rx_napi()
184 fep->rx_skbuff[curidx] = skbn; in fs_enet_rx_napi()
185 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, in fs_enet_rx_napi()
197 bdp = fep->rx_bd_base; in fs_enet_rx_napi()
199 (*fep->ops->rx_bd_done)(dev); in fs_enet_rx_napi()
205 fep->cur_rx = bdp; in fs_enet_rx_napi()
210 (*fep->ops->napi_enable_rx)(dev); in fs_enet_rx_napi()
217 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, in fs_enet_tx_napi() local
219 struct net_device *dev = fep->ndev; in fs_enet_tx_napi()
226 spin_lock(&fep->tx_lock); in fs_enet_tx_napi()
227 bdp = fep->dirty_tx; in fs_enet_tx_napi()
230 (*fep->ops->napi_clear_tx_event)(dev); in fs_enet_tx_napi()
234 dirtyidx = bdp - fep->tx_bd_base; in fs_enet_tx_napi()
236 if (fep->tx_free == fep->tx_ring) in fs_enet_tx_napi()
239 skb = fep->tx_skbuff[dirtyidx]; in fs_enet_tx_napi()
248 fep->stats.tx_heartbeat_errors++; in fs_enet_tx_napi()
250 fep->stats.tx_window_errors++; in fs_enet_tx_napi()
252 fep->stats.tx_aborted_errors++; in fs_enet_tx_napi()
254 fep->stats.tx_fifo_errors++; in fs_enet_tx_napi()
256 fep->stats.tx_carrier_errors++; in fs_enet_tx_napi()
259 fep->stats.tx_errors++; in fs_enet_tx_napi()
263 fep->stats.tx_packets++; in fs_enet_tx_napi()
266 dev_warn(fep->dev, in fs_enet_tx_napi()
275 fep->stats.collisions++; in fs_enet_tx_napi()
278 if (fep->mapped_as_page[dirtyidx]) in fs_enet_tx_napi()
279 dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_tx_napi()
282 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_tx_napi()
290 fep->tx_skbuff[dirtyidx] = NULL; in fs_enet_tx_napi()
299 bdp = fep->tx_bd_base; in fs_enet_tx_napi()
305 if (++fep->tx_free >= MAX_SKB_FRAGS) in fs_enet_tx_napi()
310 fep->dirty_tx = bdp; in fs_enet_tx_napi()
313 (*fep->ops->tx_restart)(dev); in fs_enet_tx_napi()
317 (*fep->ops->napi_enable_tx)(dev); in fs_enet_tx_napi()
320 spin_unlock(&fep->tx_lock); in fs_enet_tx_napi()
338 struct fs_enet_private *fep; in fs_enet_interrupt() local
345 fep = netdev_priv(dev); in fs_enet_interrupt()
346 fpi = fep->fpi; in fs_enet_interrupt()
349 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { in fs_enet_interrupt()
353 int_clr_events &= ~fep->ev_napi_rx; in fs_enet_interrupt()
355 (*fep->ops->clear_int_events)(dev, int_clr_events); in fs_enet_interrupt()
357 if (int_events & fep->ev_err) in fs_enet_interrupt()
358 (*fep->ops->ev_error)(dev, int_events); in fs_enet_interrupt()
360 if (int_events & fep->ev_rx) { in fs_enet_interrupt()
361 napi_ok = napi_schedule_prep(&fep->napi); in fs_enet_interrupt()
363 (*fep->ops->napi_disable_rx)(dev); in fs_enet_interrupt()
364 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); in fs_enet_interrupt()
369 __napi_schedule(&fep->napi); in fs_enet_interrupt()
372 if (int_events & fep->ev_tx) { in fs_enet_interrupt()
373 napi_ok = napi_schedule_prep(&fep->napi_tx); in fs_enet_interrupt()
375 (*fep->ops->napi_disable_tx)(dev); in fs_enet_interrupt()
376 (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx); in fs_enet_interrupt()
381 __napi_schedule(&fep->napi_tx); in fs_enet_interrupt()
391 struct fs_enet_private *fep = netdev_priv(dev); in fs_init_bds() local
398 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; in fs_init_bds()
399 fep->tx_free = fep->tx_ring; in fs_init_bds()
400 fep->cur_rx = fep->rx_bd_base; in fs_init_bds()
405 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { in fs_init_bds()
411 fep->rx_skbuff[i] = skb; in fs_init_bds()
413 dma_map_single(fep->dev, skb->data, in fs_init_bds()
418 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); in fs_init_bds()
423 for (; i < fep->rx_ring; i++, bdp++) { in fs_init_bds()
424 fep->rx_skbuff[i] = NULL; in fs_init_bds()
425 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); in fs_init_bds()
431 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { in fs_init_bds()
432 fep->tx_skbuff[i] = NULL; in fs_init_bds()
435 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); in fs_init_bds()
441 struct fs_enet_private *fep = netdev_priv(dev); in fs_cleanup_bds() local
449 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { in fs_cleanup_bds()
450 if ((skb = fep->tx_skbuff[i]) == NULL) in fs_cleanup_bds()
454 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_cleanup_bds()
457 fep->tx_skbuff[i] = NULL; in fs_cleanup_bds()
464 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { in fs_cleanup_bds()
465 if ((skb = fep->rx_skbuff[i]) == NULL) in fs_cleanup_bds()
469 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_cleanup_bds()
473 fep->rx_skbuff[i] = NULL; in fs_cleanup_bds()
514 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_start_xmit() local
551 spin_lock(&fep->tx_lock); in fs_enet_start_xmit()
556 bdp = fep->cur_tx; in fs_enet_start_xmit()
559 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { in fs_enet_start_xmit()
561 spin_unlock(&fep->tx_lock); in fs_enet_start_xmit()
567 dev_warn(fep->dev, "tx queue full!.\n"); in fs_enet_start_xmit()
571 curidx = bdp - fep->tx_bd_base; in fs_enet_start_xmit()
574 fep->stats.tx_bytes += len; in fs_enet_start_xmit()
577 fep->tx_free -= nr_frags + 1; in fs_enet_start_xmit()
581 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, in fs_enet_start_xmit()
585 fep->mapped_as_page[curidx] = 0; in fs_enet_start_xmit()
596 bdp = fep->tx_bd_base, curidx = 0; in fs_enet_start_xmit()
599 CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len, in fs_enet_start_xmit()
603 fep->tx_skbuff[curidx] = NULL; in fs_enet_start_xmit()
604 fep->mapped_as_page[curidx] = 1; in fs_enet_start_xmit()
623 fep->tx_skbuff[curidx] = skb; in fs_enet_start_xmit()
629 bdp = fep->tx_bd_base; in fs_enet_start_xmit()
630 fep->cur_tx = bdp; in fs_enet_start_xmit()
632 if (fep->tx_free < MAX_SKB_FRAGS) in fs_enet_start_xmit()
637 (*fep->ops->tx_kickstart)(dev); in fs_enet_start_xmit()
639 spin_unlock(&fep->tx_lock); in fs_enet_start_xmit()
646 struct fs_enet_private *fep = netdev_priv(dev); in fs_timeout() local
650 fep->stats.tx_errors++; in fs_timeout()
652 spin_lock_irqsave(&fep->lock, flags); in fs_timeout()
655 phy_stop(fep->phydev); in fs_timeout()
656 (*fep->ops->stop)(dev); in fs_timeout()
657 (*fep->ops->restart)(dev); in fs_timeout()
658 phy_start(fep->phydev); in fs_timeout()
661 phy_start(fep->phydev); in fs_timeout()
662 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); in fs_timeout()
663 spin_unlock_irqrestore(&fep->lock, flags); in fs_timeout()
674 struct fs_enet_private *fep = netdev_priv(dev); in generic_adjust_link() local
675 struct phy_device *phydev = fep->phydev; in generic_adjust_link()
680 if (phydev->duplex != fep->oldduplex) { in generic_adjust_link()
682 fep->oldduplex = phydev->duplex; in generic_adjust_link()
685 if (phydev->speed != fep->oldspeed) { in generic_adjust_link()
687 fep->oldspeed = phydev->speed; in generic_adjust_link()
690 if (!fep->oldlink) { in generic_adjust_link()
692 fep->oldlink = 1; in generic_adjust_link()
696 fep->ops->restart(dev); in generic_adjust_link()
697 } else if (fep->oldlink) { in generic_adjust_link()
699 fep->oldlink = 0; in generic_adjust_link()
700 fep->oldspeed = 0; in generic_adjust_link()
701 fep->oldduplex = -1; in generic_adjust_link()
704 if (new_state && netif_msg_link(fep)) in generic_adjust_link()
711 struct fs_enet_private *fep = netdev_priv(dev); in fs_adjust_link() local
714 spin_lock_irqsave(&fep->lock, flags); in fs_adjust_link()
716 if(fep->ops->adjust_link) in fs_adjust_link()
717 fep->ops->adjust_link(dev); in fs_adjust_link()
721 spin_unlock_irqrestore(&fep->lock, flags); in fs_adjust_link()
726 struct fs_enet_private *fep = netdev_priv(dev); in fs_init_phy() local
730 fep->oldlink = 0; in fs_init_phy()
731 fep->oldspeed = 0; in fs_init_phy()
732 fep->oldduplex = -1; in fs_init_phy()
734 iface = fep->fpi->use_rmii ? in fs_init_phy()
737 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, in fs_init_phy()
744 fep->phydev = phydev; in fs_init_phy()
751 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_open() local
757 fs_init_bds(fep->ndev); in fs_enet_open()
759 napi_enable(&fep->napi); in fs_enet_open()
760 napi_enable(&fep->napi_tx); in fs_enet_open()
763 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, in fs_enet_open()
766 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); in fs_enet_open()
767 napi_disable(&fep->napi); in fs_enet_open()
768 napi_disable(&fep->napi_tx); in fs_enet_open()
774 free_irq(fep->interrupt, dev); in fs_enet_open()
775 napi_disable(&fep->napi); in fs_enet_open()
776 napi_disable(&fep->napi_tx); in fs_enet_open()
779 phy_start(fep->phydev); in fs_enet_open()
788 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_close() local
793 napi_disable(&fep->napi); in fs_enet_close()
794 napi_disable(&fep->napi_tx); in fs_enet_close()
795 phy_stop(fep->phydev); in fs_enet_close()
797 spin_lock_irqsave(&fep->lock, flags); in fs_enet_close()
798 spin_lock(&fep->tx_lock); in fs_enet_close()
799 (*fep->ops->stop)(dev); in fs_enet_close()
800 spin_unlock(&fep->tx_lock); in fs_enet_close()
801 spin_unlock_irqrestore(&fep->lock, flags); in fs_enet_close()
804 phy_disconnect(fep->phydev); in fs_enet_close()
805 fep->phydev = NULL; in fs_enet_close()
806 free_irq(fep->interrupt, dev); in fs_enet_close()
813 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_get_stats() local
814 return &fep->stats; in fs_enet_get_stats()
828 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_regs_len() local
830 return (*fep->ops->get_regs_len)(dev); in fs_get_regs_len()
836 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_regs() local
842 spin_lock_irqsave(&fep->lock, flags); in fs_get_regs()
843 r = (*fep->ops->get_regs)(dev, p, &len); in fs_get_regs()
844 spin_unlock_irqrestore(&fep->lock, flags); in fs_get_regs()
852 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_settings() local
854 if (!fep->phydev) in fs_get_settings()
857 return phy_ethtool_gset(fep->phydev, cmd); in fs_get_settings()
862 struct fs_enet_private *fep = netdev_priv(dev); in fs_set_settings() local
864 if (!fep->phydev) in fs_set_settings()
867 return phy_ethtool_sset(fep->phydev, cmd); in fs_set_settings()
877 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_msglevel() local
878 return fep->msg_enable; in fs_get_msglevel()
883 struct fs_enet_private *fep = netdev_priv(dev); in fs_set_msglevel() local
884 fep->msg_enable = value; in fs_set_msglevel()
902 struct fs_enet_private *fep = netdev_priv(dev); in fs_ioctl() local
907 return phy_mii_ioctl(fep->phydev, rq, cmd); in fs_ioctl()
942 struct fs_enet_private *fep; in fs_enet_probe() local
1004 privsize = sizeof(*fep) + in fs_enet_probe()
1018 fep = netdev_priv(ndev); in fs_enet_probe()
1019 fep->dev = &ofdev->dev; in fs_enet_probe()
1020 fep->ndev = ndev; in fs_enet_probe()
1021 fep->fpi = fpi; in fs_enet_probe()
1022 fep->ops = match->data; in fs_enet_probe()
1024 ret = fep->ops->setup_data(ndev); in fs_enet_probe()
1028 fep->rx_skbuff = (struct sk_buff **)&fep[1]; in fs_enet_probe()
1029 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; in fs_enet_probe()
1030 fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring + in fs_enet_probe()
1033 spin_lock_init(&fep->lock); in fs_enet_probe()
1034 spin_lock_init(&fep->tx_lock); in fs_enet_probe()
1040 ret = fep->ops->allocate_bd(ndev); in fs_enet_probe()
1044 fep->rx_bd_base = fep->ring_base; in fs_enet_probe()
1045 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; in fs_enet_probe()
1047 fep->tx_ring = fpi->tx_ring; in fs_enet_probe()
1048 fep->rx_ring = fpi->rx_ring; in fs_enet_probe()
1052 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); in fs_enet_probe()
1053 netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); in fs_enet_probe()
1057 init_timer(&fep->phy_timer_list); in fs_enet_probe()
1072 fep->ops->free_bd(ndev); in fs_enet_probe()
1074 fep->ops->cleanup_data(ndev); in fs_enet_probe()
1089 struct fs_enet_private *fep = netdev_priv(ndev); in fs_enet_remove() local
1093 fep->ops->free_bd(ndev); in fs_enet_remove()
1094 fep->ops->cleanup_data(ndev); in fs_enet_remove()
1095 dev_set_drvdata(fep->dev, NULL); in fs_enet_remove()
1096 of_node_put(fep->fpi->phy_node); in fs_enet_remove()
1097 if (fep->fpi->clk_per) in fs_enet_remove()
1098 clk_disable_unprepare(fep->fpi->clk_per); in fs_enet_remove()
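
For orientation, a minimal sketch (not taken from the driver itself) of the two lookup patterns the matches above repeat: ndo_* callbacks recover the driver-private state from the net_device with netdev_priv() (as at line 69), while the NAPI poll handlers recover it from the embedded napi_struct with container_of() (as at lines 85 and 217). Field and ops names follow the fs_enet_private usage visible in the listing; the surrounding function bodies are illustrative only.

/* Sketch only: assumes the in-tree fs_enet_private definition from fs_enet.h. */
#include <linux/netdevice.h>
#include "fs_enet.h"

static void sketch_ndo_callback(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);	/* private area follows net_device */

	(*fep->ops->set_multicast_list)(dev);		/* as at line 71 */
}

static int sketch_napi_poll(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep =
		container_of(napi, struct fs_enet_private, napi);	/* as at line 85 */
	struct net_device *dev = fep->ndev;		/* as at line 86 */

	(*fep->ops->napi_clear_rx_event)(dev);		/* as at line 104 */
	/* ... receive processing walks fep->cur_rx / fep->rx_bd_base ... */
	return budget;
}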