Lines Matching refs:fep

69 struct fs_enet_private *fep = netdev_priv(dev); in fs_set_multicast_list() local
71 (*fep->ops->set_multicast_list)(dev); in fs_set_multicast_list()
85 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); in fs_enet_rx_napi() local
86 struct net_device *dev = fep->ndev; in fs_enet_rx_napi()
87 const struct fs_platform_info *fpi = fep->fpi; in fs_enet_rx_napi()
101 bdp = fep->cur_rx; in fs_enet_rx_napi()
104 (*fep->ops->napi_clear_rx_event)(dev); in fs_enet_rx_napi()
107 curidx = bdp - fep->rx_bd_base; in fs_enet_rx_napi()
114 dev_warn(fep->dev, "rcv is not +last\n"); in fs_enet_rx_napi()
121 fep->stats.rx_errors++; in fs_enet_rx_napi()
124 fep->stats.rx_length_errors++; in fs_enet_rx_napi()
127 fep->stats.rx_frame_errors++; in fs_enet_rx_napi()
130 fep->stats.rx_crc_errors++; in fs_enet_rx_napi()
133 fep->stats.rx_crc_errors++; in fs_enet_rx_napi()
135 skb = fep->rx_skbuff[curidx]; in fs_enet_rx_napi()
137 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_rx_napi()
144 skb = fep->rx_skbuff[curidx]; in fs_enet_rx_napi()
146 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_rx_napi()
153 fep->stats.rx_packets++; in fs_enet_rx_napi()
155 fep->stats.rx_bytes += pkt_len + 4; in fs_enet_rx_napi()
182 fep->stats.rx_dropped++; in fs_enet_rx_napi()
187 fep->rx_skbuff[curidx] = skbn; in fs_enet_rx_napi()
188 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, in fs_enet_rx_napi()
200 bdp = fep->rx_bd_base; in fs_enet_rx_napi()
202 (*fep->ops->rx_bd_done)(dev); in fs_enet_rx_napi()
208 fep->cur_rx = bdp; in fs_enet_rx_napi()
213 (*fep->ops->napi_enable_rx)(dev); in fs_enet_rx_napi()
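Taken together, the fs_enet_rx_napi() references above trace the usual NAPI receive-poll shape: clear the pending RX event, walk the buffer-descriptor ring from fep->cur_rx, charge the error counters or hand the frame up, refill and re-arm each slot, wrap at rx_bd_base, and re-enable RX events once the budget is no longer exhausted. The sketch below shows only that shape; struct my_priv and every my_*() helper are hypothetical stand-ins invented here, not fs_enet_private or the driver's ops hooks.

/* Hypothetical illustration of the NAPI RX poll pattern (not fs_enet code). */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/phy.h>
#include <linux/of.h>

struct my_priv {                        /* stand-in for fs_enet_private */
    struct net_device *ndev;
    struct device *dev;                 /* device used for DMA mappings */
    struct napi_struct napi;            /* RX poll context */
    struct napi_struct napi_tx;         /* TX-completion poll context */
    spinlock_t tx_lock;
    struct sk_buff **rx_skbuff;         /* one entry per RX descriptor */
    struct sk_buff **tx_skbuff;         /* one entry per TX descriptor */
    char *mapped_as_page;               /* per-TX-slot "this is a fragment" flag */
    dma_addr_t *tx_dma;                 /* saved mappings; an assumption, the real
                                           driver reads them back via CBDR_BUFADDR */
    unsigned int *tx_len;
    unsigned int rx_ring, tx_ring;      /* ring sizes */
    unsigned int cur_rx, cur_tx, dirty_tx, tx_free;
    struct phy_device *phydev;
    struct device_node *phy_node;
    int oldlink, oldspeed, oldduplex;
    int irq;
};

/* Assumed helpers: pull one completed RX frame and refill its slot;
 * re-enable RX events in hardware (akin to ops->napi_enable_rx above). */
static struct sk_buff *my_rx_get_and_refill(struct my_priv *priv);
static void my_enable_rx_events(struct my_priv *priv);

static int my_rx_poll(struct napi_struct *napi, int budget)
{
    struct my_priv *priv = container_of(napi, struct my_priv, napi);
    struct net_device *dev = priv->ndev;
    struct sk_buff *skb;
    int received = 0;

    while (received < budget &&
           (skb = my_rx_get_and_refill(priv)) != NULL) {
        skb->protocol = eth_type_trans(skb, dev);
        netif_receive_skb(skb);
        received++;
    }

    if (received < budget) {
        napi_complete(napi);            /* done for now, allow RX interrupts */
        my_enable_rx_events(priv);
    }
    return received;
}

The driver's real loop also keeps a received frame only when a replacement skb (skbn) can be allocated, bumping rx_dropped otherwise, which is why the listing shows both the drop counter and a fresh dma_map_single() of skbn->data.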
220 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, in fs_enet_tx_napi() local
222 struct net_device *dev = fep->ndev; in fs_enet_tx_napi()
229 spin_lock(&fep->tx_lock); in fs_enet_tx_napi()
230 bdp = fep->dirty_tx; in fs_enet_tx_napi()
233 (*fep->ops->napi_clear_tx_event)(dev); in fs_enet_tx_napi()
237 dirtyidx = bdp - fep->tx_bd_base; in fs_enet_tx_napi()
239 if (fep->tx_free == fep->tx_ring) in fs_enet_tx_napi()
242 skb = fep->tx_skbuff[dirtyidx]; in fs_enet_tx_napi()
251 fep->stats.tx_heartbeat_errors++; in fs_enet_tx_napi()
253 fep->stats.tx_window_errors++; in fs_enet_tx_napi()
255 fep->stats.tx_aborted_errors++; in fs_enet_tx_napi()
257 fep->stats.tx_fifo_errors++; in fs_enet_tx_napi()
259 fep->stats.tx_carrier_errors++; in fs_enet_tx_napi()
262 fep->stats.tx_errors++; in fs_enet_tx_napi()
266 fep->stats.tx_packets++; in fs_enet_tx_napi()
269 dev_warn(fep->dev, in fs_enet_tx_napi()
278 fep->stats.collisions++; in fs_enet_tx_napi()
281 if (fep->mapped_as_page[dirtyidx]) in fs_enet_tx_napi()
282 dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_tx_napi()
285 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_enet_tx_napi()
293 fep->tx_skbuff[dirtyidx] = NULL; in fs_enet_tx_napi()
302 bdp = fep->tx_bd_base; in fs_enet_tx_napi()
308 if (++fep->tx_free >= MAX_SKB_FRAGS) in fs_enet_tx_napi()
313 fep->dirty_tx = bdp; in fs_enet_tx_napi()
316 (*fep->ops->tx_restart)(dev); in fs_enet_tx_napi()
320 (*fep->ops->napi_enable_tx)(dev); in fs_enet_tx_napi()
323 spin_unlock(&fep->tx_lock); in fs_enet_tx_napi()
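The fs_enet_tx_napi() lines are the matching completion half: under fep->tx_lock the driver walks from fep->dirty_tx, charges the per-descriptor error and collision counters, unmaps the buffer (dma_unmap_page() for slots flagged in mapped_as_page, dma_unmap_single() otherwise), frees the skb, and advances tx_free, waking the queue once at least MAX_SKB_FRAGS descriptors are free again. A simplified, hypothetical sketch of that reclaim, reusing struct my_priv from the first sketch; the tx_dma/tx_len side arrays are an assumption, the real driver reads the address straight back out of the descriptor with CBDR_BUFADDR().

/* Hypothetical TX-completion reclaim (linear buffers only, no per-slot errors). */
static bool my_tx_slot_done(struct my_priv *priv);    /* assumed descriptor-status check */

static void my_tx_reclaim(struct my_priv *priv)
{
    struct net_device *dev = priv->ndev;

    spin_lock(&priv->tx_lock);
    while (priv->tx_free < priv->tx_ring && my_tx_slot_done(priv)) {
        unsigned int i = priv->dirty_tx;
        struct sk_buff *skb = priv->tx_skbuff[i];

        dma_unmap_single(priv->dev, priv->tx_dma[i], priv->tx_len[i],
                         DMA_TO_DEVICE);
        if (skb)                        /* fragment slots carry no skb */
            dev_kfree_skb(skb);
        priv->tx_skbuff[i] = NULL;
        priv->dirty_tx = (i + 1) % priv->tx_ring;   /* wrap like the BD ring */
        priv->tx_free++;
    }
    if (priv->tx_free >= MAX_SKB_FRAGS && netif_queue_stopped(dev))
        netif_wake_queue(dev);          /* mirrors the tx_free >= MAX_SKB_FRAGS test */
    spin_unlock(&priv->tx_lock);
}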
341 struct fs_enet_private *fep; in fs_enet_interrupt() local
348 fep = netdev_priv(dev); in fs_enet_interrupt()
349 fpi = fep->fpi; in fs_enet_interrupt()
352 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { in fs_enet_interrupt()
356 int_clr_events &= ~fep->ev_napi_rx; in fs_enet_interrupt()
358 (*fep->ops->clear_int_events)(dev, int_clr_events); in fs_enet_interrupt()
360 if (int_events & fep->ev_err) in fs_enet_interrupt()
361 (*fep->ops->ev_error)(dev, int_events); in fs_enet_interrupt()
363 if (int_events & fep->ev_rx) { in fs_enet_interrupt()
364 napi_ok = napi_schedule_prep(&fep->napi); in fs_enet_interrupt()
366 (*fep->ops->napi_disable_rx)(dev); in fs_enet_interrupt()
367 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); in fs_enet_interrupt()
372 __napi_schedule(&fep->napi); in fs_enet_interrupt()
375 if (int_events & fep->ev_tx) { in fs_enet_interrupt()
376 napi_ok = napi_schedule_prep(&fep->napi_tx); in fs_enet_interrupt()
378 (*fep->ops->napi_disable_tx)(dev); in fs_enet_interrupt()
379 (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx); in fs_enet_interrupt()
384 __napi_schedule(&fep->napi_tx); in fs_enet_interrupt()
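fs_enet_interrupt() shows the canonical mask-then-poll hand-off: for a pending RX (or TX) event it calls napi_schedule_prep(), and only when NAPI was not already scheduled does it disable that interrupt source in hardware and acknowledge it before __napi_schedule(); the poll routine re-enables the source when it finishes. The real handler loops on get_int_events() and also dispatches error events; the hedged sketch below keeps just the RX hand-off, with the my_*() register helpers and MY_EV_RX invented for illustration.

/* Hypothetical ISR showing the napi_schedule_prep()/__napi_schedule() hand-off. */
#include <linux/interrupt.h>

static u32  my_get_events(struct my_priv *priv);      /* assumed event-register read */
static void my_ack_events(struct my_priv *priv, u32 ev);
static void my_disable_rx_irq(struct my_priv *priv);

#define MY_EV_RX BIT(0)                                /* illustrative event bit */

static irqreturn_t my_isr(int irq, void *dev_id)
{
    struct net_device *dev = dev_id;
    struct my_priv *priv = netdev_priv(dev);
    u32 events = my_get_events(priv);

    if (!events)
        return IRQ_NONE;                /* shared line, interrupt was not ours */

    if (events & MY_EV_RX) {
        if (napi_schedule_prep(&priv->napi)) {
            /* RX stays masked until my_rx_poll() re-enables it. */
            my_disable_rx_irq(priv);
            __napi_schedule(&priv->napi);
        }
    }
    my_ack_events(priv, events);
    return IRQ_HANDLED;
}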
394 struct fs_enet_private *fep = netdev_priv(dev); in fs_init_bds() local
401 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; in fs_init_bds()
402 fep->tx_free = fep->tx_ring; in fs_init_bds()
403 fep->cur_rx = fep->rx_bd_base; in fs_init_bds()
408 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { in fs_init_bds()
414 fep->rx_skbuff[i] = skb; in fs_init_bds()
416 dma_map_single(fep->dev, skb->data, in fs_init_bds()
421 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); in fs_init_bds()
426 for (; i < fep->rx_ring; i++, bdp++) { in fs_init_bds()
427 fep->rx_skbuff[i] = NULL; in fs_init_bds()
428 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); in fs_init_bds()
434 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { in fs_init_bds()
435 fep->tx_skbuff[i] = NULL; in fs_init_bds()
438 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); in fs_init_bds()
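fs_init_bds() resets the ring cursors (dirty_tx, cur_tx, cur_rx, tx_free), pre-fills each RX descriptor with a freshly allocated skb mapped via dma_map_single(), and sets BD_SC_WRAP only on the last descriptor of each ring so the controller loops back to the base. A generic sketch of the RX fill, with my_write_rx_bd() standing in for the CBDW_* descriptor accessors; the dma_mapping_error() check is an addition for good practice, not something visible in the listing.

/* Hypothetical RX ring fill; descriptor writes abstracted into one helper. */
static void my_write_rx_bd(struct my_priv *priv, unsigned int i,
                           dma_addr_t addr, bool ready, bool wrap);

static void my_fill_rx_ring(struct my_priv *priv, unsigned int buf_size)
{
    unsigned int i;

    for (i = 0; i < priv->rx_ring; i++) {
        struct sk_buff *skb = netdev_alloc_skb(priv->ndev, buf_size);
        dma_addr_t addr;

        if (!skb)
            break;                      /* tolerate a partial fill, like the driver */
        addr = dma_map_single(priv->dev, skb->data, buf_size,
                              DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, addr)) {
            dev_kfree_skb(skb);
            break;
        }
        priv->rx_skbuff[i] = skb;
        /* "Ready/empty" bit set; wrap bit only on the last slot. */
        my_write_rx_bd(priv, i, addr, true, i == priv->rx_ring - 1);
    }
    /* Any remaining slots stay unused, carrying only the wrap bit,
     * mirroring the second loop in fs_init_bds(). */
    for (; i < priv->rx_ring; i++) {
        priv->rx_skbuff[i] = NULL;
        my_write_rx_bd(priv, i, 0, false, i == priv->rx_ring - 1);
    }
}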
444 struct fs_enet_private *fep = netdev_priv(dev); in fs_cleanup_bds() local
452 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { in fs_cleanup_bds()
453 if ((skb = fep->tx_skbuff[i]) == NULL) in fs_cleanup_bds()
457 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_cleanup_bds()
460 fep->tx_skbuff[i] = NULL; in fs_cleanup_bds()
467 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { in fs_cleanup_bds()
468 if ((skb = fep->rx_skbuff[i]) == NULL) in fs_cleanup_bds()
472 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), in fs_cleanup_bds()
476 fep->rx_skbuff[i] = NULL; in fs_cleanup_bds()
514 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_start_xmit() local
535 spin_lock(&fep->tx_lock); in fs_enet_start_xmit()
540 bdp = fep->cur_tx; in fs_enet_start_xmit()
542 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { in fs_enet_start_xmit()
544 spin_unlock(&fep->tx_lock); in fs_enet_start_xmit()
550 dev_warn(fep->dev, "tx queue full!.\n"); in fs_enet_start_xmit()
554 curidx = bdp - fep->tx_bd_base; in fs_enet_start_xmit()
557 fep->stats.tx_bytes += len; in fs_enet_start_xmit()
560 fep->tx_free -= nr_frags + 1; in fs_enet_start_xmit()
564 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, in fs_enet_start_xmit()
568 fep->mapped_as_page[curidx] = 0; in fs_enet_start_xmit()
578 bdp = fep->tx_bd_base, curidx = 0; in fs_enet_start_xmit()
581 CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len, in fs_enet_start_xmit()
585 fep->tx_skbuff[curidx] = NULL; in fs_enet_start_xmit()
586 fep->mapped_as_page[curidx] = 1; in fs_enet_start_xmit()
605 fep->tx_skbuff[curidx] = skb; in fs_enet_start_xmit()
611 bdp = fep->tx_bd_base; in fs_enet_start_xmit()
612 fep->cur_tx = bdp; in fs_enet_start_xmit()
614 if (fep->tx_free < MAX_SKB_FRAGS) in fs_enet_start_xmit()
619 (*fep->ops->tx_kickstart)(dev); in fs_enet_start_xmit()
621 spin_unlock(&fep->tx_lock); in fs_enet_start_xmit()
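fs_enet_start_xmit() takes fep->tx_lock, refuses the packet (and stops the queue) when there are not enough free descriptors for the head plus every fragment, maps the head with dma_map_single() and each fragment with skb_frag_dma_map(), then stops the queue pre-emptively when fewer than MAX_SKB_FRAGS slots remain and kicks the hardware. A hypothetical, linear-skb-only sketch of that flow; my_write_tx_bd() and my_tx_kick() are invented, and fragment handling is only noted in comments.

/* Hypothetical ndo_start_xmit; non-fragmented skbs only for brevity. */
static void my_tx_kick(struct my_priv *priv);          /* assumed doorbell, like ops->tx_kickstart */
static void my_write_tx_bd(struct my_priv *priv, unsigned int i,
                           dma_addr_t addr, unsigned int len);

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct my_priv *priv = netdev_priv(dev);
    unsigned int i;
    dma_addr_t addr;

    spin_lock(&priv->tx_lock);

    /* The real test is "tx_free <= nr_frags" (listing line 542), since every
     * fragment needs its own descriptor; here one free slot is enough. */
    if (priv->tx_free == 0) {
        netif_stop_queue(dev);
        spin_unlock(&priv->tx_lock);
        return NETDEV_TX_BUSY;
    }

    i = priv->cur_tx;
    addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
                          DMA_TO_DEVICE);
    if (dma_mapping_error(priv->dev, addr)) {
        spin_unlock(&priv->tx_lock);
        dev_kfree_skb_any(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

    priv->tx_skbuff[i] = skb;
    priv->tx_dma[i] = addr;
    priv->tx_len[i] = skb_headlen(skb);
    my_write_tx_bd(priv, i, addr, skb_headlen(skb));    /* sets the READY bit */
    /* Fragments, if present, would each get a descriptor mapped with
     * skb_frag_dma_map(), as listing lines 578-586 do. */

    priv->cur_tx = (i + 1) % priv->tx_ring;
    priv->tx_free--;
    dev->stats.tx_bytes += skb->len;    /* the driver keeps its own copy in fep->stats */

    if (priv->tx_free < MAX_SKB_FRAGS)
        netif_stop_queue(dev);          /* no headroom for a worst-case next skb */

    my_tx_kick(priv);
    spin_unlock(&priv->tx_lock);
    return NETDEV_TX_OK;
}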
628 struct fs_enet_private *fep = netdev_priv(dev); in fs_timeout() local
632 fep->stats.tx_errors++; in fs_timeout()
634 spin_lock_irqsave(&fep->lock, flags); in fs_timeout()
637 phy_stop(fep->phydev); in fs_timeout()
638 (*fep->ops->stop)(dev); in fs_timeout()
639 (*fep->ops->restart)(dev); in fs_timeout()
640 phy_start(fep->phydev); in fs_timeout()
643 phy_start(fep->phydev); in fs_timeout()
644 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); in fs_timeout()
645 spin_unlock_irqrestore(&fep->lock, flags); in fs_timeout()
656 struct fs_enet_private *fep = netdev_priv(dev); in generic_adjust_link() local
657 struct phy_device *phydev = fep->phydev; in generic_adjust_link()
662 if (phydev->duplex != fep->oldduplex) { in generic_adjust_link()
664 fep->oldduplex = phydev->duplex; in generic_adjust_link()
667 if (phydev->speed != fep->oldspeed) { in generic_adjust_link()
669 fep->oldspeed = phydev->speed; in generic_adjust_link()
672 if (!fep->oldlink) { in generic_adjust_link()
674 fep->oldlink = 1; in generic_adjust_link()
678 fep->ops->restart(dev); in generic_adjust_link()
679 } else if (fep->oldlink) { in generic_adjust_link()
681 fep->oldlink = 0; in generic_adjust_link()
682 fep->oldspeed = 0; in generic_adjust_link()
683 fep->oldduplex = -1; in generic_adjust_link()
686 if (new_state && netif_msg_link(fep)) in generic_adjust_link()
693 struct fs_enet_private *fep = netdev_priv(dev); in fs_adjust_link() local
696 spin_lock_irqsave(&fep->lock, flags); in fs_adjust_link()
698 if(fep->ops->adjust_link) in fs_adjust_link()
699 fep->ops->adjust_link(dev); in fs_adjust_link()
703 spin_unlock_irqrestore(&fep->lock, flags); in fs_adjust_link()
708 struct fs_enet_private *fep = netdev_priv(dev); in fs_init_phy() local
712 fep->oldlink = 0; in fs_init_phy()
713 fep->oldspeed = 0; in fs_init_phy()
714 fep->oldduplex = -1; in fs_init_phy()
716 iface = fep->fpi->use_rmii ? in fs_init_phy()
719 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, in fs_init_phy()
726 fep->phydev = phydev; in fs_init_phy()
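generic_adjust_link() follows the usual phylib callback pattern: compare the phy_device's current link, speed, and duplex against the cached old* values, restart the MAC when something changed, and zero the cache when the link drops; fs_init_phy() seeds that cache and attaches the PHY with of_phy_connect(), choosing RMII or MII from platform data. A hedged sketch of both pieces, with my_restart_mac() standing in for fep->ops->restart and the MII interface mode hard-coded as a placeholder.

/* Hypothetical phylib glue: track link/speed/duplex and restart on change. */
#include <linux/of_mdio.h>

static void my_restart_mac(struct my_priv *priv);      /* assumed, like ops->restart */

static void my_adjust_link(struct net_device *dev)
{
    struct my_priv *priv = netdev_priv(dev);
    struct phy_device *phydev = priv->phydev;
    int new_state = 0;

    if (phydev->link) {
        if (phydev->duplex != priv->oldduplex) {
            new_state = 1;
            priv->oldduplex = phydev->duplex;
        }
        if (phydev->speed != priv->oldspeed) {
            new_state = 1;
            priv->oldspeed = phydev->speed;
        }
        if (!priv->oldlink) {
            new_state = 1;
            priv->oldlink = 1;
        }
        if (new_state)
            my_restart_mac(priv);       /* reprogram the MAC for the new mode */
    } else if (priv->oldlink) {
        new_state = 1;
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;
    }

    if (new_state)
        phy_print_status(phydev);
}

static int my_init_phy(struct net_device *dev)
{
    struct my_priv *priv = netdev_priv(dev);

    priv->oldlink = 0;
    priv->oldspeed = 0;
    priv->oldduplex = -1;

    /* The real driver picks MII or RMII based on fpi->use_rmii. */
    priv->phydev = of_phy_connect(dev, priv->phy_node, my_adjust_link, 0,
                                  PHY_INTERFACE_MODE_MII);
    return priv->phydev ? 0 : -ENODEV;
}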
733 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_open() local
739 fs_init_bds(fep->ndev); in fs_enet_open()
741 napi_enable(&fep->napi); in fs_enet_open()
742 napi_enable(&fep->napi_tx); in fs_enet_open()
745 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, in fs_enet_open()
748 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); in fs_enet_open()
749 napi_disable(&fep->napi); in fs_enet_open()
750 napi_disable(&fep->napi_tx); in fs_enet_open()
756 free_irq(fep->interrupt, dev); in fs_enet_open()
757 napi_disable(&fep->napi); in fs_enet_open()
758 napi_disable(&fep->napi_tx); in fs_enet_open()
761 phy_start(fep->phydev); in fs_enet_open()
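fs_enet_open() brings the interface up in a fixed order: initialize the rings, enable both NAPI contexts, request the shared interrupt, attach the PHY, then phy_start() and start the queue, unwinding in reverse on any failure exactly as the listing's error path does (free_irq() before the napi_disable() calls). A sketch wired from the earlier hypothetical helpers; the RX buffer size passed here is an assumption.

/* Hypothetical ndo_open built from the sketches above. */
static int my_open(struct net_device *dev)
{
    struct my_priv *priv = netdev_priv(dev);
    int err;

    my_fill_rx_ring(priv, 1522);        /* buffer size is an assumption */
    napi_enable(&priv->napi);
    napi_enable(&priv->napi_tx);

    err = request_irq(priv->irq, my_isr, IRQF_SHARED, dev->name, dev);
    if (err) {
        netdev_err(dev, "could not allocate IRQ\n");
        goto out_napi;
    }

    err = my_init_phy(dev);
    if (err)
        goto out_irq;

    phy_start(priv->phydev);
    netif_start_queue(dev);
    return 0;

out_irq:
    free_irq(priv->irq, dev);
out_napi:
    napi_disable(&priv->napi);
    napi_disable(&priv->napi_tx);
    return err;
}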
770 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_close() local
775 napi_disable(&fep->napi); in fs_enet_close()
776 napi_disable(&fep->napi_tx); in fs_enet_close()
777 phy_stop(fep->phydev); in fs_enet_close()
779 spin_lock_irqsave(&fep->lock, flags); in fs_enet_close()
780 spin_lock(&fep->tx_lock); in fs_enet_close()
781 (*fep->ops->stop)(dev); in fs_enet_close()
782 spin_unlock(&fep->tx_lock); in fs_enet_close()
783 spin_unlock_irqrestore(&fep->lock, flags); in fs_enet_close()
786 phy_disconnect(fep->phydev); in fs_enet_close()
787 fep->phydev = NULL; in fs_enet_close()
788 free_irq(fep->interrupt, dev); in fs_enet_close()
795 struct fs_enet_private *fep = netdev_priv(dev); in fs_enet_get_stats() local
796 return &fep->stats; in fs_enet_get_stats()
810 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_regs_len() local
812 return (*fep->ops->get_regs_len)(dev); in fs_get_regs_len()
818 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_regs() local
824 spin_lock_irqsave(&fep->lock, flags); in fs_get_regs()
825 r = (*fep->ops->get_regs)(dev, p, &len); in fs_get_regs()
826 spin_unlock_irqrestore(&fep->lock, flags); in fs_get_regs()
834 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_settings() local
836 if (!fep->phydev) in fs_get_settings()
839 return phy_ethtool_gset(fep->phydev, cmd); in fs_get_settings()
844 struct fs_enet_private *fep = netdev_priv(dev); in fs_set_settings() local
846 if (!fep->phydev) in fs_set_settings()
849 return phy_ethtool_sset(fep->phydev, cmd); in fs_set_settings()
859 struct fs_enet_private *fep = netdev_priv(dev); in fs_get_msglevel() local
860 return fep->msg_enable; in fs_get_msglevel()
865 struct fs_enet_private *fep = netdev_priv(dev); in fs_set_msglevel() local
866 fep->msg_enable = value; in fs_set_msglevel()
884 struct fs_enet_private *fep = netdev_priv(dev); in fs_ioctl() local
889 return phy_mii_ioctl(fep->phydev, rq, cmd); in fs_ioctl()
924 struct fs_enet_private *fep; in fs_enet_probe() local
986 privsize = sizeof(*fep) + in fs_enet_probe()
1000 fep = netdev_priv(ndev); in fs_enet_probe()
1001 fep->dev = &ofdev->dev; in fs_enet_probe()
1002 fep->ndev = ndev; in fs_enet_probe()
1003 fep->fpi = fpi; in fs_enet_probe()
1004 fep->ops = match->data; in fs_enet_probe()
1006 ret = fep->ops->setup_data(ndev); in fs_enet_probe()
1010 fep->rx_skbuff = (struct sk_buff **)&fep[1]; in fs_enet_probe()
1011 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; in fs_enet_probe()
1012 fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring + in fs_enet_probe()
1015 spin_lock_init(&fep->lock); in fs_enet_probe()
1016 spin_lock_init(&fep->tx_lock); in fs_enet_probe()
1022 ret = fep->ops->allocate_bd(ndev); in fs_enet_probe()
1026 fep->rx_bd_base = fep->ring_base; in fs_enet_probe()
1027 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; in fs_enet_probe()
1029 fep->tx_ring = fpi->tx_ring; in fs_enet_probe()
1030 fep->rx_ring = fpi->rx_ring; in fs_enet_probe()
1034 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); in fs_enet_probe()
1035 netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); in fs_enet_probe()
1039 init_timer(&fep->phy_timer_list); in fs_enet_probe()
1054 fep->ops->free_bd(ndev); in fs_enet_probe()
1056 fep->ops->cleanup_data(ndev); in fs_enet_probe()
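Lines 986-1012 use a common allocation trick: the rx/tx sk_buff pointer arrays and the per-slot mapped_as_page flags are carved out of the same allocation as the private struct by passing an enlarged size to alloc_etherdev(), then pointing the arrays just past the struct (&fep[1]). The size expression itself is truncated in the listing; the sketch below is a plausible, hypothetical reconstruction of the idea only, using the my_priv layout from the first sketch.

/* Hypothetical "arrays behind the priv struct" allocation. */
static struct net_device *my_alloc_netdev(unsigned int rx_ring,
                                          unsigned int tx_ring)
{
    struct net_device *ndev;
    struct my_priv *priv;
    size_t privsize = sizeof(*priv) +
                      sizeof(struct sk_buff *) * (rx_ring + tx_ring) +
                      sizeof(char) * tx_ring;    /* mapped_as_page flags */

    ndev = alloc_etherdev(privsize);
    if (!ndev)
        return NULL;

    priv = netdev_priv(ndev);
    priv->ndev = ndev;
    priv->rx_ring = rx_ring;
    priv->tx_ring = tx_ring;
    /* The arrays start immediately after the struct itself. */
    priv->rx_skbuff = (struct sk_buff **)&priv[1];
    priv->tx_skbuff = priv->rx_skbuff + rx_ring;
    priv->mapped_as_page = (char *)(priv->tx_skbuff + tx_ring);
    spin_lock_init(&priv->tx_lock);
    return ndev;
}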
1071 struct fs_enet_private *fep = netdev_priv(ndev); in fs_enet_remove() local
1075 fep->ops->free_bd(ndev); in fs_enet_remove()
1076 fep->ops->cleanup_data(ndev); in fs_enet_remove()
1077 dev_set_drvdata(fep->dev, NULL); in fs_enet_remove()
1078 of_node_put(fep->fpi->phy_node); in fs_enet_remove()
1079 if (fep->fpi->clk_per) in fs_enet_remove()
1080 clk_disable_unprepare(fep->fpi->clk_per); in fs_enet_remove()