Lines matching refs: fep — each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark the entries where fep is declared.
225 struct fec_enet_private *fep, in fec_enet_get_nextdesc() argument
230 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_nextdesc()
231 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; in fec_enet_get_nextdesc()
246 if (fep->bufdesc_ex) in fec_enet_get_nextdesc()
256 struct fec_enet_private *fep, in fec_enet_get_prevdesc() argument
261 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_prevdesc()
262 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; in fec_enet_get_prevdesc()
277 if (fep->bufdesc_ex) in fec_enet_get_prevdesc()
285 struct fec_enet_private *fep) in fec_enet_get_bd_index() argument
287 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; in fec_enet_get_bd_index()
290 static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, in fec_enet_get_free_txdesc_num() argument
296 (const char *)txq->cur_tx) / fep->bufdesc_size - 1; in fec_enet_get_free_txdesc_num()
322 struct fec_enet_private *fep = netdev_priv(ndev); in fec_dump() local
330 txq = fep->tx_queue[0]; in fec_dump()
340 bdp = fec_enet_get_nextdesc(bdp, fep, 0); in fec_dump()
372 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_frag_skb() local
388 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_frag_skb()
399 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
402 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_frag_skb()
407 if (fep->bufdesc_ex) { in fec_enet_txq_submit_frag_skb()
408 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_frag_skb()
418 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_frag_skb()
419 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_frag_skb()
420 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_frag_skb()
424 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_frag_skb()
428 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, in fec_enet_txq_submit_frag_skb()
430 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_frag_skb()
446 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_frag_skb()
447 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_txq_submit_frag_skb()
456 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_skb() local
468 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_txq_submit_skb()
493 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_skb()
494 if (((unsigned long) bufaddr) & fep->tx_align || in fec_enet_txq_submit_skb()
495 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_submit_skb()
499 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_submit_skb()
504 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); in fec_enet_txq_submit_skb()
505 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_submit_skb()
518 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
521 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
526 if (fep->bufdesc_ex) { in fec_enet_txq_submit_skb()
531 fep->hwts_tx_en)) in fec_enet_txq_submit_skb()
534 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_submit_skb()
544 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); in fec_enet_txq_submit_skb()
558 bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); in fec_enet_txq_submit_skb()
569 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); in fec_enet_txq_submit_skb()
580 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_put_data_tso() local
592 if (((unsigned long) data) & fep->tx_align || in fec_enet_txq_put_data_tso()
593 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_data_tso()
597 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_data_tso()
601 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); in fec_enet_txq_put_data_tso()
602 if (dma_mapping_error(&fep->pdev->dev, addr)) { in fec_enet_txq_put_data_tso()
612 if (fep->bufdesc_ex) { in fec_enet_txq_put_data_tso()
613 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_data_tso()
626 if (fep->bufdesc_ex) in fec_enet_txq_put_data_tso()
640 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_put_hdr_tso() local
655 if (((unsigned long)bufaddr) & fep->tx_align || in fec_enet_txq_put_hdr_tso()
656 fep->quirks & FEC_QUIRK_SWAP_FRAME) { in fec_enet_txq_put_hdr_tso()
660 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) in fec_enet_txq_put_hdr_tso()
663 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, in fec_enet_txq_put_hdr_tso()
665 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { in fec_enet_txq_put_hdr_tso()
676 if (fep->bufdesc_ex) { in fec_enet_txq_put_hdr_tso()
677 if (fep->quirks & FEC_QUIRK_HAS_AVB) in fec_enet_txq_put_hdr_tso()
694 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_txq_submit_tso() local
703 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { in fec_enet_txq_submit_tso()
723 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_tso()
738 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_tso()
740 bdp, fep); in fec_enet_txq_submit_tso()
753 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_txq_submit_tso()
763 if (!(fep->quirks & FEC_QUIRK_ERR007885) || in fec_enet_txq_submit_tso()
764 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || in fec_enet_txq_submit_tso()
765 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || in fec_enet_txq_submit_tso()
766 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || in fec_enet_txq_submit_tso()
767 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) in fec_enet_txq_submit_tso()
768 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); in fec_enet_txq_submit_tso()
780 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_start_xmit() local
788 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
798 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_start_xmit()
809 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_bd_init() local
816 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_bd_init()
818 rxq = fep->rx_queue[q]; in fec_enet_bd_init()
828 bdp = fec_enet_get_nextdesc(bdp, fep, q); in fec_enet_bd_init()
832 bdp = fec_enet_get_prevdesc(bdp, fep, q); in fec_enet_bd_init()
838 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_bd_init()
840 txq = fep->tx_queue[q]; in fec_enet_bd_init()
852 bdp = fec_enet_get_nextdesc(bdp, fep, q); in fec_enet_bd_init()
856 bdp = fec_enet_get_prevdesc(bdp, fep, q); in fec_enet_bd_init()
864 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_active_rxring() local
867 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_active_rxring()
868 writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); in fec_enet_active_rxring()
873 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_enable_ring() local
878 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_enable_ring()
879 rxq = fep->rx_queue[i]; in fec_enet_enable_ring()
880 writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); in fec_enet_enable_ring()
881 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); in fec_enet_enable_ring()
886 fep->hwp + FEC_RCMR(i)); in fec_enet_enable_ring()
889 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_enable_ring()
890 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
891 writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
896 fep->hwp + FEC_DMA_CFG(i)); in fec_enet_enable_ring()
902 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_reset_skb() local
906 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_reset_skb()
907 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
926 struct fec_enet_private *fep = netdev_priv(ndev); in fec_restart() local
936 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_restart()
937 writel(0, fep->hwp + FEC_ECNTRL); in fec_restart()
939 writel(1, fep->hwp + FEC_ECNTRL); in fec_restart()
947 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
949 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); in fec_restart()
950 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); in fec_restart()
954 writel(0xffffffff, fep->hwp + FEC_IEVENT); in fec_restart()
964 if (fep->full_duplex == DUPLEX_FULL) { in fec_restart()
966 writel(0x04, fep->hwp + FEC_X_CNTRL); in fec_restart()
970 writel(0x0, fep->hwp + FEC_X_CNTRL); in fec_restart()
974 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_restart()
977 if (fep->quirks & FEC_QUIRK_HAS_RACC) { in fec_restart()
979 val = readl(fep->hwp + FEC_RACC); in fec_restart()
980 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) in fec_restart()
984 writel(val, fep->hwp + FEC_RACC); in fec_restart()
992 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
997 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || in fec_restart()
998 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in fec_restart()
999 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || in fec_restart()
1000 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) in fec_restart()
1002 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1008 if (fep->phy_dev) { in fec_restart()
1009 if (fep->phy_dev->speed == SPEED_1000) in fec_restart()
1011 else if (fep->phy_dev->speed == SPEED_100) in fec_restart()
1018 if (fep->quirks & FEC_QUIRK_USE_GASKET) { in fec_restart()
1021 writel(0, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1022 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) in fec_restart()
1030 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) in fec_restart()
1032 if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) in fec_restart()
1034 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); in fec_restart()
1037 writel(2, fep->hwp + FEC_MIIGSK_ENR); in fec_restart()
1044 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || in fec_restart()
1045 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && in fec_restart()
1046 fep->phy_dev && fep->phy_dev->pause)) { in fec_restart()
1050 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); in fec_restart()
1051 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); in fec_restart()
1052 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); in fec_restart()
1053 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); in fec_restart()
1056 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); in fec_restart()
1062 writel(rcntl, fep->hwp + FEC_R_CNTRL); in fec_restart()
1067 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); in fec_restart()
1068 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); in fec_restart()
1071 if (fep->quirks & FEC_QUIRK_ENET_MAC) { in fec_restart()
1075 writel(1 << 8, fep->hwp + FEC_X_WMRK); in fec_restart()
1078 if (fep->bufdesc_ex) in fec_restart()
1083 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); in fec_restart()
1087 writel(ecntl, fep->hwp + FEC_ECNTRL); in fec_restart()
1090 if (fep->bufdesc_ex) in fec_restart()
1094 if (fep->link) in fec_restart()
1095 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_restart()
1097 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); in fec_restart()
1107 struct fec_enet_private *fep = netdev_priv(ndev); in fec_stop() local
1108 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; in fec_stop()
1109 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); in fec_stop()
1113 if (fep->link) { in fec_stop()
1114 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ in fec_stop()
1116 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) in fec_stop()
1124 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1125 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_stop()
1126 writel(0, fep->hwp + FEC_ECNTRL); in fec_stop()
1128 writel(1, fep->hwp + FEC_ECNTRL); in fec_stop()
1131 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_stop()
1133 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); in fec_stop()
1134 val = readl(fep->hwp + FEC_ECNTRL); in fec_stop()
1136 writel(val, fep->hwp + FEC_ECNTRL); in fec_stop()
1141 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_stop()
1144 if (fep->quirks & FEC_QUIRK_ENET_MAC && in fec_stop()
1145 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { in fec_stop()
1146 writel(2, fep->hwp + FEC_ECNTRL); in fec_stop()
1147 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); in fec_stop()
1155 struct fec_enet_private *fep = netdev_priv(ndev); in fec_timeout() local
1161 schedule_work(&fep->tx_timeout_work); in fec_timeout()
1166 struct fec_enet_private *fep = in fec_enet_timeout_work() local
1168 struct net_device *ndev = fep->netdev; in fec_enet_timeout_work()
1172 napi_disable(&fep->napi); in fec_enet_timeout_work()
1177 napi_enable(&fep->napi); in fec_enet_timeout_work()
1183 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, in fec_enet_hwtstamp() argument
1189 spin_lock_irqsave(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1190 ns = timecounter_cyc2time(&fep->tc, ts); in fec_enet_hwtstamp()
1191 spin_unlock_irqrestore(&fep->tmreg_lock, flags); in fec_enet_hwtstamp()
1200 struct fec_enet_private *fep; in fec_enet_tx_queue() local
1209 fep = netdev_priv(ndev); in fec_enet_tx_queue()
1213 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1219 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_tx_queue()
1228 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_tx_queue()
1233 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_tx_queue()
1237 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_tx_queue()
1262 fep->bufdesc_ex) { in fec_enet_tx_queue()
1266 fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); in fec_enet_tx_queue()
1286 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_tx_queue()
1291 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_tx_queue()
1299 readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) in fec_enet_tx_queue()
1300 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); in fec_enet_tx_queue()
1306 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_tx() local
1309 for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { in fec_enet_tx()
1310 clear_bit(queue_id, &fep->work_tx); in fec_enet_tx()
1319 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_new_rxbdp() local
1322 off = ((unsigned long)skb->data) & fep->rx_align; in fec_enet_new_rxbdp()
1324 skb_reserve(skb, fep->rx_align + 1 - off); in fec_enet_new_rxbdp()
1326 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, in fec_enet_new_rxbdp()
1327 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_new_rxbdp()
1329 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { in fec_enet_new_rxbdp()
1341 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_copybreak() local
1344 if (length > fep->rx_copybreak) in fec_enet_copybreak()
1351 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_copybreak()
1352 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_copybreak()
1371 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx_queue() local
1385 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; in fec_enet_rx_queue()
1391 rxq = fep->rx_queue[queue_id]; in fec_enet_rx_queue()
1410 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); in fec_enet_rx_queue()
1443 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); in fec_enet_rx_queue()
1458 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_rx_queue()
1459 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_rx_queue()
1471 if (fep->bufdesc_ex) in fec_enet_rx_queue()
1477 fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { in fec_enet_rx_queue()
1492 if (fep->hwts_rx_en && fep->bufdesc_ex) in fec_enet_rx_queue()
1493 fec_enet_hwtstamp(fep, ebdp->ts, in fec_enet_rx_queue()
1496 if (fep->bufdesc_ex && in fec_enet_rx_queue()
1497 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { in fec_enet_rx_queue()
1512 napi_gro_receive(&fep->napi, skb); in fec_enet_rx_queue()
1515 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, in fec_enet_rx_queue()
1516 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_rx_queue()
1531 if (fep->bufdesc_ex) { in fec_enet_rx_queue()
1540 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); in fec_enet_rx_queue()
1546 writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); in fec_enet_rx_queue()
1557 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx() local
1559 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { in fec_enet_rx()
1566 clear_bit(queue_id, &fep->work_rx); in fec_enet_rx()
1574 fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) in fec_enet_collect_events() argument
1580 fep->work_rx |= (1 << 2); in fec_enet_collect_events()
1582 fep->work_rx |= (1 << 0); in fec_enet_collect_events()
1584 fep->work_rx |= (1 << 1); in fec_enet_collect_events()
1587 fep->work_tx |= (1 << 2); in fec_enet_collect_events()
1589 fep->work_tx |= (1 << 0); in fec_enet_collect_events()
1591 fep->work_tx |= (1 << 1); in fec_enet_collect_events()
1600 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_interrupt() local
1604 int_events = readl(fep->hwp + FEC_IEVENT); in fec_enet_interrupt()
1605 writel(int_events, fep->hwp + FEC_IEVENT); in fec_enet_interrupt()
1606 fec_enet_collect_events(fep, int_events); in fec_enet_interrupt()
1608 if ((fep->work_tx || fep->work_rx) && fep->link) { in fec_enet_interrupt()
1611 if (napi_schedule_prep(&fep->napi)) { in fec_enet_interrupt()
1613 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); in fec_enet_interrupt()
1614 __napi_schedule(&fep->napi); in fec_enet_interrupt()
1620 complete(&fep->mdio_done); in fec_enet_interrupt()
1623 if (fep->ptp_clock) in fec_enet_interrupt()
1624 fec_ptp_check_pps_event(fep); in fec_enet_interrupt()
1632 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_rx_napi() local
1641 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); in fec_enet_rx_napi()
1649 struct fec_enet_private *fep = netdev_priv(ndev); in fec_get_mac() local
1650 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); in fec_get_mac()
1665 struct device_node *np = fep->pdev->dev.of_node; in fec_get_mac()
1691 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); in fec_get_mac()
1693 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); in fec_get_mac()
1713 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; in fec_get_mac()
1723 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_adjust_link() local
1724 struct phy_device *phy_dev = fep->phy_dev; in fec_enet_adjust_link()
1728 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { in fec_enet_adjust_link()
1739 fep->link = 0; in fec_enet_adjust_link()
1741 if (!fep->link) { in fec_enet_adjust_link()
1742 fep->link = phy_dev->link; in fec_enet_adjust_link()
1746 if (fep->full_duplex != phy_dev->duplex) { in fec_enet_adjust_link()
1747 fep->full_duplex = phy_dev->duplex; in fec_enet_adjust_link()
1751 if (phy_dev->speed != fep->speed) { in fec_enet_adjust_link()
1752 fep->speed = phy_dev->speed; in fec_enet_adjust_link()
1758 napi_disable(&fep->napi); in fec_enet_adjust_link()
1763 napi_enable(&fep->napi); in fec_enet_adjust_link()
1766 if (fep->link) { in fec_enet_adjust_link()
1767 napi_disable(&fep->napi); in fec_enet_adjust_link()
1771 napi_enable(&fep->napi); in fec_enet_adjust_link()
1772 fep->link = phy_dev->link; in fec_enet_adjust_link()
1783 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_read() local
1784 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_read()
1792 fep->mii_timeout = 0; in fec_enet_mdio_read()
1793 reinit_completion(&fep->mdio_done); in fec_enet_mdio_read()
1798 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); in fec_enet_mdio_read()
1801 time_left = wait_for_completion_timeout(&fep->mdio_done, in fec_enet_mdio_read()
1804 fep->mii_timeout = 1; in fec_enet_mdio_read()
1805 netdev_err(fep->netdev, "MDIO read timeout\n"); in fec_enet_mdio_read()
1810 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); in fec_enet_mdio_read()
1822 struct fec_enet_private *fep = bus->priv; in fec_enet_mdio_write() local
1823 struct device *dev = &fep->pdev->dev; in fec_enet_mdio_write()
1833 fep->mii_timeout = 0; in fec_enet_mdio_write()
1834 reinit_completion(&fep->mdio_done); in fec_enet_mdio_write()
1840 fep->hwp + FEC_MII_DATA); in fec_enet_mdio_write()
1843 time_left = wait_for_completion_timeout(&fep->mdio_done, in fec_enet_mdio_write()
1846 fep->mii_timeout = 1; in fec_enet_mdio_write()
1847 netdev_err(fep->netdev, "MDIO write timeout\n"); in fec_enet_mdio_write()
1859 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_clk_enable() local
1863 ret = clk_prepare_enable(fep->clk_ahb); in fec_enet_clk_enable()
1866 if (fep->clk_enet_out) { in fec_enet_clk_enable()
1867 ret = clk_prepare_enable(fep->clk_enet_out); in fec_enet_clk_enable()
1871 if (fep->clk_ptp) { in fec_enet_clk_enable()
1872 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1873 ret = clk_prepare_enable(fep->clk_ptp); in fec_enet_clk_enable()
1875 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1878 fep->ptp_clk_on = true; in fec_enet_clk_enable()
1880 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1882 if (fep->clk_ref) { in fec_enet_clk_enable()
1883 ret = clk_prepare_enable(fep->clk_ref); in fec_enet_clk_enable()
1888 clk_disable_unprepare(fep->clk_ahb); in fec_enet_clk_enable()
1889 if (fep->clk_enet_out) in fec_enet_clk_enable()
1890 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
1891 if (fep->clk_ptp) { in fec_enet_clk_enable()
1892 mutex_lock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1893 clk_disable_unprepare(fep->clk_ptp); in fec_enet_clk_enable()
1894 fep->ptp_clk_on = false; in fec_enet_clk_enable()
1895 mutex_unlock(&fep->ptp_clk_mutex); in fec_enet_clk_enable()
1897 if (fep->clk_ref) in fec_enet_clk_enable()
1898 clk_disable_unprepare(fep->clk_ref); in fec_enet_clk_enable()
1904 if (fep->clk_ref) in fec_enet_clk_enable()
1905 clk_disable_unprepare(fep->clk_ref); in fec_enet_clk_enable()
1907 if (fep->clk_enet_out) in fec_enet_clk_enable()
1908 clk_disable_unprepare(fep->clk_enet_out); in fec_enet_clk_enable()
1910 clk_disable_unprepare(fep->clk_ahb); in fec_enet_clk_enable()
1917 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_mii_probe() local
1922 int dev_id = fep->dev_id; in fec_enet_mii_probe()
1924 fep->phy_dev = NULL; in fec_enet_mii_probe()
1926 if (fep->phy_node) { in fec_enet_mii_probe()
1927 phy_dev = of_phy_connect(ndev, fep->phy_node, in fec_enet_mii_probe()
1929 fep->phy_interface); in fec_enet_mii_probe()
1935 if ((fep->mii_bus->phy_mask & (1 << phy_id))) in fec_enet_mii_probe()
1937 if (fep->mii_bus->phy_map[phy_id] == NULL) in fec_enet_mii_probe()
1939 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) in fec_enet_mii_probe()
1943 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); in fec_enet_mii_probe()
1956 fep->phy_interface); in fec_enet_mii_probe()
1965 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { in fec_enet_mii_probe()
1977 fep->phy_dev = phy_dev; in fec_enet_mii_probe()
1978 fep->link = 0; in fec_enet_mii_probe()
1979 fep->full_duplex = 0; in fec_enet_mii_probe()
1982 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), in fec_enet_mii_probe()
1983 fep->phy_dev->irq); in fec_enet_mii_probe()
1992 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_mii_init() local
2013 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { in fec_enet_mii_init()
2016 fep->mii_bus = fec0_mii_bus; in fec_enet_mii_init()
2023 fep->mii_timeout = 0; in fec_enet_mii_init()
2033 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); in fec_enet_mii_init()
2034 if (fep->quirks & FEC_QUIRK_ENET_MAC) in fec_enet_mii_init()
2039 clk_get_rate(fep->clk_ipg)); in fec_enet_mii_init()
2056 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; in fec_enet_mii_init()
2058 fep->phy_speed = mii_speed << 1 | holdtime << 8; in fec_enet_mii_init()
2060 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); in fec_enet_mii_init()
2062 fep->mii_bus = mdiobus_alloc(); in fec_enet_mii_init()
2063 if (fep->mii_bus == NULL) { in fec_enet_mii_init()
2068 fep->mii_bus->name = "fec_enet_mii_bus"; in fec_enet_mii_init()
2069 fep->mii_bus->read = fec_enet_mdio_read; in fec_enet_mii_init()
2070 fep->mii_bus->write = fec_enet_mdio_write; in fec_enet_mii_init()
2071 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in fec_enet_mii_init()
2072 pdev->name, fep->dev_id + 1); in fec_enet_mii_init()
2073 fep->mii_bus->priv = fep; in fec_enet_mii_init()
2074 fep->mii_bus->parent = &pdev->dev; in fec_enet_mii_init()
2076 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); in fec_enet_mii_init()
2077 if (!fep->mii_bus->irq) { in fec_enet_mii_init()
2083 fep->mii_bus->irq[i] = PHY_POLL; in fec_enet_mii_init()
2087 err = of_mdiobus_register(fep->mii_bus, node); in fec_enet_mii_init()
2090 err = mdiobus_register(fep->mii_bus); in fec_enet_mii_init()
2099 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) in fec_enet_mii_init()
2100 fec0_mii_bus = fep->mii_bus; in fec_enet_mii_init()
2105 kfree(fep->mii_bus->irq); in fec_enet_mii_init()
2107 mdiobus_free(fep->mii_bus); in fec_enet_mii_init()
2112 static void fec_enet_mii_remove(struct fec_enet_private *fep) in fec_enet_mii_remove() argument
2115 mdiobus_unregister(fep->mii_bus); in fec_enet_mii_remove()
2116 kfree(fep->mii_bus->irq); in fec_enet_mii_remove()
2117 mdiobus_free(fep->mii_bus); in fec_enet_mii_remove()
2124 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_settings() local
2125 struct phy_device *phydev = fep->phy_dev; in fec_enet_get_settings()
2136 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_settings() local
2137 struct phy_device *phydev = fep->phy_dev; in fec_enet_set_settings()
2148 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_drvinfo() local
2150 strlcpy(info->driver, fep->pdev->dev.driver->name, in fec_enet_get_drvinfo()
2158 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_regs_len() local
2162 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); in fec_enet_get_regs_len()
2219 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_regs() local
2220 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; in fec_enet_get_regs()
2235 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_ts_info() local
2237 if (fep->bufdesc_ex) { in fec_enet_get_ts_info()
2245 if (fep->ptp_clock) in fec_enet_get_ts_info()
2246 info->phc_index = ptp_clock_index(fep->ptp_clock); in fec_enet_get_ts_info()
2266 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_pauseparam() local
2268 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; in fec_enet_get_pauseparam()
2269 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; in fec_enet_get_pauseparam()
2276 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_pauseparam() local
2278 if (!fep->phy_dev) in fec_enet_set_pauseparam()
2287 fep->pause_flag = 0; in fec_enet_set_pauseparam()
2290 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; in fec_enet_set_pauseparam()
2291 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; in fec_enet_set_pauseparam()
2294 fep->phy_dev->supported |= ADVERTISED_Pause; in fec_enet_set_pauseparam()
2295 fep->phy_dev->advertising |= ADVERTISED_Pause; in fec_enet_set_pauseparam()
2297 fep->phy_dev->supported &= ~ADVERTISED_Pause; in fec_enet_set_pauseparam()
2298 fep->phy_dev->advertising &= ~ADVERTISED_Pause; in fec_enet_set_pauseparam()
2304 phy_start_aneg(fep->phy_dev); in fec_enet_set_pauseparam()
2307 napi_disable(&fep->napi); in fec_enet_set_pauseparam()
2312 napi_enable(&fep->napi); in fec_enet_set_pauseparam()
2387 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_get_ethtool_stats() local
2391 data[i] = readl(fep->hwp + fec_stats[i].offset); in fec_enet_get_ethtool_stats()
2420 struct fec_enet_private *fep = netdev_priv(dev); in fec_enet_nway_reset() local
2421 struct phy_device *phydev = fep->phy_dev; in fec_enet_nway_reset()
2435 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_us_to_itr_clock() local
2437 return us * (fep->itr_clk_rate / 64000) / 1000; in fec_enet_us_to_itr_clock()
2443 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_itr_coal_set() local
2446 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) in fec_enet_itr_coal_set()
2450 if (!fep->rx_time_itr || !fep->rx_pkts_itr || in fec_enet_itr_coal_set()
2451 !fep->tx_time_itr || !fep->tx_pkts_itr) in fec_enet_itr_coal_set()
2461 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); in fec_enet_itr_coal_set()
2462 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); in fec_enet_itr_coal_set()
2463 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); in fec_enet_itr_coal_set()
2464 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); in fec_enet_itr_coal_set()
2469 writel(tx_itr, fep->hwp + FEC_TXIC0); in fec_enet_itr_coal_set()
2470 writel(rx_itr, fep->hwp + FEC_RXIC0); in fec_enet_itr_coal_set()
2471 writel(tx_itr, fep->hwp + FEC_TXIC1); in fec_enet_itr_coal_set()
2472 writel(rx_itr, fep->hwp + FEC_RXIC1); in fec_enet_itr_coal_set()
2473 writel(tx_itr, fep->hwp + FEC_TXIC2); in fec_enet_itr_coal_set()
2474 writel(rx_itr, fep->hwp + FEC_RXIC2); in fec_enet_itr_coal_set()
2480 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_coalesce() local
2482 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) in fec_enet_get_coalesce()
2485 ec->rx_coalesce_usecs = fep->rx_time_itr; in fec_enet_get_coalesce()
2486 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; in fec_enet_get_coalesce()
2488 ec->tx_coalesce_usecs = fep->tx_time_itr; in fec_enet_get_coalesce()
2489 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; in fec_enet_get_coalesce()
2497 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_coalesce() local
2500 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) in fec_enet_set_coalesce()
2513 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); in fec_enet_set_coalesce()
2519 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); in fec_enet_set_coalesce()
2525 fep->rx_time_itr = ec->rx_coalesce_usecs; in fec_enet_set_coalesce()
2526 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; in fec_enet_set_coalesce()
2528 fep->tx_time_itr = ec->tx_coalesce_usecs; in fec_enet_set_coalesce()
2529 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; in fec_enet_set_coalesce()
2553 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_get_tunable() local
2558 *(u32 *)data = fep->rx_copybreak; in fec_enet_get_tunable()
2572 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_set_tunable() local
2577 fep->rx_copybreak = *(u32 *)data; in fec_enet_set_tunable()
2590 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_get_wol() local
2592 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { in fec_enet_get_wol()
2594 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; in fec_enet_get_wol()
2603 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_set_wol() local
2605 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) in fec_enet_set_wol()
2613 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; in fec_enet_set_wol()
2614 if (fep->irq[0] > 0) in fec_enet_set_wol()
2615 enable_irq_wake(fep->irq[0]); in fec_enet_set_wol()
2617 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); in fec_enet_set_wol()
2618 if (fep->irq[0] > 0) in fec_enet_set_wol()
2619 disable_irq_wake(fep->irq[0]); in fec_enet_set_wol()
2651 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_ioctl() local
2652 struct phy_device *phydev = fep->phy_dev; in fec_enet_ioctl()
2660 if (fep->bufdesc_ex) { in fec_enet_ioctl()
2672 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_free_buffers() local
2680 for (q = 0; q < fep->num_rx_queues; q++) { in fec_enet_free_buffers()
2681 rxq = fep->rx_queue[q]; in fec_enet_free_buffers()
2687 dma_unmap_single(&fep->pdev->dev, in fec_enet_free_buffers()
2689 FEC_ENET_RX_FRSIZE - fep->rx_align, in fec_enet_free_buffers()
2693 bdp = fec_enet_get_nextdesc(bdp, fep, q); in fec_enet_free_buffers()
2697 for (q = 0; q < fep->num_tx_queues; q++) { in fec_enet_free_buffers()
2698 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
2712 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_free_queue() local
2716 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
2717 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { in fec_enet_free_queue()
2718 txq = fep->tx_queue[i]; in fec_enet_free_queue()
2725 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_free_queue()
2726 kfree(fep->rx_queue[i]); in fec_enet_free_queue()
2727 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_free_queue()
2728 kfree(fep->tx_queue[i]); in fec_enet_free_queue()
2733 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_queue() local
2738 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_alloc_queue()
2745 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
2747 fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; in fec_enet_alloc_queue()
2763 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_alloc_queue()
2764 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), in fec_enet_alloc_queue()
2766 if (!fep->rx_queue[i]) { in fec_enet_alloc_queue()
2771 fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; in fec_enet_alloc_queue()
2772 fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; in fec_enet_alloc_queue()
2784 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_rxq_buffers() local
2790 rxq = fep->rx_queue[queue]; in fec_enet_alloc_rxq_buffers()
2805 if (fep->bufdesc_ex) { in fec_enet_alloc_rxq_buffers()
2810 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_alloc_rxq_buffers()
2814 bdp = fec_enet_get_prevdesc(bdp, fep, queue); in fec_enet_alloc_rxq_buffers()
2826 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_txq_buffers() local
2831 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
2841 if (fep->bufdesc_ex) { in fec_enet_alloc_txq_buffers()
2846 bdp = fec_enet_get_nextdesc(bdp, fep, queue); in fec_enet_alloc_txq_buffers()
2850 bdp = fec_enet_get_prevdesc(bdp, fep, queue); in fec_enet_alloc_txq_buffers()
2862 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_alloc_buffers() local
2865 for (i = 0; i < fep->num_rx_queues; i++) in fec_enet_alloc_buffers()
2869 for (i = 0; i < fep->num_tx_queues; i++) in fec_enet_alloc_buffers()
2878 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_open() local
2881 ret = pm_runtime_get_sync(&fep->pdev->dev); in fec_enet_open()
2885 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_enet_open()
2906 napi_enable(&fep->napi); in fec_enet_open()
2907 phy_start(fep->phy_dev); in fec_enet_open()
2910 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & in fec_enet_open()
2920 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_open()
2921 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_open()
2922 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_open()
2929 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_close() local
2931 phy_stop(fep->phy_dev); in fec_enet_close()
2934 napi_disable(&fep->napi); in fec_enet_close()
2939 phy_disconnect(fep->phy_dev); in fec_enet_close()
2940 fep->phy_dev = NULL; in fec_enet_close()
2943 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_enet_close()
2944 pm_runtime_mark_last_busy(&fep->pdev->dev); in fec_enet_close()
2945 pm_runtime_put_autosuspend(&fep->pdev->dev); in fec_enet_close()
2967 struct fec_enet_private *fep = netdev_priv(ndev); in set_multicast_list() local
2973 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
2975 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
2979 tmp = readl(fep->hwp + FEC_R_CNTRL); in set_multicast_list()
2981 writel(tmp, fep->hwp + FEC_R_CNTRL); in set_multicast_list()
2987 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
2988 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
2995 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
2996 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3016 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3018 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); in set_multicast_list()
3020 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3022 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); in set_multicast_list()
3031 struct fec_enet_private *fep = netdev_priv(ndev); in fec_set_mac_address() local
3050 fep->hwp + FEC_ADDR_LOW); in fec_set_mac_address()
3052 fep->hwp + FEC_ADDR_HIGH); in fec_set_mac_address()
3067 struct fec_enet_private *fep = netdev_priv(dev); in fec_poll_controller() local
3070 if (fep->irq[i] > 0) { in fec_poll_controller()
3071 disable_irq(fep->irq[i]); in fec_poll_controller()
3072 fec_enet_interrupt(fep->irq[i], dev); in fec_poll_controller()
3073 enable_irq(fep->irq[i]); in fec_poll_controller()
3082 struct fec_enet_private *fep = netdev_priv(netdev); in fec_enet_set_netdev_features() local
3090 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3092 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; in fec_enet_set_netdev_features()
3099 struct fec_enet_private *fep = netdev_priv(netdev); in fec_set_features() local
3103 napi_disable(&fep->napi); in fec_set_features()
3110 napi_enable(&fep->napi); in fec_set_features()
3140 struct fec_enet_private *fep = netdev_priv(ndev); in fec_enet_init() local
3149 fep->rx_align = 0xf; in fec_enet_init()
3150 fep->tx_align = 0xf; in fec_enet_init()
3152 fep->rx_align = 0x3; in fec_enet_init()
3153 fep->tx_align = 0x3; in fec_enet_init()
3158 if (fep->bufdesc_ex) in fec_enet_init()
3159 fep->bufdesc_size = sizeof(struct bufdesc_ex); in fec_enet_init()
3161 fep->bufdesc_size = sizeof(struct bufdesc); in fec_enet_init()
3162 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * in fec_enet_init()
3163 fep->bufdesc_size; in fec_enet_init()
3166 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, in fec_enet_init()
3180 for (i = 0; i < fep->num_rx_queues; i++) { in fec_enet_init()
3181 rxq = fep->rx_queue[i]; in fec_enet_init()
3185 if (fep->bufdesc_ex) { in fec_enet_init()
3195 for (i = 0; i < fep->num_tx_queues; i++) { in fec_enet_init()
3196 txq = fep->tx_queue[i]; in fec_enet_init()
3200 if (fep->bufdesc_ex) { in fec_enet_init()
3216 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); in fec_enet_init()
3217 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); in fec_enet_init()
3219 if (fep->quirks & FEC_QUIRK_HAS_VLAN) in fec_enet_init()
3223 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { in fec_enet_init()
3229 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; in fec_enet_init()
3232 if (fep->quirks & FEC_QUIRK_HAS_AVB) { in fec_enet_init()
3233 fep->tx_align = 0; in fec_enet_init()
3234 fep->rx_align = 0x3f; in fec_enet_init()
3321 struct fec_enet_private *fep; in fec_probe() local
3343 fep = netdev_priv(ndev); in fec_probe()
3348 fep->quirks = pdev->id_entry->driver_data; in fec_probe()
3350 fep->netdev = ndev; in fec_probe()
3351 fep->num_rx_queues = num_rx_qs; in fec_probe()
3352 fep->num_tx_queues = num_tx_qs; in fec_probe()
3356 if (fep->quirks & FEC_QUIRK_HAS_GBIT) in fec_probe()
3357 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; in fec_probe()
3364 fep->hwp = devm_ioremap_resource(&pdev->dev, r); in fec_probe()
3365 if (IS_ERR(fep->hwp)) { in fec_probe()
3366 ret = PTR_ERR(fep->hwp); in fec_probe()
3370 fep->pdev = pdev; in fec_probe()
3371 fep->dev_id = dev_id++; in fec_probe()
3376 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; in fec_probe()
3388 fep->phy_node = phy_node; in fec_probe()
3394 fep->phy_interface = pdata->phy; in fec_probe()
3396 fep->phy_interface = PHY_INTERFACE_MODE_MII; in fec_probe()
3398 fep->phy_interface = ret; in fec_probe()
3401 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); in fec_probe()
3402 if (IS_ERR(fep->clk_ipg)) { in fec_probe()
3403 ret = PTR_ERR(fep->clk_ipg); in fec_probe()
3407 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); in fec_probe()
3408 if (IS_ERR(fep->clk_ahb)) { in fec_probe()
3409 ret = PTR_ERR(fep->clk_ahb); in fec_probe()
3413 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); in fec_probe()
3416 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); in fec_probe()
3417 if (IS_ERR(fep->clk_enet_out)) in fec_probe()
3418 fep->clk_enet_out = NULL; in fec_probe()
3420 fep->ptp_clk_on = false; in fec_probe()
3421 mutex_init(&fep->ptp_clk_mutex); in fec_probe()
3424 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); in fec_probe()
3425 if (IS_ERR(fep->clk_ref)) in fec_probe()
3426 fep->clk_ref = NULL; in fec_probe()
3428 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; in fec_probe()
3429 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); in fec_probe()
3430 if (IS_ERR(fep->clk_ptp)) { in fec_probe()
3431 fep->clk_ptp = NULL; in fec_probe()
3432 fep->bufdesc_ex = false; in fec_probe()
3439 ret = clk_prepare_enable(fep->clk_ipg); in fec_probe()
3443 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); in fec_probe()
3444 if (!IS_ERR(fep->reg_phy)) { in fec_probe()
3445 ret = regulator_enable(fep->reg_phy); in fec_probe()
3452 fep->reg_phy = NULL; in fec_probe()
3463 if (fep->bufdesc_ex) in fec_probe()
3483 fep->irq[i] = irq; in fec_probe()
3486 init_completion(&fep->mdio_done); in fec_probe()
3500 device_init_wakeup(&ndev->dev, fep->wol_flag & in fec_probe()
3503 if (fep->bufdesc_ex && fep->ptp_clock) in fec_probe()
3504 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); in fec_probe()
3506 fep->rx_copybreak = COPYBREAK_DEFAULT; in fec_probe()
3507 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); in fec_probe()
3515 fec_enet_mii_remove(fep); in fec_probe()
3520 if (fep->reg_phy) in fec_probe()
3521 regulator_disable(fep->reg_phy); in fec_probe()
3523 clk_disable_unprepare(fep->clk_ipg); in fec_probe()
3539 struct fec_enet_private *fep = netdev_priv(ndev); in fec_drv_remove() local
3541 cancel_work_sync(&fep->tx_timeout_work); in fec_drv_remove()
3544 fec_enet_mii_remove(fep); in fec_drv_remove()
3545 if (fep->reg_phy) in fec_drv_remove()
3546 regulator_disable(fep->reg_phy); in fec_drv_remove()
3547 of_node_put(fep->phy_node); in fec_drv_remove()
3556 struct fec_enet_private *fep = netdev_priv(ndev); in fec_suspend() local
3560 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) in fec_suspend()
3561 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; in fec_suspend()
3562 phy_stop(fep->phy_dev); in fec_suspend()
3563 napi_disable(&fep->napi); in fec_suspend()
3569 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
3570 pinctrl_pm_select_sleep_state(&fep->pdev->dev); in fec_suspend()
3574 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) in fec_suspend()
3575 regulator_disable(fep->reg_phy); in fec_suspend()
3580 if (fep->clk_enet_out || fep->reg_phy) in fec_suspend()
3581 fep->link = 0; in fec_suspend()
3589 struct fec_enet_private *fep = netdev_priv(ndev); in fec_resume() local
3590 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; in fec_resume()
3594 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { in fec_resume()
3595 ret = regulator_enable(fep->reg_phy); in fec_resume()
3607 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { in fec_resume()
3610 val = readl(fep->hwp + FEC_ECNTRL); in fec_resume()
3612 writel(val, fep->hwp + FEC_ECNTRL); in fec_resume()
3613 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; in fec_resume()
3615 pinctrl_pm_select_default_state(&fep->pdev->dev); in fec_resume()
3621 napi_enable(&fep->napi); in fec_resume()
3622 phy_start(fep->phy_dev); in fec_resume()
3629 if (fep->reg_phy) in fec_resume()
3630 regulator_disable(fep->reg_phy); in fec_resume()
3637 struct fec_enet_private *fep = netdev_priv(ndev); in fec_runtime_suspend() local
3639 clk_disable_unprepare(fep->clk_ipg); in fec_runtime_suspend()
3647 struct fec_enet_private *fep = netdev_priv(ndev); in fec_runtime_resume() local
3649 return clk_prepare_enable(fep->clk_ipg); in fec_runtime_resume()
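
Nearly every function in the listing opens with "struct fec_enet_private *fep = netdev_priv(ndev);" and then works through fep->hwp (the mapped register block), fep->quirks, and the per-queue pointers. What follows is a minimal standalone userspace sketch of that access pattern only, not kernel code: every "fake_" name, the register offsets, and the quirk flag are invented stand-ins, and only the shape of the code (driver-private state recovered from the generic device handle, registers touched through a base pointer plus byte offset) mirrors what the listing shows.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in register offsets and quirk bit; not the real FEC register map. */
    #define FAKE_FEC_ECNTRL    0x024
    #define FAKE_FEC_IMASK     0x014
    #define FAKE_QUIRK_HAS_AVB (1u << 0)

    struct fake_net_device {
        char name[16];
        /* driver-private area is allocated immediately after this struct */
    };

    struct fake_fec_private {
        uint8_t *hwp;     /* byte-addressed register window, like void __iomem * */
        uint32_t quirks;
        int link;
    };

    /* Minimal stand-in for netdev_priv(): private data sits right behind
     * the device structure in the same allocation. */
    static void *fake_netdev_priv(struct fake_net_device *ndev)
    {
        return (void *)(ndev + 1);
    }

    static void fake_writel(uint32_t val, uint8_t *addr)
    {
        memcpy(addr, &val, sizeof(val));
    }

    static uint32_t fake_readl(const uint8_t *addr)
    {
        uint32_t val;

        memcpy(&val, addr, sizeof(val));
        return val;
    }

    /* Shaped like the handlers in the listing: recover fep from the device
     * handle, branch on a quirk flag, then poke registers through fep->hwp. */
    static void fake_restart(struct fake_net_device *ndev)
    {
        struct fake_fec_private *fep = fake_netdev_priv(ndev);

        if (fep->quirks & FAKE_QUIRK_HAS_AVB)
            fake_writel(0, fep->hwp + FAKE_FEC_ECNTRL);
        else
            fake_writel(1, fep->hwp + FAKE_FEC_ECNTRL);

        if (fep->link)
            fake_writel(0xffffffff, fep->hwp + FAKE_FEC_IMASK);
    }

    int main(void)
    {
        static uint8_t regs[0x200];   /* pretend MMIO window */
        struct fake_net_device *ndev;
        struct fake_fec_private *fep;

        ndev = calloc(1, sizeof(*ndev) + sizeof(*fep));
        if (!ndev)
            return 1;
        strcpy(ndev->name, "fec0");

        fep = fake_netdev_priv(ndev);
        fep->hwp = regs;
        fep->quirks = FAKE_QUIRK_HAS_AVB;
        fep->link = 1;

        fake_restart(ndev);
        printf("%s: ECNTRL=%u IMASK=0x%08x\n", ndev->name,
               fake_readl(fep->hwp + FAKE_FEC_ECNTRL),
               fake_readl(fep->hwp + FAKE_FEC_IMASK));
        free(ndev);
        return 0;
    }

The value of the pattern the sketch imitates is that the driver-private state lives in the same allocation as the net_device, so any callback that receives the device handle can reach its own bookkeeping and register window without any global lookup, which is why fep appears on essentially every line above.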