Searched refs:fep (Results 1 - 20 of 20) sorted by relevance

/linux-4.4.14/drivers/net/ethernet/freescale/
fec_main.c
225 struct fec_enet_private *fep, fec_enet_get_nextdesc()
230 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; fec_enet_get_nextdesc()
231 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; fec_enet_get_nextdesc()
246 if (fep->bufdesc_ex) fec_enet_get_nextdesc()
256 struct fec_enet_private *fep, fec_enet_get_prevdesc()
261 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; fec_enet_get_prevdesc()
262 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; fec_enet_get_prevdesc()
277 if (fep->bufdesc_ex) fec_enet_get_prevdesc()
285 struct fec_enet_private *fep) fec_enet_get_bd_index()
287 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; fec_enet_get_bd_index()
290 static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, fec_enet_get_free_txdesc_num() argument
296 (const char *)txq->cur_tx) / fep->bufdesc_size - 1; fec_enet_get_free_txdesc_num()
322 struct fec_enet_private *fep = netdev_priv(ndev); fec_dump() local
330 txq = fep->tx_queue[0]; fec_dump()
340 bdp = fec_enet_get_nextdesc(bdp, fep, 0); fec_dump()
372 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_txq_submit_frag_skb() local
388 bdp = fec_enet_get_nextdesc(bdp, fep, queue); fec_enet_txq_submit_frag_skb()
399 if (fep->bufdesc_ex) { fec_enet_txq_submit_frag_skb()
402 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) fec_enet_txq_submit_frag_skb()
407 if (fep->bufdesc_ex) { fec_enet_txq_submit_frag_skb()
408 if (fep->quirks & FEC_QUIRK_HAS_AVB) fec_enet_txq_submit_frag_skb()
418 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); fec_enet_txq_submit_frag_skb()
419 if (((unsigned long) bufaddr) & fep->tx_align || fec_enet_txq_submit_frag_skb()
420 fep->quirks & FEC_QUIRK_SWAP_FRAME) { fec_enet_txq_submit_frag_skb()
424 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) fec_enet_txq_submit_frag_skb()
428 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, fec_enet_txq_submit_frag_skb()
430 if (dma_mapping_error(&fep->pdev->dev, addr)) { fec_enet_txq_submit_frag_skb()
446 bdp = fec_enet_get_nextdesc(bdp, fep, queue); fec_enet_txq_submit_frag_skb()
447 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, fec_enet_txq_submit_frag_skb()
456 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_txq_submit_skb() local
468 entries_free = fec_enet_get_free_txdesc_num(fep, txq); fec_enet_txq_submit_skb()
493 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); fec_enet_txq_submit_skb()
494 if (((unsigned long) bufaddr) & fep->tx_align || fec_enet_txq_submit_skb()
495 fep->quirks & FEC_QUIRK_SWAP_FRAME) { fec_enet_txq_submit_skb()
499 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) fec_enet_txq_submit_skb()
504 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); fec_enet_txq_submit_skb()
505 if (dma_mapping_error(&fep->pdev->dev, addr)) { fec_enet_txq_submit_skb()
518 if (fep->bufdesc_ex) { fec_enet_txq_submit_skb()
521 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) fec_enet_txq_submit_skb()
526 if (fep->bufdesc_ex) { fec_enet_txq_submit_skb()
531 fep->hwts_tx_en)) fec_enet_txq_submit_skb()
534 if (fep->quirks & FEC_QUIRK_HAS_AVB) fec_enet_txq_submit_skb()
544 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); fec_enet_txq_submit_skb()
558 bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); fec_enet_txq_submit_skb()
569 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); fec_enet_txq_submit_skb()
580 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_txq_put_data_tso() local
592 if (((unsigned long) data) & fep->tx_align || fec_enet_txq_put_data_tso()
593 fep->quirks & FEC_QUIRK_SWAP_FRAME) { fec_enet_txq_put_data_tso()
597 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) fec_enet_txq_put_data_tso()
601 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); fec_enet_txq_put_data_tso()
602 if (dma_mapping_error(&fep->pdev->dev, addr)) { fec_enet_txq_put_data_tso()
612 if (fep->bufdesc_ex) { fec_enet_txq_put_data_tso()
613 if (fep->quirks & FEC_QUIRK_HAS_AVB) fec_enet_txq_put_data_tso()
626 if (fep->bufdesc_ex) fec_enet_txq_put_data_tso()
640 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_txq_put_hdr_tso() local
655 if (((unsigned long)bufaddr) & fep->tx_align || fec_enet_txq_put_hdr_tso()
656 fep->quirks & FEC_QUIRK_SWAP_FRAME) { fec_enet_txq_put_hdr_tso()
660 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) fec_enet_txq_put_hdr_tso()
663 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, fec_enet_txq_put_hdr_tso()
665 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { fec_enet_txq_put_hdr_tso()
676 if (fep->bufdesc_ex) { fec_enet_txq_put_hdr_tso()
677 if (fep->quirks & FEC_QUIRK_HAS_AVB) fec_enet_txq_put_hdr_tso()
694 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_txq_submit_tso() local
703 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { fec_enet_txq_submit_tso()
723 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); fec_enet_txq_submit_tso()
738 bdp = fec_enet_get_nextdesc(bdp, fep, queue); fec_enet_txq_submit_tso()
740 bdp, fep); fec_enet_txq_submit_tso()
753 bdp = fec_enet_get_nextdesc(bdp, fep, queue); fec_enet_txq_submit_tso()
763 if (!(fep->quirks & FEC_QUIRK_ERR007885) || fec_enet_txq_submit_tso()
764 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || fec_enet_txq_submit_tso()
765 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || fec_enet_txq_submit_tso()
766 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || fec_enet_txq_submit_tso()
767 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) fec_enet_txq_submit_tso()
768 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); fec_enet_txq_submit_tso()
780 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_start_xmit() local
788 txq = fep->tx_queue[queue]; fec_enet_start_xmit()
798 entries_free = fec_enet_get_free_txdesc_num(fep, txq); fec_enet_start_xmit()
809 struct fec_enet_private *fep = netdev_priv(dev); fec_enet_bd_init() local
816 for (q = 0; q < fep->num_rx_queues; q++) { fec_enet_bd_init()
818 rxq = fep->rx_queue[q]; fec_enet_bd_init()
828 bdp = fec_enet_get_nextdesc(bdp, fep, q); fec_enet_bd_init()
832 bdp = fec_enet_get_prevdesc(bdp, fep, q); fec_enet_bd_init()
838 for (q = 0; q < fep->num_tx_queues; q++) { fec_enet_bd_init()
840 txq = fep->tx_queue[q]; fec_enet_bd_init()
852 bdp = fec_enet_get_nextdesc(bdp, fep, q); fec_enet_bd_init()
856 bdp = fec_enet_get_prevdesc(bdp, fep, q); fec_enet_bd_init()
864 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_active_rxring() local
867 for (i = 0; i < fep->num_rx_queues; i++) fec_enet_active_rxring()
868 writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); fec_enet_active_rxring()
873 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_enable_ring() local
878 for (i = 0; i < fep->num_rx_queues; i++) { fec_enet_enable_ring()
879 rxq = fep->rx_queue[i]; fec_enet_enable_ring()
880 writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); fec_enet_enable_ring()
881 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); fec_enet_enable_ring()
886 fep->hwp + FEC_RCMR(i)); fec_enet_enable_ring()
889 for (i = 0; i < fep->num_tx_queues; i++) { fec_enet_enable_ring()
890 txq = fep->tx_queue[i]; fec_enet_enable_ring()
891 writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); fec_enet_enable_ring()
896 fep->hwp + FEC_DMA_CFG(i)); fec_enet_enable_ring()
902 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_reset_skb() local
906 for (i = 0; i < fep->num_tx_queues; i++) { fec_enet_reset_skb()
907 txq = fep->tx_queue[i]; fec_enet_reset_skb()
926 struct fec_enet_private *fep = netdev_priv(ndev); fec_restart() local
936 if (fep->quirks & FEC_QUIRK_HAS_AVB) { fec_restart()
937 writel(0, fep->hwp + FEC_ECNTRL); fec_restart()
939 writel(1, fep->hwp + FEC_ECNTRL); fec_restart()
947 if (fep->quirks & FEC_QUIRK_ENET_MAC) { fec_restart()
949 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); fec_restart()
950 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); fec_restart()
954 writel(0xffffffff, fep->hwp + FEC_IEVENT); fec_restart()
964 if (fep->full_duplex == DUPLEX_FULL) { fec_restart()
966 writel(0x04, fep->hwp + FEC_X_CNTRL); fec_restart()
970 writel(0x0, fep->hwp + FEC_X_CNTRL); fec_restart()
974 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); fec_restart()
977 if (fep->quirks & FEC_QUIRK_HAS_RACC) { fec_restart()
979 val = readl(fep->hwp + FEC_RACC); fec_restart()
980 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) fec_restart()
984 writel(val, fep->hwp + FEC_RACC); fec_restart()
992 if (fep->quirks & FEC_QUIRK_ENET_MAC) { fec_restart()
997 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || fec_restart()
998 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || fec_restart()
999 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || fec_restart()
1000 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) fec_restart()
1002 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) fec_restart()
1008 if (fep->phy_dev) { fec_restart()
1009 if (fep->phy_dev->speed == SPEED_1000) fec_restart()
1011 else if (fep->phy_dev->speed == SPEED_100) fec_restart()
1018 if (fep->quirks & FEC_QUIRK_USE_GASKET) { fec_restart()
1021 writel(0, fep->hwp + FEC_MIIGSK_ENR); fec_restart()
1022 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) fec_restart()
1030 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) fec_restart()
1032 if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) fec_restart()
1034 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); fec_restart()
1037 writel(2, fep->hwp + FEC_MIIGSK_ENR); fec_restart()
1044 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || fec_restart()
1045 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && fec_restart()
1046 fep->phy_dev && fep->phy_dev->pause)) { fec_restart()
1050 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); fec_restart()
1051 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); fec_restart()
1052 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); fec_restart()
1053 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); fec_restart()
1056 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); fec_restart()
1062 writel(rcntl, fep->hwp + FEC_R_CNTRL); fec_restart()
1067 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); fec_restart()
1068 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); fec_restart()
1071 if (fep->quirks & FEC_QUIRK_ENET_MAC) { fec_restart()
1075 writel(1 << 8, fep->hwp + FEC_X_WMRK); fec_restart()
1078 if (fep->bufdesc_ex) fec_restart()
1083 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); fec_restart()
1087 writel(ecntl, fep->hwp + FEC_ECNTRL); fec_restart()
1090 if (fep->bufdesc_ex) fec_restart()
1094 if (fep->link) fec_restart()
1095 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); fec_restart()
1097 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); fec_restart()
1107 struct fec_enet_private *fep = netdev_priv(ndev); fec_stop() local
1108 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; fec_stop()
1109 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); fec_stop()
1113 if (fep->link) { fec_stop()
1114 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ fec_stop()
1116 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) fec_stop()
1124 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { fec_stop()
1125 if (fep->quirks & FEC_QUIRK_HAS_AVB) { fec_stop()
1126 writel(0, fep->hwp + FEC_ECNTRL); fec_stop()
1128 writel(1, fep->hwp + FEC_ECNTRL); fec_stop()
1131 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); fec_stop()
1133 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); fec_stop()
1134 val = readl(fep->hwp + FEC_ECNTRL); fec_stop()
1136 writel(val, fep->hwp + FEC_ECNTRL); fec_stop()
1141 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); fec_stop()
1144 if (fep->quirks & FEC_QUIRK_ENET_MAC && fec_stop()
1145 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { fec_stop()
1146 writel(2, fep->hwp + FEC_ECNTRL); fec_stop()
1147 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); fec_stop()
1155 struct fec_enet_private *fep = netdev_priv(ndev); fec_timeout() local
1161 schedule_work(&fep->tx_timeout_work); fec_timeout()
1166 struct fec_enet_private *fep = fec_enet_timeout_work() local
1168 struct net_device *ndev = fep->netdev; fec_enet_timeout_work()
1172 napi_disable(&fep->napi); fec_enet_timeout_work()
1177 napi_enable(&fep->napi); fec_enet_timeout_work()
1183 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, fec_enet_hwtstamp() argument
1189 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_enet_hwtstamp()
1190 ns = timecounter_cyc2time(&fep->tc, ts); fec_enet_hwtstamp()
1191 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_enet_hwtstamp()
1200 struct fec_enet_private *fep; fec_enet_tx_queue() local
1209 fep = netdev_priv(ndev); fec_enet_tx_queue()
1213 txq = fep->tx_queue[queue_id]; fec_enet_tx_queue()
1219 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); fec_enet_tx_queue()
1228 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); fec_enet_tx_queue()
1233 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, fec_enet_tx_queue()
1237 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); fec_enet_tx_queue()
1262 fep->bufdesc_ex) { fec_enet_tx_queue()
1266 fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); fec_enet_tx_queue()
1286 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); fec_enet_tx_queue()
1291 entries_free = fec_enet_get_free_txdesc_num(fep, txq); fec_enet_tx_queue()
1299 readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) fec_enet_tx_queue()
1300 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); fec_enet_tx_queue()
1306 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_tx() local
1309 for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { fec_enet_tx()
1310 clear_bit(queue_id, &fep->work_tx); fec_enet_tx()
1319 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_new_rxbdp() local
1322 off = ((unsigned long)skb->data) & fep->rx_align; fec_enet_new_rxbdp()
1324 skb_reserve(skb, fep->rx_align + 1 - off); fec_enet_new_rxbdp()
1326 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, fec_enet_new_rxbdp()
1327 FEC_ENET_RX_FRSIZE - fep->rx_align, fec_enet_new_rxbdp()
1329 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { fec_enet_new_rxbdp()
1341 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_copybreak() local
1344 if (length > fep->rx_copybreak) fec_enet_copybreak()
1351 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, fec_enet_copybreak()
1352 FEC_ENET_RX_FRSIZE - fep->rx_align, fec_enet_copybreak()
1371 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_rx_queue() local
1385 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; fec_enet_rx_queue()
1391 rxq = fep->rx_queue[queue_id]; fec_enet_rx_queue()
1410 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); fec_enet_rx_queue()
1443 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); fec_enet_rx_queue()
1458 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, fec_enet_rx_queue()
1459 FEC_ENET_RX_FRSIZE - fep->rx_align, fec_enet_rx_queue()
1471 if (fep->bufdesc_ex) fec_enet_rx_queue()
1477 fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { fec_enet_rx_queue()
1492 if (fep->hwts_rx_en && fep->bufdesc_ex) fec_enet_rx_queue()
1493 fec_enet_hwtstamp(fep, ebdp->ts, fec_enet_rx_queue()
1496 if (fep->bufdesc_ex && fec_enet_rx_queue()
1497 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { fec_enet_rx_queue()
1512 napi_gro_receive(&fep->napi, skb); fec_enet_rx_queue()
1515 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, fec_enet_rx_queue()
1516 FEC_ENET_RX_FRSIZE - fep->rx_align, fec_enet_rx_queue()
1531 if (fep->bufdesc_ex) { fec_enet_rx_queue()
1540 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); fec_enet_rx_queue()
1546 writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); fec_enet_rx_queue()
1557 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_rx() local
1559 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { fec_enet_rx()
1566 clear_bit(queue_id, &fep->work_rx); fec_enet_rx()
1574 fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) fec_enet_collect_events() argument
1580 fep->work_rx |= (1 << 2); fec_enet_collect_events()
1582 fep->work_rx |= (1 << 0); fec_enet_collect_events()
1584 fep->work_rx |= (1 << 1); fec_enet_collect_events()
1587 fep->work_tx |= (1 << 2); fec_enet_collect_events()
1589 fep->work_tx |= (1 << 0); fec_enet_collect_events()
1591 fep->work_tx |= (1 << 1); fec_enet_collect_events()
1600 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_interrupt() local
1604 int_events = readl(fep->hwp + FEC_IEVENT); fec_enet_interrupt()
1605 writel(int_events, fep->hwp + FEC_IEVENT); fec_enet_interrupt()
1606 fec_enet_collect_events(fep, int_events); fec_enet_interrupt()
1608 if ((fep->work_tx || fep->work_rx) && fep->link) { fec_enet_interrupt()
1611 if (napi_schedule_prep(&fep->napi)) { fec_enet_interrupt()
1613 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); fec_enet_interrupt()
1614 __napi_schedule(&fep->napi); fec_enet_interrupt()
1620 complete(&fep->mdio_done); fec_enet_interrupt()
1623 if (fep->ptp_clock) fec_enet_interrupt()
1624 fec_ptp_check_pps_event(fep); fec_enet_interrupt()
1632 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_rx_napi() local
1641 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); fec_enet_rx_napi()
1649 struct fec_enet_private *fep = netdev_priv(ndev); fec_get_mac() local
1650 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); fec_get_mac()
1665 struct device_node *np = fep->pdev->dev.of_node; fec_get_mac()
1691 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); fec_get_mac()
1693 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); fec_get_mac()
1713 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; fec_get_mac()
1723 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_adjust_link() local
1724 struct phy_device *phy_dev = fep->phy_dev; fec_enet_adjust_link()
1728 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { fec_enet_adjust_link()
1739 fep->link = 0; fec_enet_adjust_link()
1741 if (!fep->link) { fec_enet_adjust_link()
1742 fep->link = phy_dev->link; fec_enet_adjust_link()
1746 if (fep->full_duplex != phy_dev->duplex) { fec_enet_adjust_link()
1747 fep->full_duplex = phy_dev->duplex; fec_enet_adjust_link()
1751 if (phy_dev->speed != fep->speed) { fec_enet_adjust_link()
1752 fep->speed = phy_dev->speed; fec_enet_adjust_link()
1758 napi_disable(&fep->napi); fec_enet_adjust_link()
1763 napi_enable(&fep->napi); fec_enet_adjust_link()
1766 if (fep->link) { fec_enet_adjust_link()
1767 napi_disable(&fep->napi); fec_enet_adjust_link()
1771 napi_enable(&fep->napi); fec_enet_adjust_link()
1772 fep->link = phy_dev->link; fec_enet_adjust_link()
1783 struct fec_enet_private *fep = bus->priv; fec_enet_mdio_read() local
1784 struct device *dev = &fep->pdev->dev; fec_enet_mdio_read()
1792 fep->mii_timeout = 0; fec_enet_mdio_read()
1793 reinit_completion(&fep->mdio_done); fec_enet_mdio_read()
1798 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); fec_enet_mdio_read()
1801 time_left = wait_for_completion_timeout(&fep->mdio_done, fec_enet_mdio_read()
1804 fep->mii_timeout = 1; fec_enet_mdio_read()
1805 netdev_err(fep->netdev, "MDIO read timeout\n"); fec_enet_mdio_read()
1810 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); fec_enet_mdio_read()
1822 struct fec_enet_private *fep = bus->priv; fec_enet_mdio_write() local
1823 struct device *dev = &fep->pdev->dev; fec_enet_mdio_write()
1833 fep->mii_timeout = 0; fec_enet_mdio_write()
1834 reinit_completion(&fep->mdio_done); fec_enet_mdio_write()
1840 fep->hwp + FEC_MII_DATA); fec_enet_mdio_write()
1843 time_left = wait_for_completion_timeout(&fep->mdio_done, fec_enet_mdio_write()
1846 fep->mii_timeout = 1; fec_enet_mdio_write()
1847 netdev_err(fep->netdev, "MDIO write timeout\n"); fec_enet_mdio_write()
1859 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_clk_enable() local
1863 ret = clk_prepare_enable(fep->clk_ahb); fec_enet_clk_enable()
1866 if (fep->clk_enet_out) { fec_enet_clk_enable()
1867 ret = clk_prepare_enable(fep->clk_enet_out); fec_enet_clk_enable()
1871 if (fep->clk_ptp) { fec_enet_clk_enable()
1872 mutex_lock(&fep->ptp_clk_mutex); fec_enet_clk_enable()
1873 ret = clk_prepare_enable(fep->clk_ptp); fec_enet_clk_enable()
1875 mutex_unlock(&fep->ptp_clk_mutex); fec_enet_clk_enable()
1878 fep->ptp_clk_on = true; fec_enet_clk_enable()
1880 mutex_unlock(&fep->ptp_clk_mutex); fec_enet_clk_enable()
1882 if (fep->clk_ref) { fec_enet_clk_enable()
1883 ret = clk_prepare_enable(fep->clk_ref); fec_enet_clk_enable()
1888 clk_disable_unprepare(fep->clk_ahb); fec_enet_clk_enable()
1889 if (fep->clk_enet_out) fec_enet_clk_enable()
1890 clk_disable_unprepare(fep->clk_enet_out); fec_enet_clk_enable()
1891 if (fep->clk_ptp) { fec_enet_clk_enable()
1892 mutex_lock(&fep->ptp_clk_mutex); fec_enet_clk_enable()
1893 clk_disable_unprepare(fep->clk_ptp); fec_enet_clk_enable()
1894 fep->ptp_clk_on = false; fec_enet_clk_enable()
1895 mutex_unlock(&fep->ptp_clk_mutex); fec_enet_clk_enable()
1897 if (fep->clk_ref) fec_enet_clk_enable()
1898 clk_disable_unprepare(fep->clk_ref); fec_enet_clk_enable()
1904 if (fep->clk_ref) fec_enet_clk_enable()
1905 clk_disable_unprepare(fep->clk_ref); fec_enet_clk_enable()
1907 if (fep->clk_enet_out) fec_enet_clk_enable()
1908 clk_disable_unprepare(fep->clk_enet_out); fec_enet_clk_enable()
1910 clk_disable_unprepare(fep->clk_ahb); fec_enet_clk_enable()
1917 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_mii_probe() local
1922 int dev_id = fep->dev_id; fec_enet_mii_probe()
1924 fep->phy_dev = NULL; fec_enet_mii_probe()
1926 if (fep->phy_node) { fec_enet_mii_probe()
1927 phy_dev = of_phy_connect(ndev, fep->phy_node, fec_enet_mii_probe()
1929 fep->phy_interface); fec_enet_mii_probe()
1935 if ((fep->mii_bus->phy_mask & (1 << phy_id))) fec_enet_mii_probe()
1937 if (fep->mii_bus->phy_map[phy_id] == NULL) fec_enet_mii_probe()
1939 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) fec_enet_mii_probe()
1943 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); fec_enet_mii_probe()
1956 fep->phy_interface); fec_enet_mii_probe()
1965 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { fec_enet_mii_probe()
1977 fep->phy_dev = phy_dev; fec_enet_mii_probe()
1978 fep->link = 0; fec_enet_mii_probe()
1979 fep->full_duplex = 0; fec_enet_mii_probe()
1982 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), fec_enet_mii_probe()
1983 fep->phy_dev->irq); fec_enet_mii_probe()
1992 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_mii_init() local
2013 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { fec_enet_mii_init()
2016 fep->mii_bus = fec0_mii_bus; fec_enet_mii_init()
2023 fep->mii_timeout = 0; fec_enet_mii_init()
2033 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); fec_enet_mii_init()
2034 if (fep->quirks & FEC_QUIRK_ENET_MAC) fec_enet_mii_init()
2039 clk_get_rate(fep->clk_ipg)); fec_enet_mii_init()
2056 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; fec_enet_mii_init()
2058 fep->phy_speed = mii_speed << 1 | holdtime << 8; fec_enet_mii_init()
2060 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); fec_enet_mii_init()
2062 fep->mii_bus = mdiobus_alloc(); fec_enet_mii_init()
2063 if (fep->mii_bus == NULL) { fec_enet_mii_init()
2068 fep->mii_bus->name = "fec_enet_mii_bus"; fec_enet_mii_init()
2069 fep->mii_bus->read = fec_enet_mdio_read; fec_enet_mii_init()
2070 fep->mii_bus->write = fec_enet_mdio_write; fec_enet_mii_init()
2071 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", fec_enet_mii_init()
2072 pdev->name, fep->dev_id + 1); fec_enet_mii_init()
2073 fep->mii_bus->priv = fep; fec_enet_mii_init()
2074 fep->mii_bus->parent = &pdev->dev; fec_enet_mii_init()
2076 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); fec_enet_mii_init()
2077 if (!fep->mii_bus->irq) { fec_enet_mii_init()
2083 fep->mii_bus->irq[i] = PHY_POLL; fec_enet_mii_init()
2087 err = of_mdiobus_register(fep->mii_bus, node); fec_enet_mii_init()
2090 err = mdiobus_register(fep->mii_bus); fec_enet_mii_init()
2099 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) fec_enet_mii_init()
2100 fec0_mii_bus = fep->mii_bus; fec_enet_mii_init()
2105 kfree(fep->mii_bus->irq); fec_enet_mii_init()
2107 mdiobus_free(fep->mii_bus); fec_enet_mii_init()
2112 static void fec_enet_mii_remove(struct fec_enet_private *fep) fec_enet_mii_remove() argument
2115 mdiobus_unregister(fep->mii_bus); fec_enet_mii_remove()
2116 kfree(fep->mii_bus->irq); fec_enet_mii_remove()
2117 mdiobus_free(fep->mii_bus); fec_enet_mii_remove()
2124 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_settings() local
2125 struct phy_device *phydev = fep->phy_dev; fec_enet_get_settings()
2136 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_set_settings() local
2137 struct phy_device *phydev = fep->phy_dev; fec_enet_set_settings()
2148 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_drvinfo() local
2150 strlcpy(info->driver, fep->pdev->dev.driver->name, fec_enet_get_drvinfo()
2158 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_regs_len() local
2162 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); fec_enet_get_regs_len()
2219 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_regs() local
2220 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; fec_enet_get_regs()
2235 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_ts_info() local
2237 if (fep->bufdesc_ex) { fec_enet_get_ts_info()
2245 if (fep->ptp_clock) fec_enet_get_ts_info()
2246 info->phc_index = ptp_clock_index(fep->ptp_clock); fec_enet_get_ts_info()
2266 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_pauseparam() local
2268 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; fec_enet_get_pauseparam()
2269 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; fec_enet_get_pauseparam()
2276 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_set_pauseparam() local
2278 if (!fep->phy_dev) fec_enet_set_pauseparam()
2287 fep->pause_flag = 0; fec_enet_set_pauseparam()
2290 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; fec_enet_set_pauseparam()
2291 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; fec_enet_set_pauseparam()
2294 fep->phy_dev->supported |= ADVERTISED_Pause; fec_enet_set_pauseparam()
2295 fep->phy_dev->advertising |= ADVERTISED_Pause; fec_enet_set_pauseparam()
2297 fep->phy_dev->supported &= ~ADVERTISED_Pause; fec_enet_set_pauseparam()
2298 fep->phy_dev->advertising &= ~ADVERTISED_Pause; fec_enet_set_pauseparam()
2304 phy_start_aneg(fep->phy_dev); fec_enet_set_pauseparam()
2307 napi_disable(&fep->napi); fec_enet_set_pauseparam()
2312 napi_enable(&fep->napi); fec_enet_set_pauseparam()
2387 struct fec_enet_private *fep = netdev_priv(dev); fec_enet_get_ethtool_stats() local
2391 data[i] = readl(fep->hwp + fec_stats[i].offset); fec_enet_get_ethtool_stats()
2420 struct fec_enet_private *fep = netdev_priv(dev); fec_enet_nway_reset() local
2421 struct phy_device *phydev = fep->phy_dev; fec_enet_nway_reset()
2435 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_us_to_itr_clock() local
2437 return us * (fep->itr_clk_rate / 64000) / 1000; fec_enet_us_to_itr_clock()
2443 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_itr_coal_set() local
2446 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) fec_enet_itr_coal_set()
2450 if (!fep->rx_time_itr || !fep->rx_pkts_itr || fec_enet_itr_coal_set()
2451 !fep->tx_time_itr || !fep->tx_pkts_itr) fec_enet_itr_coal_set()
2461 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); fec_enet_itr_coal_set()
2462 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); fec_enet_itr_coal_set()
2463 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); fec_enet_itr_coal_set()
2464 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); fec_enet_itr_coal_set()
2469 writel(tx_itr, fep->hwp + FEC_TXIC0); fec_enet_itr_coal_set()
2470 writel(rx_itr, fep->hwp + FEC_RXIC0); fec_enet_itr_coal_set()
2471 writel(tx_itr, fep->hwp + FEC_TXIC1); fec_enet_itr_coal_set()
2472 writel(rx_itr, fep->hwp + FEC_RXIC1); fec_enet_itr_coal_set()
2473 writel(tx_itr, fep->hwp + FEC_TXIC2); fec_enet_itr_coal_set()
2474 writel(rx_itr, fep->hwp + FEC_RXIC2); fec_enet_itr_coal_set()
2480 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_coalesce() local
2482 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) fec_enet_get_coalesce()
2485 ec->rx_coalesce_usecs = fep->rx_time_itr; fec_enet_get_coalesce()
2486 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; fec_enet_get_coalesce()
2488 ec->tx_coalesce_usecs = fep->tx_time_itr; fec_enet_get_coalesce()
2489 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; fec_enet_get_coalesce()
2497 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_set_coalesce() local
2500 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) fec_enet_set_coalesce()
2513 cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); fec_enet_set_coalesce()
2519 cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); fec_enet_set_coalesce()
2525 fep->rx_time_itr = ec->rx_coalesce_usecs; fec_enet_set_coalesce()
2526 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; fec_enet_set_coalesce()
2528 fep->tx_time_itr = ec->tx_coalesce_usecs; fec_enet_set_coalesce()
2529 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; fec_enet_set_coalesce()
2553 struct fec_enet_private *fep = netdev_priv(netdev); fec_enet_get_tunable() local
2558 *(u32 *)data = fep->rx_copybreak; fec_enet_get_tunable()
2572 struct fec_enet_private *fep = netdev_priv(netdev); fec_enet_set_tunable() local
2577 fep->rx_copybreak = *(u32 *)data; fec_enet_set_tunable()
2590 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_get_wol() local
2592 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { fec_enet_get_wol()
2594 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0; fec_enet_get_wol()
2603 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_set_wol() local
2605 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) fec_enet_set_wol()
2613 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; fec_enet_set_wol()
2614 if (fep->irq[0] > 0) fec_enet_set_wol()
2615 enable_irq_wake(fep->irq[0]); fec_enet_set_wol()
2617 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); fec_enet_set_wol()
2618 if (fep->irq[0] > 0) fec_enet_set_wol()
2619 disable_irq_wake(fep->irq[0]); fec_enet_set_wol()
2651 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_ioctl() local
2652 struct phy_device *phydev = fep->phy_dev; fec_enet_ioctl()
2660 if (fep->bufdesc_ex) { fec_enet_ioctl()
2672 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_free_buffers() local
2680 for (q = 0; q < fep->num_rx_queues; q++) { fec_enet_free_buffers()
2681 rxq = fep->rx_queue[q]; fec_enet_free_buffers()
2687 dma_unmap_single(&fep->pdev->dev, fec_enet_free_buffers()
2689 FEC_ENET_RX_FRSIZE - fep->rx_align, fec_enet_free_buffers()
2693 bdp = fec_enet_get_nextdesc(bdp, fep, q); fec_enet_free_buffers()
2697 for (q = 0; q < fep->num_tx_queues; q++) { fec_enet_free_buffers()
2698 txq = fep->tx_queue[q]; fec_enet_free_buffers()
2712 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_free_queue() local
2716 for (i = 0; i < fep->num_tx_queues; i++) fec_enet_free_queue()
2717 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { fec_enet_free_queue()
2718 txq = fep->tx_queue[i]; fec_enet_free_queue()
2725 for (i = 0; i < fep->num_rx_queues; i++) fec_enet_free_queue()
2726 kfree(fep->rx_queue[i]); fec_enet_free_queue()
2727 for (i = 0; i < fep->num_tx_queues; i++) fec_enet_free_queue()
2728 kfree(fep->tx_queue[i]); fec_enet_free_queue()
2733 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_alloc_queue() local
2738 for (i = 0; i < fep->num_tx_queues; i++) { fec_enet_alloc_queue()
2745 fep->tx_queue[i] = txq; fec_enet_alloc_queue()
2747 fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; fec_enet_alloc_queue()
2763 for (i = 0; i < fep->num_rx_queues; i++) { fec_enet_alloc_queue()
2764 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), fec_enet_alloc_queue()
2766 if (!fep->rx_queue[i]) { fec_enet_alloc_queue()
2771 fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; fec_enet_alloc_queue()
2772 fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; fec_enet_alloc_queue()
2784 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_alloc_rxq_buffers() local
2790 rxq = fep->rx_queue[queue]; fec_enet_alloc_rxq_buffers()
2805 if (fep->bufdesc_ex) { fec_enet_alloc_rxq_buffers()
2810 bdp = fec_enet_get_nextdesc(bdp, fep, queue); fec_enet_alloc_rxq_buffers()
2814 bdp = fec_enet_get_prevdesc(bdp, fep, queue); fec_enet_alloc_rxq_buffers()
2826 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_alloc_txq_buffers() local
2831 txq = fep->tx_queue[queue]; fec_enet_alloc_txq_buffers()
2841 if (fep->bufdesc_ex) { fec_enet_alloc_txq_buffers()
2846 bdp = fec_enet_get_nextdesc(bdp, fep, queue); fec_enet_alloc_txq_buffers()
2850 bdp = fec_enet_get_prevdesc(bdp, fep, queue); fec_enet_alloc_txq_buffers()
2862 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_alloc_buffers() local
2865 for (i = 0; i < fep->num_rx_queues; i++) fec_enet_alloc_buffers()
2869 for (i = 0; i < fep->num_tx_queues; i++) fec_enet_alloc_buffers()
2878 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_open() local
2881 ret = pm_runtime_get_sync(&fep->pdev->dev); fec_enet_open()
2885 pinctrl_pm_select_default_state(&fep->pdev->dev); fec_enet_open()
2906 napi_enable(&fep->napi); fec_enet_open()
2907 phy_start(fep->phy_dev); fec_enet_open()
2910 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & fec_enet_open()
2920 pm_runtime_mark_last_busy(&fep->pdev->dev); fec_enet_open()
2921 pm_runtime_put_autosuspend(&fep->pdev->dev); fec_enet_open()
2922 pinctrl_pm_select_sleep_state(&fep->pdev->dev); fec_enet_open()
2929 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_close() local
2931 phy_stop(fep->phy_dev); fec_enet_close()
2934 napi_disable(&fep->napi); fec_enet_close()
2939 phy_disconnect(fep->phy_dev); fec_enet_close()
2940 fep->phy_dev = NULL; fec_enet_close()
2943 pinctrl_pm_select_sleep_state(&fep->pdev->dev); fec_enet_close()
2944 pm_runtime_mark_last_busy(&fep->pdev->dev); fec_enet_close()
2945 pm_runtime_put_autosuspend(&fep->pdev->dev); fec_enet_close()
2967 struct fec_enet_private *fep = netdev_priv(ndev); set_multicast_list() local
2973 tmp = readl(fep->hwp + FEC_R_CNTRL); set_multicast_list()
2975 writel(tmp, fep->hwp + FEC_R_CNTRL); set_multicast_list()
2979 tmp = readl(fep->hwp + FEC_R_CNTRL); set_multicast_list()
2981 writel(tmp, fep->hwp + FEC_R_CNTRL); set_multicast_list()
2987 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); set_multicast_list()
2988 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); set_multicast_list()
2995 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); set_multicast_list()
2996 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); set_multicast_list()
3016 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); netdev_for_each_mc_addr()
3018 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); netdev_for_each_mc_addr()
3020 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); netdev_for_each_mc_addr()
3022 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); netdev_for_each_mc_addr()
3031 struct fec_enet_private *fep = netdev_priv(ndev); fec_set_mac_address() local
3050 fep->hwp + FEC_ADDR_LOW); fec_set_mac_address()
3052 fep->hwp + FEC_ADDR_HIGH); fec_set_mac_address()
3067 struct fec_enet_private *fep = netdev_priv(dev); fec_poll_controller() local
3070 if (fep->irq[i] > 0) { fec_poll_controller()
3071 disable_irq(fep->irq[i]); fec_poll_controller()
3072 fec_enet_interrupt(fep->irq[i], dev); fec_poll_controller()
3073 enable_irq(fep->irq[i]); fec_poll_controller()
3082 struct fec_enet_private *fep = netdev_priv(netdev); fec_enet_set_netdev_features() local
3090 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; fec_enet_set_netdev_features()
3092 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; fec_enet_set_netdev_features()
3099 struct fec_enet_private *fep = netdev_priv(netdev); fec_set_features() local
3103 napi_disable(&fep->napi); fec_set_features()
3110 napi_enable(&fep->napi); fec_set_features()
3140 struct fec_enet_private *fep = netdev_priv(ndev); fec_enet_init() local
3149 fep->rx_align = 0xf; fec_enet_init()
3150 fep->tx_align = 0xf; fec_enet_init()
3152 fep->rx_align = 0x3; fec_enet_init()
3153 fep->tx_align = 0x3; fec_enet_init()
3158 if (fep->bufdesc_ex) fec_enet_init()
3159 fep->bufdesc_size = sizeof(struct bufdesc_ex); fec_enet_init()
3161 fep->bufdesc_size = sizeof(struct bufdesc); fec_enet_init()
3162 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * fec_enet_init()
3163 fep->bufdesc_size; fec_enet_init()
3166 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, fec_enet_init()
3180 for (i = 0; i < fep->num_rx_queues; i++) { fec_enet_init()
3181 rxq = fep->rx_queue[i]; fec_enet_init()
3185 if (fep->bufdesc_ex) { fec_enet_init()
3195 for (i = 0; i < fep->num_tx_queues; i++) { fec_enet_init()
3196 txq = fep->tx_queue[i]; fec_enet_init()
3200 if (fep->bufdesc_ex) { fec_enet_init()
3216 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); fec_enet_init()
3217 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); fec_enet_init()
3219 if (fep->quirks & FEC_QUIRK_HAS_VLAN) fec_enet_init()
3223 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { fec_enet_init()
3229 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; fec_enet_init()
3232 if (fep->quirks & FEC_QUIRK_HAS_AVB) { fec_enet_init()
3233 fep->tx_align = 0; fec_enet_init()
3234 fep->rx_align = 0x3f; fec_enet_init()
3321 struct fec_enet_private *fep; fec_probe() local
3343 fep = netdev_priv(ndev); fec_probe()
3348 fep->quirks = pdev->id_entry->driver_data; fec_probe()
3350 fep->netdev = ndev; fec_probe()
3351 fep->num_rx_queues = num_rx_qs; fec_probe()
3352 fep->num_tx_queues = num_tx_qs; fec_probe()
3356 if (fep->quirks & FEC_QUIRK_HAS_GBIT) fec_probe()
3357 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; fec_probe()
3364 fep->hwp = devm_ioremap_resource(&pdev->dev, r); fec_probe()
3365 if (IS_ERR(fep->hwp)) { fec_probe()
3366 ret = PTR_ERR(fep->hwp); fec_probe()
3370 fep->pdev = pdev; fec_probe()
3371 fep->dev_id = dev_id++; fec_probe()
3376 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; fec_probe()
3388 fep->phy_node = phy_node; fec_probe()
3394 fep->phy_interface = pdata->phy; fec_probe()
3396 fep->phy_interface = PHY_INTERFACE_MODE_MII; fec_probe()
3398 fep->phy_interface = ret; fec_probe()
3401 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); fec_probe()
3402 if (IS_ERR(fep->clk_ipg)) { fec_probe()
3403 ret = PTR_ERR(fep->clk_ipg); fec_probe()
3407 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); fec_probe()
3408 if (IS_ERR(fep->clk_ahb)) { fec_probe()
3409 ret = PTR_ERR(fep->clk_ahb); fec_probe()
3413 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); fec_probe()
3416 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); fec_probe()
3417 if (IS_ERR(fep->clk_enet_out)) fec_probe()
3418 fep->clk_enet_out = NULL; fec_probe()
3420 fep->ptp_clk_on = false; fec_probe()
3421 mutex_init(&fep->ptp_clk_mutex); fec_probe()
3424 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); fec_probe()
3425 if (IS_ERR(fep->clk_ref)) fec_probe()
3426 fep->clk_ref = NULL; fec_probe()
3428 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; fec_probe()
3429 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); fec_probe()
3430 if (IS_ERR(fep->clk_ptp)) { fec_probe()
3431 fep->clk_ptp = NULL; fec_probe()
3432 fep->bufdesc_ex = false; fec_probe()
3439 ret = clk_prepare_enable(fep->clk_ipg); fec_probe()
3443 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); fec_probe()
3444 if (!IS_ERR(fep->reg_phy)) { fec_probe()
3445 ret = regulator_enable(fep->reg_phy); fec_probe()
3452 fep->reg_phy = NULL; fec_probe()
3463 if (fep->bufdesc_ex) fec_probe()
3483 fep->irq[i] = irq; fec_probe()
3486 init_completion(&fep->mdio_done); fec_probe()
3500 device_init_wakeup(&ndev->dev, fep->wol_flag & fec_probe()
3503 if (fep->bufdesc_ex && fep->ptp_clock) fec_probe()
3504 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); fec_probe()
3506 fep->rx_copybreak = COPYBREAK_DEFAULT; fec_probe()
3507 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); fec_probe()
3515 fec_enet_mii_remove(fep); fec_probe()
3520 if (fep->reg_phy) fec_probe()
3521 regulator_disable(fep->reg_phy); fec_probe()
3523 clk_disable_unprepare(fep->clk_ipg); fec_probe()
3539 struct fec_enet_private *fep = netdev_priv(ndev); fec_drv_remove() local
3541 cancel_work_sync(&fep->tx_timeout_work); fec_drv_remove()
3544 fec_enet_mii_remove(fep); fec_drv_remove()
3545 if (fep->reg_phy) fec_drv_remove()
3546 regulator_disable(fep->reg_phy); fec_drv_remove()
3547 of_node_put(fep->phy_node); fec_drv_remove()
3556 struct fec_enet_private *fep = netdev_priv(ndev); fec_suspend() local
3560 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) fec_suspend()
3561 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; fec_suspend()
3562 phy_stop(fep->phy_dev); fec_suspend()
3563 napi_disable(&fep->napi); fec_suspend()
3569 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) fec_suspend()
3570 pinctrl_pm_select_sleep_state(&fep->pdev->dev); fec_suspend()
3574 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) fec_suspend()
3575 regulator_disable(fep->reg_phy); fec_suspend()
3580 if (fep->clk_enet_out || fep->reg_phy) fec_suspend()
3581 fep->link = 0; fec_suspend()
3589 struct fec_enet_private *fep = netdev_priv(ndev); fec_resume() local
3590 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; fec_resume()
3594 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { fec_resume()
3595 ret = regulator_enable(fep->reg_phy); fec_resume()
3607 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { fec_resume()
3610 val = readl(fep->hwp + FEC_ECNTRL); fec_resume()
3612 writel(val, fep->hwp + FEC_ECNTRL); fec_resume()
3613 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; fec_resume()
3615 pinctrl_pm_select_default_state(&fep->pdev->dev); fec_resume()
3621 napi_enable(&fep->napi); fec_resume()
3622 phy_start(fep->phy_dev); fec_resume()
3629 if (fep->reg_phy) fec_resume()
3630 regulator_disable(fep->reg_phy); fec_resume()
3637 struct fec_enet_private *fep = netdev_priv(ndev); fec_runtime_suspend() local
3639 clk_disable_unprepare(fep->clk_ipg); fec_runtime_suspend()
3647 struct fec_enet_private *fep = netdev_priv(ndev); fec_runtime_resume() local
3649 return clk_prepare_enable(fep->clk_ipg); fec_runtime_resume()
224 fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep, int queue_id) fec_enet_get_nextdesc() argument
255 fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep, int queue_id) fec_enet_get_prevdesc() argument
284 fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, struct fec_enet_private *fep) fec_enet_get_bd_index() argument
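The fec_main.c hits above all follow one pattern: each entry point recovers the driver-private state with netdev_priv() and then walks the DMA buffer-descriptor rings by plain pointer arithmetic over fep->bufdesc_size (see fec_enet_get_bd_index() in the hits). Below is a minimal, self-contained sketch of that index calculation only; the struct and helper name are made up for illustration and stand in for the real struct bufdesc layout.

/* Illustrative sketch: mirrors the arithmetic in fec_enet_get_bd_index()
 * from the hits above, with a fake descriptor type instead of struct bufdesc. */
#include <stddef.h>
#include <stdio.h>

struct fake_bufdesc { unsigned int words[8]; };   /* stand-in for struct bufdesc(_ex) */

static int get_bd_index(const void *base, const void *bdp, size_t bufdesc_size)
{
        /* same idea as the kernel helper: byte offset into the ring / descriptor size */
        return (int)(((const char *)bdp - (const char *)base) / bufdesc_size);
}

int main(void)
{
        struct fake_bufdesc ring[16];
        /* descriptor #5 sits 5 * sizeof(struct fake_bufdesc) bytes into the ring */
        printf("%d\n", get_bd_index(ring, &ring[5], sizeof(ring[0])));  /* prints 5 */
        return 0;
}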
fec_ptp.c
105 * @fep: the fec_enet_private structure handle
110 static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) fec_ptp_enable_pps() argument
119 if (!(fep->hwts_tx_en || fep->hwts_rx_en)) { fec_ptp_enable_pps()
120 dev_err(&fep->pdev->dev, "No ptp stack is running\n"); fec_ptp_enable_pps()
124 if (fep->pps_enable == enable) fec_ptp_enable_pps()
127 fep->pps_channel = DEFAULT_PPS_CHANNEL; fec_ptp_enable_pps()
128 fep->reload_period = PPS_OUPUT_RELOAD_PERIOD; fec_ptp_enable_pps()
129 inc = fep->ptp_inc; fec_ptp_enable_pps()
131 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_ptp_enable_pps()
136 writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
142 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
145 writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
146 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
150 timecounter_read(&fep->tc); fec_ptp_enable_pps()
158 tempval = readl(fep->hwp + FEC_ATIME_CTRL); fec_ptp_enable_pps()
160 writel(tempval, fep->hwp + FEC_ATIME_CTRL); fec_ptp_enable_pps()
162 tempval = readl(fep->hwp + FEC_ATIME); fec_ptp_enable_pps()
164 ns = timecounter_cyc2time(&fep->tc, tempval); fec_ptp_enable_pps()
189 * is bigger than fep->cc.mask would be a error. fec_ptp_enable_pps()
191 val &= fep->cc.mask; fec_ptp_enable_pps()
192 writel(val, fep->hwp + FEC_TCCR(fep->pps_channel)); fec_ptp_enable_pps()
195 fep->next_counter = (val + fep->reload_period) & fep->cc.mask; fec_ptp_enable_pps()
198 val = readl(fep->hwp + FEC_ATIME_CTRL); fec_ptp_enable_pps()
200 writel(val, fep->hwp + FEC_ATIME_CTRL); fec_ptp_enable_pps()
203 val = readl(fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
208 writel(val, fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
213 writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel)); fec_ptp_enable_pps()
214 fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; fec_ptp_enable_pps()
216 writel(0, fep->hwp + FEC_TCSR(fep->pps_channel)); fec_ptp_enable_pps()
219 fep->pps_enable = enable; fec_ptp_enable_pps()
220 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_ptp_enable_pps()
235 struct fec_enet_private *fep = fec_ptp_read() local
238 platform_get_device_id(fep->pdev); fec_ptp_read()
241 tempval = readl(fep->hwp + FEC_ATIME_CTRL); fec_ptp_read()
243 writel(tempval, fep->hwp + FEC_ATIME_CTRL); fec_ptp_read()
248 return readl(fep->hwp + FEC_ATIME); fec_ptp_read()
261 struct fec_enet_private *fep = netdev_priv(ndev); fec_ptp_start_cyclecounter() local
265 inc = 1000000000 / fep->cycle_speed; fec_ptp_start_cyclecounter()
268 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_ptp_start_cyclecounter()
271 writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC); fec_ptp_start_cyclecounter()
274 writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD); fec_ptp_start_cyclecounter()
277 fep->hwp + FEC_ATIME_CTRL); fec_ptp_start_cyclecounter()
279 memset(&fep->cc, 0, sizeof(fep->cc)); fec_ptp_start_cyclecounter()
280 fep->cc.read = fec_ptp_read; fec_ptp_start_cyclecounter()
281 fep->cc.mask = CLOCKSOURCE_MASK(31); fec_ptp_start_cyclecounter()
282 fep->cc.shift = 31; fec_ptp_start_cyclecounter()
283 fep->cc.mult = FEC_CC_MULT; fec_ptp_start_cyclecounter()
286 timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real())); fec_ptp_start_cyclecounter()
288 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_ptp_start_cyclecounter()
311 struct fec_enet_private *fep = fec_ptp_adjfreq() local
323 * Try to find the corr_inc between 1 to fep->ptp_inc to fec_ptp_adjfreq()
327 rhs = (u64)ppb * (u64)fep->ptp_inc; fec_ptp_adjfreq()
328 for (i = 1; i <= fep->ptp_inc; i++) { fec_ptp_adjfreq()
339 if (i > fep->ptp_inc) { fec_ptp_adjfreq()
340 corr_inc = fep->ptp_inc; fec_ptp_adjfreq()
345 corr_ns = fep->ptp_inc - corr_inc; fec_ptp_adjfreq()
347 corr_ns = fep->ptp_inc + corr_inc; fec_ptp_adjfreq()
349 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_ptp_adjfreq()
351 tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; fec_ptp_adjfreq()
353 writel(tmp, fep->hwp + FEC_ATIME_INC); fec_ptp_adjfreq()
355 writel(corr_period, fep->hwp + FEC_ATIME_CORR); fec_ptp_adjfreq()
357 timecounter_read(&fep->tc); fec_ptp_adjfreq()
359 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_ptp_adjfreq()
373 struct fec_enet_private *fep = fec_ptp_adjtime() local
377 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_ptp_adjtime()
378 timecounter_adjtime(&fep->tc, delta); fec_ptp_adjtime()
379 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_ptp_adjtime()
419 struct fec_enet_private *fep = fec_ptp_settime() local
426 mutex_lock(&fep->ptp_clk_mutex); fec_ptp_settime()
428 if (!fep->ptp_clk_on) { fec_ptp_settime()
429 mutex_unlock(&fep->ptp_clk_mutex); fec_ptp_settime()
437 counter = ns & fep->cc.mask; fec_ptp_settime()
439 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_ptp_settime()
440 writel(counter, fep->hwp + FEC_ATIME); fec_ptp_settime()
441 timecounter_init(&fep->tc, &fep->cc, ns); fec_ptp_settime()
442 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_ptp_settime()
443 mutex_unlock(&fep->ptp_clk_mutex); fec_ptp_settime()
457 struct fec_enet_private *fep = fec_ptp_enable() local
462 ret = fec_ptp_enable_pps(fep, on); fec_ptp_enable()
477 struct fec_enet_private *fep = netdev_priv(ndev); fec_ptp_set() local
490 fep->hwts_tx_en = 0; fec_ptp_set()
493 fep->hwts_tx_en = 1; fec_ptp_set()
501 if (fep->hwts_rx_en) fec_ptp_set()
502 fep->hwts_rx_en = 0; fec_ptp_set()
507 fep->hwts_rx_en = 1; fec_ptp_set()
518 struct fec_enet_private *fep = netdev_priv(ndev); fec_ptp_get() local
522 config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; fec_ptp_get()
523 config.rx_filter = (fep->hwts_rx_en ? fec_ptp_get()
537 struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); fec_time_keep() local
541 mutex_lock(&fep->ptp_clk_mutex); fec_time_keep()
542 if (fep->ptp_clk_on) { fec_time_keep()
543 spin_lock_irqsave(&fep->tmreg_lock, flags); fec_time_keep()
544 ns = timecounter_read(&fep->tc); fec_time_keep()
545 spin_unlock_irqrestore(&fep->tmreg_lock, flags); fec_time_keep()
547 mutex_unlock(&fep->ptp_clk_mutex); fec_time_keep()
549 schedule_delayed_work(&fep->time_keep, HZ); fec_time_keep()
564 struct fec_enet_private *fep = netdev_priv(ndev); fec_ptp_init() local
566 fep->ptp_caps.owner = THIS_MODULE; fec_ptp_init()
567 snprintf(fep->ptp_caps.name, 16, "fec ptp"); fec_ptp_init()
569 fep->ptp_caps.max_adj = 250000000; fec_ptp_init()
570 fep->ptp_caps.n_alarm = 0; fec_ptp_init()
571 fep->ptp_caps.n_ext_ts = 0; fec_ptp_init()
572 fep->ptp_caps.n_per_out = 0; fec_ptp_init()
573 fep->ptp_caps.n_pins = 0; fec_ptp_init()
574 fep->ptp_caps.pps = 1; fec_ptp_init()
575 fep->ptp_caps.adjfreq = fec_ptp_adjfreq; fec_ptp_init()
576 fep->ptp_caps.adjtime = fec_ptp_adjtime; fec_ptp_init()
577 fep->ptp_caps.gettime64 = fec_ptp_gettime; fec_ptp_init()
578 fep->ptp_caps.settime64 = fec_ptp_settime; fec_ptp_init()
579 fep->ptp_caps.enable = fec_ptp_enable; fec_ptp_init()
581 fep->cycle_speed = clk_get_rate(fep->clk_ptp); fec_ptp_init()
582 fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed; fec_ptp_init()
584 spin_lock_init(&fep->tmreg_lock); fec_ptp_init()
588 INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); fec_ptp_init()
590 fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); fec_ptp_init()
591 if (IS_ERR(fep->ptp_clock)) { fec_ptp_init()
592 fep->ptp_clock = NULL; fec_ptp_init()
596 schedule_delayed_work(&fep->time_keep, HZ); fec_ptp_init()
602 struct fec_enet_private *fep = netdev_priv(ndev); fec_ptp_stop() local
604 cancel_delayed_work_sync(&fep->time_keep); fec_ptp_stop()
605 if (fep->ptp_clock) fec_ptp_stop()
606 ptp_clock_unregister(fep->ptp_clock); fec_ptp_stop()
611 * @fep: the fec_enet_private structure handle
615 uint fec_ptp_check_pps_event(struct fec_enet_private *fep) fec_ptp_check_pps_event() argument
618 u8 channel = fep->pps_channel; fec_ptp_check_pps_event()
621 val = readl(fep->hwp + FEC_TCSR(channel)); fec_ptp_check_pps_event()
626 writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); fec_ptp_check_pps_event()
628 writel(val, fep->hwp + FEC_TCSR(channel)); fec_ptp_check_pps_event()
629 } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); fec_ptp_check_pps_event()
632 fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; fec_ptp_check_pps_event()
635 ptp_clock_event(fep->ptp_clock, &event); fec_ptp_check_pps_event()
fec.h
569 uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
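The fec_ptp.c hits revolve around wiring the ENET hardware counter into the kernel timecounter/cyclecounter framework (fec_ptp_start_cyclecounter() above: fep->cc.read, fep->cc.mask, fep->cc.mult, fep->cc.shift, then timecounter_init()). The conversion that mult/shift pair drives is ns = (cycles * mult) >> shift. The sketch below demonstrates only that scaling in plain userspace C; the values are assumptions for illustration, not the driver's real FEC_CC_MULT, and the hardware register read is omitted.

/* Hedged sketch of the cyclecounter mult/shift scaling the fec_ptp.c hits rely on. */
#include <stdint.h>
#include <stdio.h>

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        /* the scaling a timecounter applies to each counter delta it reads */
        return (cycles * mult) >> shift;
}

int main(void)
{
        /* assumption for illustration: mult == 1 << shift, i.e. the counter
         * already ticks once per nanosecond, so the mapping is 1:1 */
        uint32_t shift = 31;
        uint32_t mult = 1u << 31;
        printf("%llu\n", (unsigned long long)cyc2ns(1000, mult, shift));  /* 1000 ns */
        return 0;
}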
/linux-4.4.14/drivers/net/ethernet/freescale/fs_enet/
fs_enet-main.c
69 struct fs_enet_private *fep = netdev_priv(dev); fs_set_multicast_list() local
71 (*fep->ops->set_multicast_list)(dev); fs_set_multicast_list()
85 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); fs_enet_rx_napi() local
86 struct net_device *dev = fep->ndev; fs_enet_rx_napi()
87 const struct fs_platform_info *fpi = fep->fpi; fs_enet_rx_napi()
101 bdp = fep->cur_rx; fs_enet_rx_napi()
104 (*fep->ops->napi_clear_rx_event)(dev); fs_enet_rx_napi()
107 curidx = bdp - fep->rx_bd_base; fs_enet_rx_napi()
114 dev_warn(fep->dev, "rcv is not +last\n"); fs_enet_rx_napi()
121 fep->stats.rx_errors++; fs_enet_rx_napi()
124 fep->stats.rx_length_errors++; fs_enet_rx_napi()
127 fep->stats.rx_frame_errors++; fs_enet_rx_napi()
130 fep->stats.rx_crc_errors++; fs_enet_rx_napi()
133 fep->stats.rx_crc_errors++; fs_enet_rx_napi()
135 skb = fep->rx_skbuff[curidx]; fs_enet_rx_napi()
137 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), fs_enet_rx_napi()
144 skb = fep->rx_skbuff[curidx]; fs_enet_rx_napi()
146 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), fs_enet_rx_napi()
153 fep->stats.rx_packets++; fs_enet_rx_napi()
155 fep->stats.rx_bytes += pkt_len + 4; fs_enet_rx_napi()
179 fep->stats.rx_dropped++; fs_enet_rx_napi()
184 fep->rx_skbuff[curidx] = skbn; fs_enet_rx_napi()
185 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, fs_enet_rx_napi()
197 bdp = fep->rx_bd_base; fs_enet_rx_napi()
199 (*fep->ops->rx_bd_done)(dev); fs_enet_rx_napi()
205 fep->cur_rx = bdp; fs_enet_rx_napi()
210 (*fep->ops->napi_enable_rx)(dev); fs_enet_rx_napi()
217 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, fs_enet_tx_napi() local
219 struct net_device *dev = fep->ndev; fs_enet_tx_napi()
226 spin_lock(&fep->tx_lock); fs_enet_tx_napi()
227 bdp = fep->dirty_tx; fs_enet_tx_napi()
230 (*fep->ops->napi_clear_tx_event)(dev); fs_enet_tx_napi()
234 dirtyidx = bdp - fep->tx_bd_base; fs_enet_tx_napi()
236 if (fep->tx_free == fep->tx_ring) fs_enet_tx_napi()
239 skb = fep->tx_skbuff[dirtyidx]; fs_enet_tx_napi()
248 fep->stats.tx_heartbeat_errors++; fs_enet_tx_napi()
250 fep->stats.tx_window_errors++; fs_enet_tx_napi()
252 fep->stats.tx_aborted_errors++; fs_enet_tx_napi()
254 fep->stats.tx_fifo_errors++; fs_enet_tx_napi()
256 fep->stats.tx_carrier_errors++; fs_enet_tx_napi()
259 fep->stats.tx_errors++; fs_enet_tx_napi()
263 fep->stats.tx_packets++; fs_enet_tx_napi()
266 dev_warn(fep->dev, fs_enet_tx_napi()
275 fep->stats.collisions++; fs_enet_tx_napi()
278 if (fep->mapped_as_page[dirtyidx]) fs_enet_tx_napi()
279 dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp), fs_enet_tx_napi()
282 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), fs_enet_tx_napi()
290 fep->tx_skbuff[dirtyidx] = NULL; fs_enet_tx_napi()
299 bdp = fep->tx_bd_base; fs_enet_tx_napi()
305 if (++fep->tx_free >= MAX_SKB_FRAGS) fs_enet_tx_napi()
310 fep->dirty_tx = bdp; fs_enet_tx_napi()
313 (*fep->ops->tx_restart)(dev); fs_enet_tx_napi()
317 (*fep->ops->napi_enable_tx)(dev); fs_enet_tx_napi()
320 spin_unlock(&fep->tx_lock); fs_enet_tx_napi()
338 struct fs_enet_private *fep; fs_enet_interrupt() local
345 fep = netdev_priv(dev); fs_enet_interrupt()
346 fpi = fep->fpi; fs_enet_interrupt()
349 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { fs_enet_interrupt()
353 int_clr_events &= ~fep->ev_napi_rx; fs_enet_interrupt()
355 (*fep->ops->clear_int_events)(dev, int_clr_events); fs_enet_interrupt()
357 if (int_events & fep->ev_err) fs_enet_interrupt()
358 (*fep->ops->ev_error)(dev, int_events); fs_enet_interrupt()
360 if (int_events & fep->ev_rx) { fs_enet_interrupt()
361 napi_ok = napi_schedule_prep(&fep->napi); fs_enet_interrupt()
363 (*fep->ops->napi_disable_rx)(dev); fs_enet_interrupt()
364 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); fs_enet_interrupt()
369 __napi_schedule(&fep->napi); fs_enet_interrupt()
372 if (int_events & fep->ev_tx) { fs_enet_interrupt()
373 napi_ok = napi_schedule_prep(&fep->napi_tx); fs_enet_interrupt()
375 (*fep->ops->napi_disable_tx)(dev); fs_enet_interrupt()
376 (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx); fs_enet_interrupt()
381 __napi_schedule(&fep->napi_tx); fs_enet_interrupt()
391 struct fs_enet_private *fep = netdev_priv(dev); fs_init_bds() local
398 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; fs_init_bds()
399 fep->tx_free = fep->tx_ring; fs_init_bds()
400 fep->cur_rx = fep->rx_bd_base; fs_init_bds()
405 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { fs_init_bds()
411 fep->rx_skbuff[i] = skb; fs_init_bds()
413 dma_map_single(fep->dev, skb->data, fs_init_bds()
418 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); fs_init_bds()
423 for (; i < fep->rx_ring; i++, bdp++) { fs_init_bds()
424 fep->rx_skbuff[i] = NULL; fs_init_bds()
425 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); fs_init_bds()
431 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { fs_init_bds()
432 fep->tx_skbuff[i] = NULL; fs_init_bds()
435 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); fs_init_bds()
441 struct fs_enet_private *fep = netdev_priv(dev); fs_cleanup_bds() local
449 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { fs_cleanup_bds()
450 if ((skb = fep->tx_skbuff[i]) == NULL) fs_cleanup_bds()
454 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), fs_cleanup_bds()
457 fep->tx_skbuff[i] = NULL; fs_cleanup_bds()
464 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { fs_cleanup_bds()
465 if ((skb = fep->rx_skbuff[i]) == NULL) fs_cleanup_bds()
469 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), fs_cleanup_bds()
473 fep->rx_skbuff[i] = NULL; fs_cleanup_bds()
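fs_init_bds()/fs_cleanup_bds() above show the usual buffer-descriptor ring life cycle: every RX slot gets an skb whose data is DMA-mapped into the descriptor, the last descriptor carries a WRAP flag so the controller loops back to the ring base, and cleanup unmaps and frees in the same order. A reduced sketch of the RX init loop; the descriptor layout, flag value, and 32-bit address assumption are illustrative only:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    #define SKETCH_BD_WRAP 0x2000   /* placeholder for BD_SC_WRAP */

    struct sketch_bd { u16 sc; u16 len; u32 addr; };   /* assumes 32-bit DMA */

    static int sketch_init_rx_ring(struct device *dev, struct sketch_bd *bd,
                                   struct sk_buff **skbs, int ring, int bufsz)
    {
        int i;

        for (i = 0; i < ring; i++) {
            struct sk_buff *skb = dev_alloc_skb(bufsz);

            if (!skb)
                return -ENOMEM; /* real driver marks remaining slots empty */
            skbs[i] = skb;
            /* dma_mapping_error() check omitted for brevity */
            bd[i].addr = dma_map_single(dev, skb->data, bufsz, DMA_FROM_DEVICE);
            bd[i].len = 0;
            /* only the last descriptor wraps back to the ring base */
            bd[i].sc = (i < ring - 1) ? 0 : SKETCH_BD_WRAP;
        }
        return 0;
    }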
514 struct fs_enet_private *fep = netdev_priv(dev); fs_enet_start_xmit() local
551 spin_lock(&fep->tx_lock); fs_enet_start_xmit()
556 bdp = fep->cur_tx; fs_enet_start_xmit()
559 if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { fs_enet_start_xmit()
561 spin_unlock(&fep->tx_lock); fs_enet_start_xmit()
567 dev_warn(fep->dev, "tx queue full!\n"); fs_enet_start_xmit()
571 curidx = bdp - fep->tx_bd_base; fs_enet_start_xmit()
574 fep->stats.tx_bytes += len; fs_enet_start_xmit()
577 fep->tx_free -= nr_frags + 1; fs_enet_start_xmit()
581 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, fs_enet_start_xmit()
585 fep->mapped_as_page[curidx] = 0; fs_enet_start_xmit()
596 bdp = fep->tx_bd_base, curidx = 0; fs_enet_start_xmit()
599 CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len, fs_enet_start_xmit()
603 fep->tx_skbuff[curidx] = NULL; fs_enet_start_xmit()
604 fep->mapped_as_page[curidx] = 1; fs_enet_start_xmit()
623 fep->tx_skbuff[curidx] = skb; fs_enet_start_xmit()
629 bdp = fep->tx_bd_base; fs_enet_start_xmit()
630 fep->cur_tx = bdp; fs_enet_start_xmit()
632 if (fep->tx_free < MAX_SKB_FRAGS) fs_enet_start_xmit()
637 (*fep->ops->tx_kickstart)(dev); fs_enet_start_xmit()
639 spin_unlock(&fep->tx_lock); fs_enet_start_xmit()
646 struct fs_enet_private *fep = netdev_priv(dev); fs_timeout() local
650 fep->stats.tx_errors++; fs_timeout()
652 spin_lock_irqsave(&fep->lock, flags); fs_timeout()
655 phy_stop(fep->phydev); fs_timeout()
656 (*fep->ops->stop)(dev); fs_timeout()
657 (*fep->ops->restart)(dev); fs_timeout()
658 phy_start(fep->phydev); fs_timeout()
661 phy_start(fep->phydev); fs_timeout()
662 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); fs_timeout()
663 spin_unlock_irqrestore(&fep->lock, flags); fs_timeout()
674 struct fs_enet_private *fep = netdev_priv(dev); generic_adjust_link() local
675 struct phy_device *phydev = fep->phydev; generic_adjust_link()
680 if (phydev->duplex != fep->oldduplex) { generic_adjust_link()
682 fep->oldduplex = phydev->duplex; generic_adjust_link()
685 if (phydev->speed != fep->oldspeed) { generic_adjust_link()
687 fep->oldspeed = phydev->speed; generic_adjust_link()
690 if (!fep->oldlink) { generic_adjust_link()
692 fep->oldlink = 1; generic_adjust_link()
696 fep->ops->restart(dev); generic_adjust_link()
697 } else if (fep->oldlink) { generic_adjust_link()
699 fep->oldlink = 0; generic_adjust_link()
700 fep->oldspeed = 0; generic_adjust_link()
701 fep->oldduplex = -1; generic_adjust_link()
704 if (new_state && netif_msg_link(fep)) generic_adjust_link()
711 struct fs_enet_private *fep = netdev_priv(dev); fs_adjust_link() local
714 spin_lock_irqsave(&fep->lock, flags); fs_adjust_link()
716 if (fep->ops->adjust_link) fs_adjust_link()
717 fep->ops->adjust_link(dev); fs_adjust_link()
721 spin_unlock_irqrestore(&fep->lock, flags); fs_adjust_link()
726 struct fs_enet_private *fep = netdev_priv(dev); fs_init_phy() local
730 fep->oldlink = 0; fs_init_phy()
731 fep->oldspeed = 0; fs_init_phy()
732 fep->oldduplex = -1; fs_init_phy()
734 iface = fep->fpi->use_rmii ? fs_init_phy()
737 phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0, fs_init_phy()
744 fep->phydev = phydev; fs_init_phy()
751 struct fs_enet_private *fep = netdev_priv(dev); fs_enet_open() local
755 /* to initialize the fep->cur_rx,... */ fs_enet_open()
757 fs_init_bds(fep->ndev); fs_enet_open()
759 napi_enable(&fep->napi); fs_enet_open()
760 napi_enable(&fep->napi_tx); fs_enet_open()
763 r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED, fs_enet_open()
766 dev_err(fep->dev, "Could not allocate FS_ENET IRQ!"); fs_enet_open()
767 napi_disable(&fep->napi); fs_enet_open()
768 napi_disable(&fep->napi_tx); fs_enet_open()
774 free_irq(fep->interrupt, dev); fs_enet_open()
775 napi_disable(&fep->napi); fs_enet_open()
776 napi_disable(&fep->napi_tx); fs_enet_open()
779 phy_start(fep->phydev); fs_enet_open()
788 struct fs_enet_private *fep = netdev_priv(dev); fs_enet_close() local
793 napi_disable(&fep->napi); fs_enet_close()
794 napi_disable(&fep->napi_tx); fs_enet_close()
795 phy_stop(fep->phydev); fs_enet_close()
797 spin_lock_irqsave(&fep->lock, flags); fs_enet_close()
798 spin_lock(&fep->tx_lock); fs_enet_close()
799 (*fep->ops->stop)(dev); fs_enet_close()
800 spin_unlock(&fep->tx_lock); fs_enet_close()
801 spin_unlock_irqrestore(&fep->lock, flags); fs_enet_close()
804 phy_disconnect(fep->phydev); fs_enet_close()
805 fep->phydev = NULL; fs_enet_close()
806 free_irq(fep->interrupt, dev); fs_enet_close()
813 struct fs_enet_private *fep = netdev_priv(dev); fs_enet_get_stats() local
814 return &fep->stats; fs_enet_get_stats()
828 struct fs_enet_private *fep = netdev_priv(dev); fs_get_regs_len() local
830 return (*fep->ops->get_regs_len)(dev); fs_get_regs_len()
836 struct fs_enet_private *fep = netdev_priv(dev); fs_get_regs() local
842 spin_lock_irqsave(&fep->lock, flags); fs_get_regs()
843 r = (*fep->ops->get_regs)(dev, p, &len); fs_get_regs()
844 spin_unlock_irqrestore(&fep->lock, flags); fs_get_regs()
852 struct fs_enet_private *fep = netdev_priv(dev); fs_get_settings() local
854 if (!fep->phydev) fs_get_settings()
857 return phy_ethtool_gset(fep->phydev, cmd); fs_get_settings()
862 struct fs_enet_private *fep = netdev_priv(dev); fs_set_settings() local
864 if (!fep->phydev) fs_set_settings()
867 return phy_ethtool_sset(fep->phydev, cmd); fs_set_settings()
877 struct fs_enet_private *fep = netdev_priv(dev); fs_get_msglevel() local
878 return fep->msg_enable; fs_get_msglevel()
883 struct fs_enet_private *fep = netdev_priv(dev); fs_set_msglevel() local
884 fep->msg_enable = value; fs_set_msglevel()
902 struct fs_enet_private *fep = netdev_priv(dev); fs_ioctl() local
907 return phy_mii_ioctl(fep->phydev, rq, cmd); fs_ioctl()
942 struct fs_enet_private *fep; fs_enet_probe() local
1004 privsize = sizeof(*fep) + fs_enet_probe()
1018 fep = netdev_priv(ndev); fs_enet_probe()
1019 fep->dev = &ofdev->dev; fs_enet_probe()
1020 fep->ndev = ndev; fs_enet_probe()
1021 fep->fpi = fpi; fs_enet_probe()
1022 fep->ops = match->data; fs_enet_probe()
1024 ret = fep->ops->setup_data(ndev); fs_enet_probe()
1028 fep->rx_skbuff = (struct sk_buff **)&fep[1]; fs_enet_probe()
1029 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; fs_enet_probe()
1030 fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring + fs_enet_probe()
1033 spin_lock_init(&fep->lock); fs_enet_probe()
1034 spin_lock_init(&fep->tx_lock); fs_enet_probe()
1040 ret = fep->ops->allocate_bd(ndev); fs_enet_probe()
1044 fep->rx_bd_base = fep->ring_base; fs_enet_probe()
1045 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; fs_enet_probe()
1047 fep->tx_ring = fpi->tx_ring; fs_enet_probe()
1048 fep->rx_ring = fpi->rx_ring; fs_enet_probe()
1052 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); fs_enet_probe()
1053 netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2); fs_enet_probe()
1057 init_timer(&fep->phy_timer_list); fs_enet_probe()
1072 fep->ops->free_bd(ndev); fs_enet_probe()
1074 fep->ops->cleanup_data(ndev); fs_enet_probe()
1089 struct fs_enet_private *fep = netdev_priv(ndev); fs_enet_remove() local
1093 fep->ops->free_bd(ndev); fs_enet_remove()
1094 fep->ops->cleanup_data(ndev); fs_enet_remove()
1095 dev_set_drvdata(fep->dev, NULL); fs_enet_remove()
1096 of_node_put(fep->fpi->phy_node); fs_enet_remove()
1097 if (fep->fpi->clk_per) fs_enet_remove()
1098 clk_disable_unprepare(fep->fpi->clk_per); fs_enet_remove()
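Every fs_enet hit above starts the same way: the handler recovers its private state with netdev_priv() and then either touches the ring bookkeeping directly or dispatches through the fep->ops table that the MAC-specific files below (mac-fec.c, mac-scc.c, mac-fcc.c) fill in. A minimal sketch of that shape; the types here are invented stand-ins, not the real fs_enet_private layout:

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct sketch_ops {
        void (*tx_kickstart)(struct net_device *dev);
    };

    struct sketch_private {
        spinlock_t tx_lock;            /* protects the TX ring fields */
        const struct sketch_ops *ops;  /* per-MAC hooks (FEC/SCC/FCC) */
    };

    static void sketch_tx_kick(struct net_device *dev)
    {
        struct sketch_private *fep = netdev_priv(dev);

        spin_lock(&fep->tx_lock);
        (*fep->ops->tx_kickstart)(dev);  /* hardware-specific doorbell */
        spin_unlock(&fep->tx_lock);
    }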
H A Dmac-fec.c97 static int do_pd_setup(struct fs_enet_private *fep) do_pd_setup() argument
99 struct platform_device *ofdev = to_platform_device(fep->dev); do_pd_setup()
101 fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); do_pd_setup()
102 if (fep->interrupt == NO_IRQ) do_pd_setup()
105 fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0); do_pd_setup()
106 if (!fep->fec.fecp) do_pd_setup()
121 struct fs_enet_private *fep = netdev_priv(dev); setup_data() local
123 if (do_pd_setup(fep) != 0) setup_data()
126 fep->fec.hthi = 0; setup_data()
127 fep->fec.htlo = 0; setup_data()
129 fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; setup_data()
130 fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK; setup_data()
131 fep->ev_rx = FEC_RX_EVENT; setup_data()
132 fep->ev_tx = FEC_TX_EVENT; setup_data()
133 fep->ev_err = FEC_ERR_EVENT_MSK; setup_data()
140 struct fs_enet_private *fep = netdev_priv(dev); allocate_bd() local
141 const struct fs_platform_info *fpi = fep->fpi; allocate_bd()
143 fep->ring_base = (void __force __iomem *)dma_alloc_coherent(fep->dev, allocate_bd()
145 sizeof(cbd_t), &fep->ring_mem_addr, allocate_bd()
147 if (fep->ring_base == NULL) allocate_bd()
155 struct fs_enet_private *fep = netdev_priv(dev); free_bd() local
156 const struct fs_platform_info *fpi = fep->fpi; free_bd()
158 if (fep->ring_base) free_bd()
159 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) free_bd()
161 (void __force *)fep->ring_base, free_bd()
162 fep->ring_mem_addr); free_bd()
172 struct fs_enet_private *fep = netdev_priv(dev); set_promiscuous_mode() local
173 struct fec __iomem *fecp = fep->fec.fecp; set_promiscuous_mode()
180 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_start() local
182 fep->fec.hthi = 0; set_multicast_start()
183 fep->fec.htlo = 0; set_multicast_start()
188 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_one() local
213 fep->fec.hthi |= csrVal; set_multicast_one()
215 fep->fec.htlo |= csrVal; set_multicast_one()
220 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_finish() local
221 struct fec __iomem *fecp = fep->fec.fecp; set_multicast_finish()
226 fep->fec.hthi = 0xffffffffU; set_multicast_finish()
227 fep->fec.htlo = 0xffffffffU; set_multicast_finish()
231 FW(fecp, grp_hash_table_high, fep->fec.hthi); set_multicast_finish()
232 FW(fecp, grp_hash_table_low, fep->fec.htlo); set_multicast_finish()
250 struct fs_enet_private *fep = netdev_priv(dev); restart() local
251 struct fec __iomem *fecp = fep->fec.fecp; restart()
252 const struct fs_platform_info *fpi = fep->fpi; restart()
257 struct mii_bus *mii = fep->phydev->bus; restart()
260 r = whack_reset(fep->fec.fecp); restart()
262 dev_err(fep->dev, "FEC Reset FAILED!\n"); restart()
278 FW(fecp, grp_hash_table_high, fep->fec.hthi); restart()
279 FW(fecp, grp_hash_table_low, fep->fec.htlo); restart()
292 rx_bd_base_phys = fep->ring_mem_addr; restart()
322 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); restart()
336 if (fep->phydev->duplex) { restart()
362 struct fs_enet_private *fep = netdev_priv(dev); stop() local
363 const struct fs_platform_info *fpi = fep->fpi; stop()
364 struct fec __iomem *fecp = fep->fec.fecp; stop()
366 struct fec_info *feci = fep->phydev->bus->priv; stop()
379 dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n"); stop()
401 struct fs_enet_private *fep = netdev_priv(dev); napi_clear_rx_event() local
402 struct fec __iomem *fecp = fep->fec.fecp; napi_clear_rx_event()
409 struct fs_enet_private *fep = netdev_priv(dev); napi_enable_rx() local
410 struct fec __iomem *fecp = fep->fec.fecp; napi_enable_rx()
417 struct fs_enet_private *fep = netdev_priv(dev); napi_disable_rx() local
418 struct fec __iomem *fecp = fep->fec.fecp; napi_disable_rx()
425 struct fs_enet_private *fep = netdev_priv(dev); napi_clear_tx_event() local
426 struct fec __iomem *fecp = fep->fec.fecp; napi_clear_tx_event()
433 struct fs_enet_private *fep = netdev_priv(dev); napi_enable_tx() local
434 struct fec __iomem *fecp = fep->fec.fecp; napi_enable_tx()
441 struct fs_enet_private *fep = netdev_priv(dev); napi_disable_tx() local
442 struct fec __iomem *fecp = fep->fec.fecp; napi_disable_tx()
449 struct fs_enet_private *fep = netdev_priv(dev); rx_bd_done() local
450 struct fec __iomem *fecp = fep->fec.fecp; rx_bd_done()
457 struct fs_enet_private *fep = netdev_priv(dev); tx_kickstart() local
458 struct fec __iomem *fecp = fep->fec.fecp; tx_kickstart()
465 struct fs_enet_private *fep = netdev_priv(dev); get_int_events() local
466 struct fec __iomem *fecp = fep->fec.fecp; get_int_events()
473 struct fs_enet_private *fep = netdev_priv(dev); clear_int_events() local
474 struct fec __iomem *fecp = fep->fec.fecp; clear_int_events()
481 struct fs_enet_private *fep = netdev_priv(dev); ev_error() local
483 dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events); ev_error()
488 struct fs_enet_private *fep = netdev_priv(dev); get_regs() local
493 memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec)); get_regs()
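The set_multicast_start()/_one()/_finish() hits above implement an accumulate-then-commit hash filter: _start clears the two 32-bit hash words, _one ORs in a bit derived from each address, and _finish writes both words to the controller (or forces all-ones for the all-multicast case). A hedged sketch of the accumulation step; the CRC bit selection is illustrative and may not match the FEC's actual hashing:

    #include <linux/etherdevice.h>   /* ether_crc(), ETH_ALEN */

    struct sketch_hash { u32 hthi, htlo; };

    static void sketch_add_mc_addr(struct sketch_hash *h, const u8 *addr)
    {
        u32 crc = ether_crc(ETH_ALEN, addr);
        u32 bit = (crc >> 26) & 0x3f;   /* top 6 CRC bits -> hash bit 0..63 */

        if (bit >= 32)
            h->hthi |= 1U << (bit - 32);
        else
            h->htlo |= 1U << bit;
    }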
H A Dmac-scc.c90 static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) scc_cr_cmd() argument
92 const struct fs_platform_info *fpi = fep->fpi; scc_cr_cmd()
97 static int do_pd_setup(struct fs_enet_private *fep) do_pd_setup() argument
99 struct platform_device *ofdev = to_platform_device(fep->dev); do_pd_setup()
101 fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); do_pd_setup()
102 if (fep->interrupt == NO_IRQ) do_pd_setup()
105 fep->scc.sccp = of_iomap(ofdev->dev.of_node, 0); do_pd_setup()
106 if (!fep->scc.sccp) do_pd_setup()
109 fep->scc.ep = of_iomap(ofdev->dev.of_node, 1); do_pd_setup()
110 if (!fep->scc.ep) { do_pd_setup()
111 iounmap(fep->scc.sccp); do_pd_setup()
126 struct fs_enet_private *fep = netdev_priv(dev); setup_data() local
128 do_pd_setup(fep); setup_data()
130 fep->scc.hthi = 0; setup_data()
131 fep->scc.htlo = 0; setup_data()
133 fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; setup_data()
134 fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK; setup_data()
135 fep->ev_rx = SCC_RX_EVENT; setup_data()
136 fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE; setup_data()
137 fep->ev_err = SCC_ERR_EVENT_MSK; setup_data()
144 struct fs_enet_private *fep = netdev_priv(dev); allocate_bd() local
145 const struct fs_platform_info *fpi = fep->fpi; allocate_bd()
147 fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * allocate_bd()
149 if (IS_ERR_VALUE(fep->ring_mem_addr)) allocate_bd()
152 fep->ring_base = (void __iomem __force *) allocate_bd()
153 cpm_dpram_addr(fep->ring_mem_addr); allocate_bd()
160 struct fs_enet_private *fep = netdev_priv(dev); free_bd() local
162 if (fep->ring_base) free_bd()
163 cpm_dpfree(fep->ring_mem_addr); free_bd()
173 struct fs_enet_private *fep = netdev_priv(dev); set_promiscuous_mode() local
174 scc_t __iomem *sccp = fep->scc.sccp; set_promiscuous_mode()
181 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_start() local
182 scc_enet_t __iomem *ep = fep->scc.ep; set_multicast_start()
192 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_one() local
193 scc_enet_t __iomem *ep = fep->scc.ep; set_multicast_one()
203 scc_cr_cmd(fep, CPM_CR_SET_GADDR); set_multicast_one()
208 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_finish() local
209 scc_t __iomem *sccp = fep->scc.sccp; set_multicast_finish()
210 scc_enet_t __iomem *ep = fep->scc.ep; set_multicast_finish()
246 struct fs_enet_private *fep = netdev_priv(dev); restart() local
247 scc_t __iomem *sccp = fep->scc.sccp; restart()
248 scc_enet_t __iomem *ep = fep->scc.ep; restart()
249 const struct fs_platform_info *fpi = fep->fpi; restart()
261 W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); restart()
263 fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); restart()
329 scc_cr_cmd(fep, CPM_CR_INIT_TRX); restart()
355 if (fep->phydev->duplex) restart()
366 struct fs_enet_private *fep = netdev_priv(dev); stop() local
367 scc_t __iomem *sccp = fep->scc.sccp; stop()
374 dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n"); stop()
384 struct fs_enet_private *fep = netdev_priv(dev); napi_clear_rx_event() local
385 scc_t __iomem *sccp = fep->scc.sccp; napi_clear_rx_event()
392 struct fs_enet_private *fep = netdev_priv(dev); napi_enable_rx() local
393 scc_t __iomem *sccp = fep->scc.sccp; napi_enable_rx()
400 struct fs_enet_private *fep = netdev_priv(dev); napi_disable_rx() local
401 scc_t __iomem *sccp = fep->scc.sccp; napi_disable_rx()
408 struct fs_enet_private *fep = netdev_priv(dev); napi_clear_tx_event() local
409 scc_t __iomem *sccp = fep->scc.sccp; napi_clear_tx_event()
416 struct fs_enet_private *fep = netdev_priv(dev); napi_enable_tx() local
417 scc_t __iomem *sccp = fep->scc.sccp; napi_enable_tx()
424 struct fs_enet_private *fep = netdev_priv(dev); napi_disable_tx() local
425 scc_t __iomem *sccp = fep->scc.sccp; napi_disable_tx()
442 struct fs_enet_private *fep = netdev_priv(dev); get_int_events() local
443 scc_t __iomem *sccp = fep->scc.sccp; get_int_events()
450 struct fs_enet_private *fep = netdev_priv(dev); clear_int_events() local
451 scc_t __iomem *sccp = fep->scc.sccp; clear_int_events()
458 struct fs_enet_private *fep = netdev_priv(dev); ev_error() local
460 dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events); ev_error()
465 struct fs_enet_private *fep = netdev_priv(dev); get_regs() local
470 memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); get_regs()
473 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t __iomem *)); get_regs()
485 struct fs_enet_private *fep = netdev_priv(dev); tx_restart() local
487 scc_cr_cmd(fep, CPM_CR_RESTART_TX); tx_restart()
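In mac-scc.c, do_pd_setup() maps two register windows (the SCC register block and the parameter RAM) and has to unwind the first mapping when the second one fails, which is what the iounmap() at its line 111 does. A minimal sketch of that unwind pattern; the function and parameter names are placeholders:

    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/io.h>
    #include <linux/errno.h>

    static int sketch_map_two(struct device_node *np,
                              void __iomem **regs, void __iomem **pram)
    {
        *regs = of_iomap(np, 0);
        if (!*regs)
            return -EINVAL;

        *pram = of_iomap(np, 1);
        if (!*pram) {
            iounmap(*regs);   /* undo the first mapping before bailing out */
            return -EINVAL;
        }
        return 0;
    }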
H A Dmac-fcc.c79 static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op) fcc_cr_cmd() argument
81 const struct fs_platform_info *fpi = fep->fpi; fcc_cr_cmd()
86 static int do_pd_setup(struct fs_enet_private *fep) do_pd_setup() argument
88 struct platform_device *ofdev = to_platform_device(fep->dev); do_pd_setup()
89 struct fs_platform_info *fpi = fep->fpi; do_pd_setup()
92 fep->interrupt = irq_of_parse_and_map(ofdev->dev.of_node, 0); do_pd_setup()
93 if (fep->interrupt == NO_IRQ) do_pd_setup()
96 fep->fcc.fccp = of_iomap(ofdev->dev.of_node, 0); do_pd_setup()
97 if (!fep->fcc.fccp) do_pd_setup()
100 fep->fcc.ep = of_iomap(ofdev->dev.of_node, 1); do_pd_setup()
101 if (!fep->fcc.ep) do_pd_setup()
104 fep->fcc.fcccp = of_iomap(ofdev->dev.of_node, 2); do_pd_setup()
105 if (!fep->fcc.fcccp) do_pd_setup()
108 fep->fcc.mem = (void __iomem *)cpm2_immr; do_pd_setup()
118 iounmap(fep->fcc.fcccp); do_pd_setup()
120 iounmap(fep->fcc.ep); do_pd_setup()
122 iounmap(fep->fcc.fccp); do_pd_setup()
135 struct fs_enet_private *fep = netdev_priv(dev); setup_data() local
137 if (do_pd_setup(fep) != 0) setup_data()
140 fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK; setup_data()
141 fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK; setup_data()
142 fep->ev_rx = FCC_RX_EVENT; setup_data()
143 fep->ev_tx = FCC_TX_EVENT; setup_data()
144 fep->ev_err = FCC_ERR_EVENT_MSK; setup_data()
151 struct fs_enet_private *fep = netdev_priv(dev); allocate_bd() local
152 const struct fs_platform_info *fpi = fep->fpi; allocate_bd()
154 fep->ring_base = (void __iomem __force *)dma_alloc_coherent(fep->dev, allocate_bd()
156 sizeof(cbd_t), &fep->ring_mem_addr, allocate_bd()
158 if (fep->ring_base == NULL) allocate_bd()
166 struct fs_enet_private *fep = netdev_priv(dev); free_bd() local
167 const struct fs_platform_info *fpi = fep->fpi; free_bd()
169 if (fep->ring_base) free_bd()
170 dma_free_coherent(fep->dev, free_bd()
172 (void __force *)fep->ring_base, fep->ring_mem_addr); free_bd()
182 struct fs_enet_private *fep = netdev_priv(dev); set_promiscuous_mode() local
183 fcc_t __iomem *fccp = fep->fcc.fccp; set_promiscuous_mode()
190 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_start() local
191 fcc_enet_t __iomem *ep = fep->fcc.ep; set_multicast_start()
199 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_one() local
200 fcc_enet_t __iomem *ep = fep->fcc.ep; set_multicast_one()
210 fcc_cr_cmd(fep, CPM_CR_SET_GADDR); set_multicast_one()
215 struct fs_enet_private *fep = netdev_priv(dev); set_multicast_finish() local
216 fcc_t __iomem *fccp = fep->fcc.fccp; set_multicast_finish()
217 fcc_enet_t __iomem *ep = fep->fcc.ep; set_multicast_finish()
231 fep->fcc.gaddrh = R32(ep, fen_gaddrh); set_multicast_finish()
232 fep->fcc.gaddrl = R32(ep, fen_gaddrl); set_multicast_finish()
250 struct fs_enet_private *fep = netdev_priv(dev); restart() local
251 const struct fs_platform_info *fpi = fep->fpi; restart()
252 fcc_t __iomem *fccp = fep->fcc.fccp; restart()
253 fcc_c_t __iomem *fcccp = fep->fcc.fcccp; restart()
254 fcc_enet_t __iomem *ep = fep->fcc.ep; restart()
267 rx_bd_base_phys = fep->ring_mem_addr; restart()
294 memset_io(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); restart()
317 W32(ep, fen_gaddrh, fep->fcc.gaddrh); restart()
318 W32(ep, fen_gaddrl, fep->fcc.gaddrl); restart()
373 if (fep->phydev->speed == 100) restart()
379 fcc_cr_cmd(fep, CPM_CR_INIT_TRX); restart()
399 if (fep->phydev->duplex) restart()
412 struct fs_enet_private *fep = netdev_priv(dev); stop() local
413 fcc_t __iomem *fccp = fep->fcc.fccp; stop()
429 struct fs_enet_private *fep = netdev_priv(dev); napi_clear_rx_event() local
430 fcc_t __iomem *fccp = fep->fcc.fccp; napi_clear_rx_event()
437 struct fs_enet_private *fep = netdev_priv(dev); napi_enable_rx() local
438 fcc_t __iomem *fccp = fep->fcc.fccp; napi_enable_rx()
445 struct fs_enet_private *fep = netdev_priv(dev); napi_disable_rx() local
446 fcc_t __iomem *fccp = fep->fcc.fccp; napi_disable_rx()
453 struct fs_enet_private *fep = netdev_priv(dev); napi_clear_tx_event() local
454 fcc_t __iomem *fccp = fep->fcc.fccp; napi_clear_tx_event()
461 struct fs_enet_private *fep = netdev_priv(dev); napi_enable_tx() local
462 fcc_t __iomem *fccp = fep->fcc.fccp; napi_enable_tx()
469 struct fs_enet_private *fep = netdev_priv(dev); napi_disable_tx() local
470 fcc_t __iomem *fccp = fep->fcc.fccp; napi_disable_tx()
482 struct fs_enet_private *fep = netdev_priv(dev); tx_kickstart() local
483 fcc_t __iomem *fccp = fep->fcc.fccp; tx_kickstart()
490 struct fs_enet_private *fep = netdev_priv(dev); get_int_events() local
491 fcc_t __iomem *fccp = fep->fcc.fccp; get_int_events()
498 struct fs_enet_private *fep = netdev_priv(dev); clear_int_events() local
499 fcc_t __iomem *fccp = fep->fcc.fccp; clear_int_events()
506 struct fs_enet_private *fep = netdev_priv(dev); ev_error() local
508 dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events); ev_error()
513 struct fs_enet_private *fep = netdev_priv(dev); get_regs() local
518 memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); get_regs()
521 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); get_regs()
524 memcpy_fromio(p, fep->fcc.fcccp, 1); get_regs()
546 struct fs_enet_private *fep = netdev_priv(dev); tx_restart() local
547 fcc_t __iomem *fccp = fep->fcc.fccp; tx_restart()
548 const struct fs_platform_info *fpi = fep->fpi; tx_restart()
549 fcc_enet_t __iomem *ep = fep->fcc.ep; tx_restart()
555 last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); tx_restart()
559 ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + tx_restart()
560 fep->ring_base); tx_restart()
562 prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1; tx_restart()
571 prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1; tx_restart()
579 (uint) (((void *)recheck_bd - fep->ring_base) + tx_restart()
580 fep->ring_mem_addr)); tx_restart()
581 fep->dirty_tx = recheck_bd; tx_restart()
587 fcc_cr_cmd(fep, CPM_CR_RESTART_TX); tx_restart()
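The mac-fcc.c tx_restart() hits show the driver translating the controller's descriptor pointer (the DMA address held in fen_genfcc.fcc_tbptr) back into a kernel-virtual buffer-descriptor pointer by offsetting from the ring base, then walking backwards to find the last descriptor the hardware actually completed. A hedged sketch of just the address translation; the descriptor type and names are illustrative:

    #include <linux/types.h>

    struct sketch_bd { __be16 sc; __be16 len; __be32 addr; };

    /*
     * ring_virt and ring_dma are the CPU and DMA addresses of the same ring,
     * so a hardware pointer maps back by applying the same offset.
     */
    static struct sketch_bd *sketch_hw_to_bd(void *ring_virt,
                                             dma_addr_t ring_dma, u32 hw_ptr)
    {
        /* assumes the ring lives in a 32-bit DMA window, as on CPM parts */
        u32 offset = hw_ptr - (u32)ring_dma;

        return ring_virt + offset;
    }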
/linux-4.4.14/drivers/media/dvb-frontends/
H A Ddib3000mc.c641 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dib3000mc_get_frontend() local
645 fep->inversion = INVERSION_AUTO; dib3000mc_get_frontend()
647 fep->bandwidth_hz = state->current_bandwidth; dib3000mc_get_frontend()
650 case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break; dib3000mc_get_frontend()
651 case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break; dib3000mc_get_frontend()
655 case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break; dib3000mc_get_frontend()
656 case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break; dib3000mc_get_frontend()
657 case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break; dib3000mc_get_frontend()
658 case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break; dib3000mc_get_frontend()
662 case 0: fep->modulation = QPSK; break; dib3000mc_get_frontend()
663 case 1: fep->modulation = QAM_16; break; dib3000mc_get_frontend()
665 default: fep->modulation = QAM_64; break; dib3000mc_get_frontend()
671 fep->hierarchy = HIERARCHY_NONE; dib3000mc_get_frontend()
673 case 1: fep->code_rate_HP = FEC_1_2; break; dib3000mc_get_frontend()
674 case 2: fep->code_rate_HP = FEC_2_3; break; dib3000mc_get_frontend()
675 case 3: fep->code_rate_HP = FEC_3_4; break; dib3000mc_get_frontend()
676 case 5: fep->code_rate_HP = FEC_5_6; break; dib3000mc_get_frontend()
678 default: fep->code_rate_HP = FEC_7_8; break; dib3000mc_get_frontend()
683 case 1: fep->code_rate_LP = FEC_1_2; break; dib3000mc_get_frontend()
684 case 2: fep->code_rate_LP = FEC_2_3; break; dib3000mc_get_frontend()
685 case 3: fep->code_rate_LP = FEC_3_4; break; dib3000mc_get_frontend()
686 case 5: fep->code_rate_LP = FEC_5_6; break; dib3000mc_get_frontend()
688 default: fep->code_rate_LP = FEC_7_8; break; dib3000mc_get_frontend()
696 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dib3000mc_set_frontend() local
702 state->current_bandwidth = fep->bandwidth_hz; dib3000mc_set_frontend()
703 dib3000mc_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz)); dib3000mc_set_frontend()
713 if (fep->transmission_mode == TRANSMISSION_MODE_AUTO || dib3000mc_set_frontend()
714 fep->guard_interval == GUARD_INTERVAL_AUTO || dib3000mc_set_frontend()
715 fep->modulation == QAM_AUTO || dib3000mc_set_frontend()
716 fep->code_rate_HP == FEC_AUTO) { dib3000mc_set_frontend()
H A Ddib7000m.c1156 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dib7000m_get_frontend() local
1160 fep->inversion = INVERSION_AUTO; dib7000m_get_frontend()
1162 fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth); dib7000m_get_frontend()
1165 case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break; dib7000m_get_frontend()
1166 case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break; dib7000m_get_frontend()
1167 /* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */ dib7000m_get_frontend()
1171 case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break; dib7000m_get_frontend()
1172 case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break; dib7000m_get_frontend()
1173 case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break; dib7000m_get_frontend()
1174 case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break; dib7000m_get_frontend()
1178 case 0: fep->modulation = QPSK; break; dib7000m_get_frontend()
1179 case 1: fep->modulation = QAM_16; break; dib7000m_get_frontend()
1181 default: fep->modulation = QAM_64; break; dib7000m_get_frontend()
1187 fep->hierarchy = HIERARCHY_NONE; dib7000m_get_frontend()
1189 case 1: fep->code_rate_HP = FEC_1_2; break; dib7000m_get_frontend()
1190 case 2: fep->code_rate_HP = FEC_2_3; break; dib7000m_get_frontend()
1191 case 3: fep->code_rate_HP = FEC_3_4; break; dib7000m_get_frontend()
1192 case 5: fep->code_rate_HP = FEC_5_6; break; dib7000m_get_frontend()
1194 default: fep->code_rate_HP = FEC_7_8; break; dib7000m_get_frontend()
1199 case 1: fep->code_rate_LP = FEC_1_2; break; dib7000m_get_frontend()
1200 case 2: fep->code_rate_LP = FEC_2_3; break; dib7000m_get_frontend()
1201 case 3: fep->code_rate_LP = FEC_3_4; break; dib7000m_get_frontend()
1202 case 5: fep->code_rate_LP = FEC_5_6; break; dib7000m_get_frontend()
1204 default: fep->code_rate_LP = FEC_7_8; break; dib7000m_get_frontend()
1214 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dib7000m_set_frontend() local
1220 dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz)); dib7000m_set_frontend()
1233 if (fep->transmission_mode == TRANSMISSION_MODE_AUTO || dib7000m_set_frontend()
1234 fep->guard_interval == GUARD_INTERVAL_AUTO || dib7000m_set_frontend()
1235 fep->modulation == QAM_AUTO || dib7000m_set_frontend()
1236 fep->code_rate_HP == FEC_AUTO) { dib7000m_set_frontend()
H A Ddib7000p.c1410 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dib7000p_get_frontend() local
1414 fep->inversion = INVERSION_AUTO; dib7000p_get_frontend()
1416 fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth); dib7000p_get_frontend()
1420 fep->transmission_mode = TRANSMISSION_MODE_2K; dib7000p_get_frontend()
1423 fep->transmission_mode = TRANSMISSION_MODE_8K; dib7000p_get_frontend()
1425 /* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */ dib7000p_get_frontend()
1430 fep->guard_interval = GUARD_INTERVAL_1_32; dib7000p_get_frontend()
1433 fep->guard_interval = GUARD_INTERVAL_1_16; dib7000p_get_frontend()
1436 fep->guard_interval = GUARD_INTERVAL_1_8; dib7000p_get_frontend()
1439 fep->guard_interval = GUARD_INTERVAL_1_4; dib7000p_get_frontend()
1445 fep->modulation = QPSK; dib7000p_get_frontend()
1448 fep->modulation = QAM_16; dib7000p_get_frontend()
1452 fep->modulation = QAM_64; dib7000p_get_frontend()
1459 fep->hierarchy = HIERARCHY_NONE; dib7000p_get_frontend()
1462 fep->code_rate_HP = FEC_1_2; dib7000p_get_frontend()
1465 fep->code_rate_HP = FEC_2_3; dib7000p_get_frontend()
1468 fep->code_rate_HP = FEC_3_4; dib7000p_get_frontend()
1471 fep->code_rate_HP = FEC_5_6; dib7000p_get_frontend()
1475 fep->code_rate_HP = FEC_7_8; dib7000p_get_frontend()
1482 fep->code_rate_LP = FEC_1_2; dib7000p_get_frontend()
1485 fep->code_rate_LP = FEC_2_3; dib7000p_get_frontend()
1488 fep->code_rate_LP = FEC_3_4; dib7000p_get_frontend()
1491 fep->code_rate_LP = FEC_5_6; dib7000p_get_frontend()
1495 fep->code_rate_LP = FEC_7_8; dib7000p_get_frontend()
1506 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dib7000p_set_frontend() local
1529 if (fep->transmission_mode == TRANSMISSION_MODE_AUTO || dib7000p_set_frontend()
1530 fep->guard_interval == GUARD_INTERVAL_AUTO || fep->modulation == QAM_AUTO || fep->code_rate_HP == FEC_AUTO) { dib7000p_set_frontend()
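All three DiBcom demodulators above share the same get_frontend()/set_frontend() shape: get_frontend() reads TPS fields out of the chip and translates each raw value into the matching dtv_frontend_properties enum through a switch, while set_frontend() falls back to an autosearch whenever any parameter is still *_AUTO. A compact sketch of one translation step; the 2-bit field encoding is an assumption, not the chip's documented map:

    #include <linux/dvb/frontend.h>

    static enum fe_modulation sketch_qam_from_tps(unsigned int bits)
    {
        switch (bits) {
        case 0:
            return QPSK;
        case 1:
            return QAM_16;
        default:
            return QAM_64;   /* treat everything else as 64-QAM */
        }
    }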
/linux-4.4.14/drivers/media/usb/dvb-usb/
H A Ddtt200u-fe.c19 struct dtv_frontend_properties fep; member in struct:dtt200u_fe_state
106 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dtt200u_fe_set_frontend() local
110 u16 freq = fep->frequency / 250000; dtt200u_fe_set_frontend()
113 switch (fep->bandwidth_hz) { dtt200u_fe_set_frontend()
145 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; dtt200u_fe_get_frontend() local
147 memcpy(fep, &state->fep, sizeof(struct dtv_frontend_properties)); dtt200u_fe_get_frontend()
H A Daf9005-fe.c1106 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; af9005_fe_set_frontend() local
1111 deb_info("af9005_fe_set_frontend freq %d bw %d\n", fep->frequency, af9005_fe_set_frontend()
1112 fep->bandwidth_hz); af9005_fe_set_frontend()
1172 ret = af9005_fe_select_bw(state->d, fep->bandwidth_hz); af9005_fe_set_frontend()
1175 ret = af9005_fe_program_cfoe(state->d, fep->bandwidth_hz); af9005_fe_set_frontend()
1232 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; af9005_fe_get_frontend() local
1248 fep->modulation = QPSK; af9005_fe_get_frontend()
1252 fep->modulation = QAM_16; af9005_fe_get_frontend()
1256 fep->modulation = QAM_64; af9005_fe_get_frontend()
1271 fep->hierarchy = HIERARCHY_NONE; af9005_fe_get_frontend()
1275 fep->hierarchy = HIERARCHY_1; af9005_fe_get_frontend()
1279 fep->hierarchy = HIERARCHY_2; af9005_fe_get_frontend()
1283 fep->hierarchy = HIERARCHY_4; af9005_fe_get_frontend()
1307 fep->code_rate_HP = FEC_1_2; af9005_fe_get_frontend()
1311 fep->code_rate_HP = FEC_2_3; af9005_fe_get_frontend()
1315 fep->code_rate_HP = FEC_3_4; af9005_fe_get_frontend()
1319 fep->code_rate_HP = FEC_5_6; af9005_fe_get_frontend()
1323 fep->code_rate_HP = FEC_7_8; af9005_fe_get_frontend()
1338 fep->code_rate_LP = FEC_1_2; af9005_fe_get_frontend()
1342 fep->code_rate_LP = FEC_2_3; af9005_fe_get_frontend()
1346 fep->code_rate_LP = FEC_3_4; af9005_fe_get_frontend()
1350 fep->code_rate_LP = FEC_5_6; af9005_fe_get_frontend()
1354 fep->code_rate_LP = FEC_7_8; af9005_fe_get_frontend()
1368 fep->guard_interval = GUARD_INTERVAL_1_32; af9005_fe_get_frontend()
1372 fep->guard_interval = GUARD_INTERVAL_1_16; af9005_fe_get_frontend()
1376 fep->guard_interval = GUARD_INTERVAL_1_8; af9005_fe_get_frontend()
1380 fep->guard_interval = GUARD_INTERVAL_1_4; af9005_fe_get_frontend()
1395 fep->transmission_mode = TRANSMISSION_MODE_2K; af9005_fe_get_frontend()
1399 fep->transmission_mode = TRANSMISSION_MODE_8K; af9005_fe_get_frontend()
1411 fep->bandwidth_hz = 6000000; af9005_fe_get_frontend()
1415 fep->bandwidth_hz = 7000000; af9005_fe_get_frontend()
1419 fep->bandwidth_hz = 8000000; af9005_fe_get_frontend()
H A Dvp702x-fe.c141 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; vp702x_fe_set_frontend() local
144 u32 freq = fep->frequency/1000; vp702x_fe_set_frontend()
159 sr = (u64) (fep->symbol_rate/1000) << 20; vp702x_fe_set_frontend()
166 fep->frequency, freq, freq, fep->symbol_rate, vp702x_fe_set_frontend()
169 /* if (fep->inversion == INVERSION_ON) vp702x_fe_set_frontend()
175 /* if (fep->symbol_rate > 8000000) vp702x_fe_set_frontend()
178 if (fep->frequency < 1531000) vp702x_fe_set_frontend()
H A Dvp7045-fe.c109 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; vp7045_fe_set_frontend() local
112 u32 freq = fep->frequency / 1000; vp7045_fe_set_frontend()
119 switch (fep->bandwidth_hz) { vp7045_fe_set_frontend()
H A DcinergyT2-fe.c267 struct dtv_frontend_properties *fep = &fe->dtv_property_cache; cinergyt2_fe_set_frontend() local
274 param.tps = cpu_to_le16(compute_tps(fep)); cinergyt2_fe_set_frontend()
275 param.freq = cpu_to_le32(fep->frequency / 1000); cinergyt2_fe_set_frontend()
278 switch (fep->bandwidth_hz) { cinergyt2_fe_set_frontend()
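The dvb-usb front ends above all build their tuning command from dtv_frontend_properties: the frequency is scaled to the device's unit (e.g. fep->frequency / 250000 in dtt200u) and bandwidth_hz is mapped through a switch. A hedged sketch of the bandwidth step; the returned codes are placeholders for whatever a given firmware expects:

    #include <linux/types.h>
    #include <linux/errno.h>

    static int sketch_bw_code(u32 bandwidth_hz)
    {
        switch (bandwidth_hz) {
        case 6000000:
            return 6;
        case 7000000:
            return 7;
        case 8000000:
            return 8;
        default:
            return -EINVAL;   /* unsupported channel bandwidth */
        }
    }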
/linux-4.4.14/drivers/misc/mic/scif/
H A Dscif_fd.c171 struct scif_endpt *fep = NULL; scif_fdioctl() local
186 fep = tmpep; scif_fdioctl()
191 if (!fep) { scif_fdioctl()
H A Dscif_api.c95 struct scif_endpt *fep = NULL; scif_disconnect_ep() local
121 fep = tmpep; scif_disconnect_ep()
127 if (!fep) { scif_disconnect_ep()
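In the SCIF hits, fep is simply a "found endpoint" pointer: the code walks a list of endpoints, remembers the match, and treats a NULL fep after the loop as "not found". A generic sketch of that idiom; the list head and match criterion are invented for illustration:

    #include <linux/list.h>

    struct sketch_ep {
        struct list_head list;
        int id;
    };

    static struct sketch_ep *sketch_find_ep(struct list_head *head, int id)
    {
        struct sketch_ep *ep, *fep = NULL;

        list_for_each_entry(ep, head, list) {
            if (ep->id == id) {
                fep = ep;
                break;
            }
        }
        return fep;   /* NULL means no endpoint matched */
    }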
/linux-4.4.14/drivers/staging/dgap/
H A Ddgap.h210 /* speed if the fep has extended */
768 #define DIGI_AIXON 0x0400 /* Aux flow control in fep */
H A Ddgap.c1334 * Go get from fep mem, what the fep dgap_get_custom_baud()
2713 * Tell the fep to do the command dgap_param()
2719 * Now go get from fep mem, what the fep dgap_param()
6605 dev_err(&pdev->dev, "dgap: fep file %s not found\n", dgap_firmware_load()
/linux-4.4.14/arch/mips/include/asm/octeon/
H A Dcvmx-pciercx-defs.h2118 uint32_t fep:5; member in struct:cvmx_pciercx_cfg070::cvmx_pciercx_cfg070_s
2120 uint32_t fep:5;
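The last two hits are an unrelated "fep": a 5-bit field inside an Octeon PCIe config-space register definition (in the PCIe AER capability such a field is typically the First Error Pointer). A short sketch of reading a field like this without relying on compiler bitfield layout; the bit position is an assumption for illustration:

    #include <stdint.h>

    /* Extract bits [4:0] of a 32-bit register value. */
    static inline uint32_t sketch_get_fep(uint32_t reg)
    {
        return reg & 0x1f;
    }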

Completed in 574 milliseconds