Lines matching refs: pp (struct mvneta_port *, Marvell mvneta Ethernet driver)

484 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)  in mvreg_write()  argument
486 writel(data, pp->base + offset); in mvreg_write()
490 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) in mvreg_read() argument
492 return readl(pp->base + offset); in mvreg_read()
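The two accessors above funnel every register access in this listing through pp->base. A minimal sketch of the same pattern that compiles in userspace; the kernel's writel()/readl() (which add I/O ordering guarantees) are replaced by plain volatile accesses, and struct mvneta_port is cut down to the one field used here:

#include <stdint.h>

struct mvneta_port { volatile uint32_t *base; };

/* Stand-ins for the driver's mvreg_write()/mvreg_read(): plain
 * volatile accesses instead of writel()/readl(), byte offsets
 * converted to u32 indices. Illustration only. */
static void mvreg_write(struct mvneta_port *pp, uint32_t offset, uint32_t data)
{
        pp->base[offset / 4] = data;
}

static uint32_t mvreg_read(struct mvneta_port *pp, uint32_t offset)
{
        return pp->base[offset / 4];
}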
513 static void mvneta_mib_counters_clear(struct mvneta_port *pp) in mvneta_mib_counters_clear() argument
520 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); in mvneta_mib_counters_clear()
527 struct mvneta_port *pp = netdev_priv(dev); in mvneta_get_stats64() local
538 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
575 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, in mvneta_rxq_non_occup_desc_add() argument
583 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
589 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
594 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, in mvneta_rxq_busy_desc_num_get() argument
599 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
606 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, in mvneta_rxq_desc_num_update() argument
615 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
635 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
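mvneta_rxq_desc_num_update() issues two writes (lines 615 and 635) because the processed/refilled counts in MVNETA_RXQ_STATUS_UPDATE_REG appear to be 8-bit fields, so larger counts must be chunked. A hedged, runnable sketch of that chunking; the register write is stubbed out and the 16-bit shift for the refilled field is an assumption:

#include <stdint.h>
#include <stdio.h>

/* Stub standing in for the MVNETA_RXQ_STATUS_UPDATE_REG write. */
static void status_update_write(uint32_t val)
{
        printf("status update: 0x%08x\n", (unsigned)val);
}

/* Chunk processed/refilled counts into 8-bit fields per write;
 * the shift for the refilled count is an assumption. */
static void desc_num_update(int rx_done, int rx_filled)
{
        while (rx_done > 0 || rx_filled > 0) {
                int done = rx_done > 0xff ? 0xff : rx_done;
                int filled = rx_filled > 0xff ? 0xff : rx_filled;

                status_update_write((uint32_t)done | ((uint32_t)filled << 16));
                rx_done -= done;
                rx_filled -= filled;
        }
}

int main(void)
{
        desc_num_update(300, 300);      /* two writes: 255, then 45 */
        return 0;
}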
651 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) in mvneta_max_rx_size_set() argument
655 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_max_rx_size_set()
659 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_max_rx_size_set()
664 static void mvneta_rxq_offset_set(struct mvneta_port *pp, in mvneta_rxq_offset_set() argument
670 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
675 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
682 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, in mvneta_txq_pend_desc_add() argument
692 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
717 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, in mvneta_rxq_buf_size_set() argument
723 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
728 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
732 static void mvneta_rxq_bm_disable(struct mvneta_port *pp, in mvneta_rxq_bm_disable() argument
737 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
739 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
743 static void mvneta_port_up(struct mvneta_port *pp) in mvneta_port_up() argument
749 mvneta_mib_counters_clear(pp); in mvneta_port_up()
752 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
756 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); in mvneta_port_up()
761 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
766 mvreg_write(pp, MVNETA_RXQ_CMD, q_map); in mvneta_port_up()
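mvneta_port_up() builds a queue bitmap (q_map) and writes it to MVNETA_TXQ_CMD / MVNETA_RXQ_CMD to enable all configured queues at once. A compilable sketch of the bitmap construction; the queue count and the "has a descriptor ring" test are assumptions drawn from the surrounding lines:

#include <stdint.h>

#define TXQ_NUMBER 8    /* assumed queue count */

struct tx_queue { void *descs; };

/* One enable bit per queue that has a descriptor ring; the result is
 * what gets written to MVNETA_TXQ_CMD (the RX side is analogous). */
static uint32_t build_q_map(const struct tx_queue *txqs)
{
        uint32_t q_map = 0;

        for (int queue = 0; queue < TXQ_NUMBER; queue++)
                if (txqs[queue].descs)
                        q_map |= 1u << queue;

        return q_map;
}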
770 static void mvneta_port_down(struct mvneta_port *pp) in mvneta_port_down() argument
776 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; in mvneta_port_down()
780 mvreg_write(pp, MVNETA_RXQ_CMD, in mvneta_port_down()
787 netdev_warn(pp->dev, in mvneta_port_down()
794 val = mvreg_read(pp, MVNETA_RXQ_CMD); in mvneta_port_down()
800 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; in mvneta_port_down()
803 mvreg_write(pp, MVNETA_TXQ_CMD, in mvneta_port_down()
810 netdev_warn(pp->dev, in mvneta_port_down()
818 val = mvreg_read(pp, MVNETA_TXQ_CMD); in mvneta_port_down()
826 netdev_warn(pp->dev, in mvneta_port_down()
833 val = mvreg_read(pp, MVNETA_PORT_STATUS); in mvneta_port_down()
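mvneta_port_down() at lines 776-833 keeps re-reading a command/status register until the disable takes effect, warning (lines 787, 810, 826) when a retry limit is hit. A generic version of that bounded poll loop; in the driver the reads go through mvreg_read() and there may be delays between iterations:

#include <stdint.h>
#include <stdio.h>

/* Re-read a register until the masked bits clear or 'limit' retries
 * pass; mirrors the warn-on-timeout loops in mvneta_port_down(). */
static int poll_until_clear(uint32_t (*read_reg)(void), uint32_t mask, int limit)
{
        uint32_t val = read_reg();
        int count = 0;

        while (val & mask) {
                if (++count > limit) {
                        fprintf(stderr, "timeout: reg=0x%08x\n", (unsigned)val);
                        return -1;
                }
                val = read_reg();
        }
        return 0;
}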
841 static void mvneta_port_enable(struct mvneta_port *pp) in mvneta_port_enable() argument
846 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_enable()
848 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_enable()
852 static void mvneta_port_disable(struct mvneta_port *pp) in mvneta_port_disable() argument
857 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_disable()
859 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_disable()
867 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument
880 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); in mvneta_set_ucast_table()
884 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_special_mcast_table() argument
897 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); in mvneta_set_special_mcast_table()
902 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_other_mcast_table() argument
908 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
911 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
917 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); in mvneta_set_other_mcast_table()
929 static void mvneta_defaults_set(struct mvneta_port *pp) in mvneta_defaults_set() argument
936 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); in mvneta_defaults_set()
937 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_defaults_set()
938 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_defaults_set()
941 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_defaults_set()
942 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_defaults_set()
943 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_defaults_set()
944 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); in mvneta_defaults_set()
947 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); in mvneta_defaults_set()
953 mvreg_write(pp, MVNETA_CPU_MAP(cpu), in mvneta_defaults_set()
958 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_defaults_set()
959 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_defaults_set()
962 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); in mvneta_defaults_set()
964 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); in mvneta_defaults_set()
965 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); in mvneta_defaults_set()
968 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_defaults_set()
969 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_defaults_set()
973 mvreg_write(pp, MVNETA_ACC_MODE, val); in mvneta_defaults_set()
977 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_defaults_set()
980 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); in mvneta_defaults_set()
981 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); in mvneta_defaults_set()
996 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); in mvneta_defaults_set()
1001 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); in mvneta_defaults_set()
1003 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); in mvneta_defaults_set()
1005 if (pp->use_inband_status) { in mvneta_defaults_set()
1006 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_defaults_set()
1013 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_defaults_set()
1014 val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); in mvneta_defaults_set()
1016 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); in mvneta_defaults_set()
1018 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_defaults_set()
1022 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_defaults_set()
1025 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1026 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1027 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1030 mvreg_write(pp, MVNETA_INTR_ENABLE, in mvneta_defaults_set()
1036 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) in mvneta_txq_max_tx_size_set() argument
1047 val = mvreg_read(pp, MVNETA_TX_MTU); in mvneta_txq_max_tx_size_set()
1050 mvreg_write(pp, MVNETA_TX_MTU, val); in mvneta_txq_max_tx_size_set()
1053 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); in mvneta_txq_max_tx_size_set()
1060 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); in mvneta_txq_max_tx_size_set()
1063 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); in mvneta_txq_max_tx_size_set()
1070 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); in mvneta_txq_max_tx_size_set()
1076 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, in mvneta_set_ucast_addr() argument
1092 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); in mvneta_set_ucast_addr()
1102 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); in mvneta_set_ucast_addr()
1106 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, in mvneta_mac_addr_set() argument
1117 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); in mvneta_mac_addr_set()
1118 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); in mvneta_mac_addr_set()
1122 mvneta_set_ucast_addr(pp, addr[5], queue); in mvneta_mac_addr_set()
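mvneta_mac_addr_set() splits the six MAC bytes across the two registers written at lines 1117-1118. A sketch of the packing, with the byte layout inferred from the driver (high word: bytes 0-3, low word: bytes 4-5); treat the exact layout as an assumption:

#include <stdint.h>

/* Pack a 6-byte MAC into the MVNETA_MAC_ADDR_HIGH/LOW pair; byte
 * layout inferred from mvneta_mac_addr_set(), treat as a sketch. */
static void mac_pack(const uint8_t *addr, uint32_t *mac_h, uint32_t *mac_l)
{
        *mac_h = ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
                 ((uint32_t)addr[2] << 8) | addr[3];
        *mac_l = ((uint32_t)addr[4] << 8) | addr[5];
}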
1128 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, in mvneta_rx_pkts_coal_set() argument
1131 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1139 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, in mvneta_rx_time_coal_set() argument
1145 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1148 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
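mvneta_rx_time_coal_set() converts a delay in microseconds into controller clock ticks before writing MVNETA_RXQ_TIME_COAL_REG, which is why it needs clk_get_rate(pp->clk) at line 1145. A sketch of the conversion; the ticks-per-microsecond formula is an assumption based on that clock use:

#include <stdint.h>

/* Microseconds -> controller clock ticks; formula assumed from the
 * clk_get_rate() call in mvneta_rx_time_coal_set(). */
static uint32_t usec_to_ticks(unsigned long clk_rate_hz, uint32_t usec)
{
        return (uint32_t)((clk_rate_hz / 1000000) * usec);
}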
1153 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, in mvneta_tx_done_pkts_coal_set() argument
1158 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1163 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1177 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, in mvneta_txq_sent_desc_dec() argument
1186 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1191 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1195 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, in mvneta_txq_sent_desc_num_get() argument
1201 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1211 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, in mvneta_txq_sent_desc_proc() argument
1217 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1221 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
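mvneta_txq_sent_desc_proc() reads how many descriptors the hardware has finished transmitting, then acknowledges exactly that many so the hardware counter is decremented (lines 1217-1221). The shape of that read-then-acknowledge step, with the two register helpers passed in as stand-ins:

/* Read-then-acknowledge: 'get' returns the hardware's sent-descriptor
 * count, 'dec' acknowledges that many so the counter drops to zero. */
static int sent_desc_proc(int (*get)(void), void (*dec)(int))
{
        int sent = get();

        if (sent)
                dec(sent);

        return sent;
}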
1256 static void mvneta_rx_error(struct mvneta_port *pp, in mvneta_rx_error() argument
1262 netdev_err(pp->dev, in mvneta_rx_error()
1270 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1274 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1278 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1282 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1289 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, in mvneta_rx_csum() argument
1306 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, in mvneta_tx_done_policy() argument
1311 return &pp->txqs[queue]; in mvneta_tx_done_policy()
1315 static void mvneta_txq_bufs_free(struct mvneta_port *pp, in mvneta_txq_bufs_free() argument
1328 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1338 static void mvneta_txq_done(struct mvneta_port *pp, in mvneta_txq_done() argument
1341 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1344 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1348 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done()
1358 static void *mvneta_frag_alloc(const struct mvneta_port *pp) in mvneta_frag_alloc() argument
1360 if (likely(pp->frag_size <= PAGE_SIZE)) in mvneta_frag_alloc()
1361 return netdev_alloc_frag(pp->frag_size); in mvneta_frag_alloc()
1363 return kmalloc(pp->frag_size, GFP_ATOMIC); in mvneta_frag_alloc()
1366 static void mvneta_frag_free(const struct mvneta_port *pp, void *data) in mvneta_frag_free() argument
1368 if (likely(pp->frag_size <= PAGE_SIZE)) in mvneta_frag_free()
1375 static int mvneta_rx_refill(struct mvneta_port *pp, in mvneta_rx_refill() argument
1382 data = mvneta_frag_alloc(pp); in mvneta_rx_refill()
1386 phys_addr = dma_map_single(pp->dev->dev.parent, data, in mvneta_rx_refill()
1387 MVNETA_RX_BUF_SIZE(pp->pkt_size), in mvneta_rx_refill()
1389 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { in mvneta_rx_refill()
1390 mvneta_frag_free(pp, data); in mvneta_rx_refill()
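mvneta_rx_refill() allocates a receive buffer, DMA-maps it, and unwinds the allocation if the mapping fails (lines 1382-1390). A userspace outline of that error path; dma_map/dma_error are hypothetical stand-ins for dma_map_single()/dma_mapping_error(), and malloc()/free() stand in for the driver's frag allocator:

#include <stdlib.h>

/* Allocate, DMA-map, and unwind on mapping failure. */
static int rx_refill(void **buf, unsigned long *phys,
                     unsigned long (*dma_map)(void *),
                     int (*dma_error)(unsigned long))
{
        void *data = malloc(2048);              /* pkt-size buffer */

        if (!data)
                return -1;                      /* -ENOMEM in the driver */

        *phys = dma_map(data);
        if (dma_error(*phys)) {
                free(data);                     /* undo the allocation */
                return -1;
        }

        *buf = data;
        return 0;
}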
1399 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) in mvneta_skb_tx_csum() argument
1432 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp, in mvneta_rx_policy() argument
1437 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue]; in mvneta_rx_policy()
1441 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, in mvneta_rxq_drop_pkts() argument
1446 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rxq_drop_pkts()
1451 mvneta_frag_free(pp, data); in mvneta_rxq_drop_pkts()
1452 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, in mvneta_rxq_drop_pkts()
1453 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); in mvneta_rxq_drop_pkts()
1457 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rxq_drop_pkts()
1461 static int mvneta_rx(struct mvneta_port *pp, int rx_todo, in mvneta_rx() argument
1464 struct net_device *dev = pp->dev; in mvneta_rx()
1470 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx()
1496 mvneta_rx_error(pp, rx_desc); in mvneta_rx()
1517 mvneta_rx_csum(pp, rx_status, skb); in mvneta_rx()
1518 napi_gro_receive(&pp->napi, skb); in mvneta_rx()
1527 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); in mvneta_rx()
1532 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); in mvneta_rx()
1543 mvneta_rx_csum(pp, rx_status, skb); in mvneta_rx()
1545 napi_gro_receive(&pp->napi, skb); in mvneta_rx()
1548 err = mvneta_rx_refill(pp, rx_desc); in mvneta_rx()
1557 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx()
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); in mvneta_rx()
1573 struct mvneta_port *pp, struct mvneta_tx_queue *txq) in mvneta_tso_put_hdr() argument
1581 tx_desc->command = mvneta_skb_tx_csum(pp, skb); in mvneta_tso_put_hdr()
1625 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx_tso() local
1654 mvneta_tso_put_hdr(skb, pp, txq); in mvneta_tx_tso()
1682 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_tso()
1692 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, in mvneta_tx_frag_process() argument
1706 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
1709 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
1735 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_frag_process()
1748 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx() local
1750 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
1769 tx_cmd = mvneta_skb_tx_csum(pp, skb); in mvneta_tx()
1796 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
1809 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
1813 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
1832 static void mvneta_txq_done_force(struct mvneta_port *pp, in mvneta_txq_done_force() argument
1838 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done_force()
1849 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) in mvneta_tx_done_gbe() argument
1855 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
1857 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
1861 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
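mvneta_tx_done_gbe() loops over the TX-done cause bitmap, servicing one queue per iteration via mvneta_tx_done_policy() and clearing its bit until nothing is pending. A simplified version of that loop; picking the lowest set bit (a GCC/Clang builtin) is illustrative, the driver's policy may choose a different order:

#include <stdint.h>

/* Service one queue per iteration until no cause bits remain. */
static void tx_done_all(uint32_t cause, void (*service)(int queue))
{
        while (cause) {
                int q = __builtin_ctz(cause);   /* lowest pending queue */

                service(q);
                cause &= ~(1u << q);
        }
}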
1896 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, in mvneta_set_special_mcast_addr() argument
1909 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST in mvneta_set_special_mcast_addr()
1919 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, in mvneta_set_special_mcast_addr()
1931 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, in mvneta_set_other_mcast_addr() argument
1942 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); in mvneta_set_other_mcast_addr()
1952 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); in mvneta_set_other_mcast_addr()
1964 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, in mvneta_mcast_addr_set() argument
1970 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); in mvneta_mcast_addr_set()
1976 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
1977 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
1982 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
1983 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
1984 netdev_info(pp->dev, in mvneta_mcast_addr_set()
1986 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
1990 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
1992 mvneta_set_other_mcast_addr(pp, crc_result, queue); in mvneta_mcast_addr_set()
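mvneta_mcast_addr_set() reference-counts multicast filter slots in pp->mcast_count[] (lines 1976-1992): several addresses can hash (CRC8) to the same slot, so an entry is only cleared when its count drops to zero. A runnable sketch of that bookkeeping, with the hardware-table writes reduced to comments:

#include <stdint.h>
#include <stdio.h>

static int mcast_count[256];    /* one refcount per CRC8 filter slot */

static void mcast_add(uint8_t crc)
{
        mcast_count[crc]++;
        /* set the hardware filter entry for this slot here */
}

static int mcast_del(uint8_t crc)
{
        if (mcast_count[crc] == 0) {
                fprintf(stderr, "No valid Mcast for crc8=0x%02x\n", crc);
                return -1;
        }
        if (--mcast_count[crc] != 0)
                return 0;       /* other addresses still share the slot */
        /* last user gone: clear the hardware filter entry here */
        return 0;
}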
1998 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, in mvneta_rx_unicast_promisc_set() argument
2003 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); in mvneta_rx_unicast_promisc_set()
2005 val = mvreg_read(pp, MVNETA_TYPE_PRIO); in mvneta_rx_unicast_promisc_set()
2012 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); in mvneta_rx_unicast_promisc_set()
2013 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); in mvneta_rx_unicast_promisc_set()
2020 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); in mvneta_rx_unicast_promisc_set()
2021 mvreg_write(pp, MVNETA_TYPE_PRIO, val); in mvneta_rx_unicast_promisc_set()
2027 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_rx_mode() local
2032 mvneta_rx_unicast_promisc_set(pp, 1); in mvneta_set_rx_mode()
2033 mvneta_set_ucast_table(pp, rxq_def); in mvneta_set_rx_mode()
2034 mvneta_set_special_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2035 mvneta_set_other_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2038 mvneta_rx_unicast_promisc_set(pp, 0); in mvneta_set_rx_mode()
2039 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
2040 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); in mvneta_set_rx_mode()
2044 mvneta_set_special_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2045 mvneta_set_other_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2048 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
2049 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
2053 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
2064 struct mvneta_port *pp = (struct mvneta_port *)dev_id; in mvneta_isr() local
2067 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_isr()
2069 napi_schedule(&pp->napi); in mvneta_isr()
2074 static int mvneta_fixed_link_update(struct mvneta_port *pp, in mvneta_fixed_link_update() argument
2079 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_fixed_link_update()
2108 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll() local
2110 if (!netif_running(pp->dev)) { in mvneta_poll()
2116 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); in mvneta_poll()
2118 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); in mvneta_poll()
2120 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_poll()
2121 if (pp->use_inband_status && (cause_misc & in mvneta_poll()
2125 mvneta_fixed_link_update(pp, pp->phy_dev); in mvneta_poll()
2131 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); in mvneta_poll()
2138 cause_rx_tx |= pp->cause_rx_tx; in mvneta_poll()
2144 rxq = mvneta_rx_policy(pp, cause_rx_tx); in mvneta_poll()
2149 count = mvneta_rx(pp, budget, rxq); in mvneta_poll()
2163 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); in mvneta_poll()
2171 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_poll()
2178 pp->cause_rx_tx = cause_rx_tx; in mvneta_poll()
2183 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_rxq_fill() argument
2190 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { in mvneta_rxq_fill()
2191 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", in mvneta_rxq_fill()
2200 mvneta_rxq_non_occup_desc_add(pp, rxq, i); in mvneta_rxq_fill()
2206 static void mvneta_tx_reset(struct mvneta_port *pp) in mvneta_tx_reset() argument
2212 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
2214 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_tx_reset()
2215 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_tx_reset()
2218 static void mvneta_rx_reset(struct mvneta_port *pp) in mvneta_rx_reset() argument
2220 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_rx_reset()
2221 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_rx_reset()
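mvneta_tx_reset() and mvneta_rx_reset() use the same two-write idiom: assert the DMA-reset bit, then release it (lines 2214-2215 and 2220-2221). Generalized:

#include <stdint.h>

/* Assert the DMA-reset bit, then release it; write_reg stands in for
 * mvreg_write() on the PORT_TX/RX_RESET register. */
static void pulse_reset(void (*write_reg)(uint32_t), uint32_t reset_bit)
{
        write_reg(reset_bit);   /* hold the DMA engine in reset */
        write_reg(0);           /* release it */
}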
2227 static int mvneta_rxq_init(struct mvneta_port *pp, in mvneta_rxq_init() argument
2231 rxq->size = pp->rx_ring_size; in mvneta_rxq_init()
2234 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_init()
2246 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_init()
2247 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_init()
2250 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); in mvneta_rxq_init()
2253 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_init()
2254 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_init()
2257 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_init()
2258 mvneta_rxq_bm_disable(pp, rxq); in mvneta_rxq_init()
2259 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_init()
2265 static void mvneta_rxq_deinit(struct mvneta_port *pp, in mvneta_rxq_deinit() argument
2268 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_rxq_deinit()
2271 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
2283 static int mvneta_txq_init(struct mvneta_port *pp, in mvneta_txq_init() argument
2286 txq->size = pp->tx_ring_size; in mvneta_txq_init()
2297 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2310 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_init()
2311 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_init()
2314 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_init()
2315 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_init()
2319 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2326 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2331 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2336 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_init()
2342 static void mvneta_txq_deinit(struct mvneta_port *pp, in mvneta_txq_deinit() argument
2348 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_deinit()
2352 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_deinit()
2362 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_deinit()
2363 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_deinit()
2366 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_deinit()
2367 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_deinit()
2371 static void mvneta_cleanup_txqs(struct mvneta_port *pp) in mvneta_cleanup_txqs() argument
2376 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
2380 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) in mvneta_cleanup_rxqs() argument
2385 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); in mvneta_cleanup_rxqs()
2390 static int mvneta_setup_rxqs(struct mvneta_port *pp) in mvneta_setup_rxqs() argument
2395 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); in mvneta_setup_rxqs()
2397 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
2399 mvneta_cleanup_rxqs(pp); in mvneta_setup_rxqs()
2408 static int mvneta_setup_txqs(struct mvneta_port *pp) in mvneta_setup_txqs() argument
2413 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
2415 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
2417 mvneta_cleanup_txqs(pp); in mvneta_setup_txqs()
2425 static void mvneta_start_dev(struct mvneta_port *pp) in mvneta_start_dev() argument
2427 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
2428 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
2431 mvneta_port_enable(pp); in mvneta_start_dev()
2434 napi_enable(&pp->napi); in mvneta_start_dev()
2437 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_start_dev()
2441 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_start_dev()
2446 phy_start(pp->phy_dev); in mvneta_start_dev()
2447 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
2450 static void mvneta_stop_dev(struct mvneta_port *pp) in mvneta_stop_dev() argument
2452 phy_stop(pp->phy_dev); in mvneta_stop_dev()
2454 napi_disable(&pp->napi); in mvneta_stop_dev()
2456 netif_carrier_off(pp->dev); in mvneta_stop_dev()
2458 mvneta_port_down(pp); in mvneta_stop_dev()
2459 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
2462 mvneta_port_disable(pp); in mvneta_stop_dev()
2465 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_stop_dev()
2466 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_stop_dev()
2469 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_stop_dev()
2470 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_stop_dev()
2471 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_stop_dev()
2473 mvneta_tx_reset(pp); in mvneta_stop_dev()
2474 mvneta_rx_reset(pp); in mvneta_stop_dev()
2503 struct mvneta_port *pp = netdev_priv(dev); in mvneta_change_mtu() local
2520 mvneta_stop_dev(pp); in mvneta_change_mtu()
2522 mvneta_cleanup_txqs(pp); in mvneta_change_mtu()
2523 mvneta_cleanup_rxqs(pp); in mvneta_change_mtu()
2525 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
2526 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + in mvneta_change_mtu()
2529 ret = mvneta_setup_rxqs(pp); in mvneta_change_mtu()
2535 ret = mvneta_setup_txqs(pp); in mvneta_change_mtu()
2541 mvneta_start_dev(pp); in mvneta_change_mtu()
2542 mvneta_port_up(pp); in mvneta_change_mtu()
2552 struct mvneta_port *pp = netdev_priv(dev); in mvneta_fix_features() local
2554 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
2558 pp->tx_csum_limit); in mvneta_fix_features()
2565 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) in mvneta_get_mac_addr() argument
2569 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); in mvneta_get_mac_addr()
2570 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); in mvneta_get_mac_addr()
2582 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_mac_addr() local
2590 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
2593 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); in mvneta_set_mac_addr()
2601 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_adjust_link() local
2602 struct phy_device *phydev = pp->phy_dev; in mvneta_adjust_link()
2606 if ((pp->speed != phydev->speed) || in mvneta_adjust_link()
2607 (pp->duplex != phydev->duplex)) { in mvneta_adjust_link()
2610 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_adjust_link()
2623 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_adjust_link()
2625 pp->duplex = phydev->duplex; in mvneta_adjust_link()
2626 pp->speed = phydev->speed; in mvneta_adjust_link()
2630 if (phydev->link != pp->link) { in mvneta_adjust_link()
2632 pp->duplex = -1; in mvneta_adjust_link()
2633 pp->speed = 0; in mvneta_adjust_link()
2636 pp->link = phydev->link; in mvneta_adjust_link()
2642 if (!pp->use_inband_status) { in mvneta_adjust_link()
2643 u32 val = mvreg_read(pp, in mvneta_adjust_link()
2647 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_adjust_link()
2650 mvneta_port_up(pp); in mvneta_adjust_link()
2652 if (!pp->use_inband_status) { in mvneta_adjust_link()
2653 u32 val = mvreg_read(pp, in mvneta_adjust_link()
2657 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_adjust_link()
2660 mvneta_port_down(pp); in mvneta_adjust_link()
2666 static int mvneta_mdio_probe(struct mvneta_port *pp) in mvneta_mdio_probe() argument
2670 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, in mvneta_mdio_probe()
2671 pp->phy_interface); in mvneta_mdio_probe()
2673 netdev_err(pp->dev, "could not find the PHY\n"); in mvneta_mdio_probe()
2680 pp->phy_dev = phy_dev; in mvneta_mdio_probe()
2681 pp->link = 0; in mvneta_mdio_probe()
2682 pp->duplex = 0; in mvneta_mdio_probe()
2683 pp->speed = 0; in mvneta_mdio_probe()
2688 static void mvneta_mdio_remove(struct mvneta_port *pp) in mvneta_mdio_remove() argument
2690 phy_disconnect(pp->phy_dev); in mvneta_mdio_remove()
2691 pp->phy_dev = NULL; in mvneta_mdio_remove()
2696 struct mvneta_port *pp = netdev_priv(dev); in mvneta_open() local
2699 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
2700 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + in mvneta_open()
2703 ret = mvneta_setup_rxqs(pp); in mvneta_open()
2707 ret = mvneta_setup_txqs(pp); in mvneta_open()
2712 ret = request_irq(pp->dev->irq, mvneta_isr, 0, in mvneta_open()
2713 MVNETA_DRIVER_NAME, pp); in mvneta_open()
2715 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
2720 netif_carrier_off(pp->dev); in mvneta_open()
2722 ret = mvneta_mdio_probe(pp); in mvneta_open()
2728 mvneta_start_dev(pp); in mvneta_open()
2733 free_irq(pp->dev->irq, pp); in mvneta_open()
2735 mvneta_cleanup_txqs(pp); in mvneta_open()
2737 mvneta_cleanup_rxqs(pp); in mvneta_open()
2744 struct mvneta_port *pp = netdev_priv(dev); in mvneta_stop() local
2746 mvneta_stop_dev(pp); in mvneta_stop()
2747 mvneta_mdio_remove(pp); in mvneta_stop()
2748 free_irq(dev->irq, pp); in mvneta_stop()
2749 mvneta_cleanup_rxqs(pp); in mvneta_stop()
2750 mvneta_cleanup_txqs(pp); in mvneta_stop()
2757 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ioctl() local
2759 if (!pp->phy_dev) in mvneta_ioctl()
2762 return phy_mii_ioctl(pp->phy_dev, ifr, cmd); in mvneta_ioctl()
2770 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_settings() local
2772 if (!pp->phy_dev) in mvneta_ethtool_get_settings()
2775 return phy_ethtool_gset(pp->phy_dev, cmd); in mvneta_ethtool_get_settings()
2781 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_settings() local
2783 if (!pp->phy_dev) in mvneta_ethtool_set_settings()
2786 return phy_ethtool_sset(pp->phy_dev, cmd); in mvneta_ethtool_set_settings()
2793 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_coalesce() local
2797 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
2800 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
2801 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
2805 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
2807 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
2817 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_coalesce() local
2819 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
2820 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
2822 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
2842 struct mvneta_port *pp = netdev_priv(netdev); in mvneta_ethtool_get_ringparam() local
2846 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
2847 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
2853 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_ringparam() local
2857 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
2860 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
2862 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
2864 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
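mvneta_ethtool_set_ringparam() caps the RX request at MVNETA_MAX_RXD and clamps the TX request with clamp_t() (lines 2857-2860), warning when the stored size differs from what was asked for. A helper equivalent to that clamp_t() use; the concrete bounds are the driver's and are not reproduced here:

#include <stdint.h>

/* Keep the requested ring length within [lo, hi]. */
static uint16_t clamp_u16(uint16_t val, uint16_t lo, uint16_t hi)
{
        if (val < lo)
                return lo;
        if (val > hi)
                return hi;
        return val;
}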
2902 static int mvneta_init(struct device *dev, struct mvneta_port *pp) in mvneta_init() argument
2907 mvneta_port_disable(pp); in mvneta_init()
2910 mvneta_defaults_set(pp); in mvneta_init()
2912 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue), in mvneta_init()
2914 if (!pp->txqs) in mvneta_init()
2919 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
2921 txq->size = pp->tx_ring_size; in mvneta_init()
2925 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue), in mvneta_init()
2927 if (!pp->rxqs) in mvneta_init()
2932 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
2934 rxq->size = pp->rx_ring_size; in mvneta_init()
2943 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, in mvneta_conf_mbus_windows() argument
2951 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_conf_mbus_windows()
2952 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_conf_mbus_windows()
2955 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_conf_mbus_windows()
2963 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | in mvneta_conf_mbus_windows()
2966 mvreg_write(pp, MVNETA_WIN_SIZE(i), in mvneta_conf_mbus_windows()
2973 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_conf_mbus_windows()
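mvneta_conf_mbus_windows() first zeroes every address window (lines 2951-2955), then programs one window per DRAM chip-select and accumulates a per-window bit for the MVNETA_BASE_ADDR_ENABLE write at line 2973. An outline of that sequence; field widths are simplified and the polarity of the enable mask is not modeled:

#include <stdint.h>

struct win { uint32_t base, size; };

/* Zero all windows, then program one per chip-select; wr_base/wr_size
 * stand in for the MVNETA_WIN_BASE/SIZE writes. The returned mask
 * feeds the MVNETA_BASE_ADDR_ENABLE write (polarity not modeled). */
static uint32_t conf_windows(void (*wr_base)(int, uint32_t),
                             void (*wr_size)(int, uint32_t),
                             const struct win *cs, int num_cs, int num_win)
{
        uint32_t mask = 0;
        int i;

        for (i = 0; i < num_win; i++) {
                wr_base(i, 0);
                wr_size(i, 0);
        }
        for (i = 0; i < num_cs; i++) {
                wr_base(i, cs[i].base & 0xffff0000);
                wr_size(i, (cs[i].size - 1) & 0xffff0000);
                mask |= 1u << i;
        }
        return mask;
}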
2977 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) in mvneta_port_power_up() argument
2982 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); in mvneta_port_power_up()
2984 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); in mvneta_port_power_up()
2991 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); in mvneta_port_power_up()
2995 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); in mvneta_port_power_up()
3006 if (pp->use_inband_status) in mvneta_port_power_up()
3011 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); in mvneta_port_power_up()
3013 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & in mvneta_port_power_up()
3027 struct mvneta_port *pp; in mvneta_probe() local
3087 pp = netdev_priv(dev); in mvneta_probe()
3088 pp->phy_node = phy_node; in mvneta_probe()
3089 pp->phy_interface = phy_mode; in mvneta_probe()
3092 pp->use_inband_status = (err == 0 && in mvneta_probe()
3095 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
3096 if (IS_ERR(pp->clk)) { in mvneta_probe()
3097 err = PTR_ERR(pp->clk); in mvneta_probe()
3101 clk_prepare_enable(pp->clk); in mvneta_probe()
3104 pp->base = devm_ioremap_resource(&pdev->dev, res); in mvneta_probe()
3105 if (IS_ERR(pp->base)) { in mvneta_probe()
3106 err = PTR_ERR(pp->base); in mvneta_probe()
3111 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
3112 if (!pp->stats) { in mvneta_probe()
3122 mvneta_get_mac_addr(pp, hw_mac_addr); in mvneta_probe()
3133 pp->tx_csum_limit = 1600; in mvneta_probe()
3135 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
3136 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
3138 pp->dev = dev; in mvneta_probe()
3141 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
3145 err = mvneta_port_power_up(pp, phy_mode); in mvneta_probe()
3153 mvneta_conf_mbus_windows(pp, dram_target_info); in mvneta_probe()
3155 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT); in mvneta_probe()
3172 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
3174 if (pp->use_inband_status) { in mvneta_probe()
3177 mvneta_fixed_link_update(pp, phy); in mvneta_probe()
3183 free_percpu(pp->stats); in mvneta_probe()
3185 clk_disable_unprepare(pp->clk); in mvneta_probe()
3199 struct mvneta_port *pp = netdev_priv(dev); in mvneta_remove() local
3202 clk_disable_unprepare(pp->clk); in mvneta_remove()
3203 free_percpu(pp->stats); in mvneta_remove()
3205 of_node_put(pp->phy_node); in mvneta_remove()