Lines matching refs: pp. These are the uses of struct mvneta_port *pp in the Marvell NETA Ethernet driver (drivers/net/ethernet/marvell/mvneta.c); the leading number on each line is the kernel source line, and each use is annotated as a member, argument, or local.

339 	struct mvneta_port	*pp;  member
546 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) in mvreg_write() argument
548 writel(data, pp->base + offset); in mvreg_write()
552 static u32 mvreg_read(struct mvneta_port *pp, u32 offset) in mvreg_read() argument
554 return readl(pp->base + offset); in mvreg_read()
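
These two helpers are the only MMIO touchpoints in the listing; every other mvreg_* call below funnels through them and pp->base, the ioremap'd base of the port's register window. The usual composition is a read-modify-write of a single field, as in the port enable/disable helpers at kernel lines 897-915. A minimal sketch of that pattern; the bit name MVNETA_GMAC0_PORT_ENABLE is assumed from the driver's register definitions:

        u32 val;

        /* Set one control bit without disturbing the rest of the
         * register: read, modify, write back over MMIO.
         */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;        /* assumed bit name */
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
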
575 static void mvneta_mib_counters_clear(struct mvneta_port *pp) in mvneta_mib_counters_clear() argument
582 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); in mvneta_mib_counters_clear()
583 dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); in mvneta_mib_counters_clear()
584 dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); in mvneta_mib_counters_clear()
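
The MIB counter block is clear-on-read, so "clearing" it means reading every register once and discarding the result, which is why kernel lines 582-584 assign to a dummy. A sketch of the loop, assuming a 4-byte register stride and MVNETA_MIB_LATE_COLLISION as the offset of the last in-block counter (the loop bound is not visible in the listing):

        int i;
        u32 dummy;

        /* One dummy read per register zeroes the whole MIB block. */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)      /* assumed bound */
                dummy = mvreg_read(pp, MVNETA_MIB_COUNTERS_BASE + i);
        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
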
591 struct mvneta_port *pp = netdev_priv(dev); in mvneta_get_stats64() local
602 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
639 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, in mvneta_rxq_non_occup_desc_add() argument
647 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
653 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), in mvneta_rxq_non_occup_desc_add()
658 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, in mvneta_rxq_busy_desc_num_get() argument
663 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); in mvneta_rxq_busy_desc_num_get()
670 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, in mvneta_rxq_desc_num_update() argument
679 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
699 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); in mvneta_rxq_desc_num_update()
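
The paired writes at kernel lines 647/653 and 679/699 exist because the status-update register only accepts a small count per write, so larger updates are issued in chunks. A sketch of the non-occupied-descriptor variant; the 255-per-write ceiling is an assumption about the register's field width:

        /* Hardware takes at most 255 new non-occupied descriptors per
         * write; split bigger refills into chunks.
         */
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {      /* 255, assumed */
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                            MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }
        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
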
715 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) in mvneta_max_rx_size_set() argument
719 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_max_rx_size_set()
723 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_max_rx_size_set()
728 static void mvneta_rxq_offset_set(struct mvneta_port *pp, in mvneta_rxq_offset_set() argument
734 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_offset_set()
739 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_offset_set()
746 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, in mvneta_txq_pend_desc_add() argument
756 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_pend_desc_add()
781 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, in mvneta_rxq_buf_size_set() argument
787 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); in mvneta_rxq_buf_size_set()
792 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); in mvneta_rxq_buf_size_set()
796 static void mvneta_rxq_bm_disable(struct mvneta_port *pp, in mvneta_rxq_bm_disable() argument
801 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); in mvneta_rxq_bm_disable()
803 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); in mvneta_rxq_bm_disable()
807 static void mvneta_port_up(struct mvneta_port *pp) in mvneta_port_up() argument
815 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
819 mvreg_write(pp, MVNETA_TXQ_CMD, q_map); in mvneta_port_up()
822 mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); in mvneta_port_up()
826 static void mvneta_port_down(struct mvneta_port *pp) in mvneta_port_down() argument
832 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; in mvneta_port_down()
836 mvreg_write(pp, MVNETA_RXQ_CMD, in mvneta_port_down()
843 netdev_warn(pp->dev, in mvneta_port_down()
850 val = mvreg_read(pp, MVNETA_RXQ_CMD); in mvneta_port_down()
856 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; in mvneta_port_down()
859 mvreg_write(pp, MVNETA_TXQ_CMD, in mvneta_port_down()
866 netdev_warn(pp->dev, in mvneta_port_down()
874 val = mvreg_read(pp, MVNETA_TXQ_CMD); in mvneta_port_down()
882 netdev_warn(pp->dev, in mvneta_port_down()
889 val = mvreg_read(pp, MVNETA_PORT_STATUS); in mvneta_port_down()
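
mvneta_port_down repeats one idiom three times across kernel lines 832-889: issue a disable command, then poll the matching status register in a bounded mdelay loop, and warn through netdev_warn on timeout instead of failing hard. A condensed sketch of the RX leg; the shift and timeout constants are assumptions:

        int count = 0;
        u32 val;

        /* Request stop for all currently-enabled RX queues... */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
        mvreg_write(pp, MVNETA_RXQ_CMD, val << MVNETA_RXQ_DISABLE_SHIFT);

        /* ...then wait, bounded, for the hardware to acknowledge. */
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {        /* assumed */
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped! rx queue cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);
                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & MVNETA_RXQ_ENABLE_MASK);
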
897 static void mvneta_port_enable(struct mvneta_port *pp) in mvneta_port_enable() argument
902 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_enable()
904 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_enable()
908 static void mvneta_port_disable(struct mvneta_port *pp) in mvneta_port_disable() argument
913 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); in mvneta_port_disable()
915 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); in mvneta_port_disable()
923 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument
936 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); in mvneta_set_ucast_table()
940 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_special_mcast_table() argument
953 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); in mvneta_set_special_mcast_table()
958 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_other_mcast_table() argument
964 memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
967 memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); in mvneta_set_other_mcast_table()
973 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); in mvneta_set_other_mcast_table()
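
The three filter-table setters at kernel lines 923-973 share one scheme: each 32-bit table register packs four one-byte entries of the form pass-bit | (rx queue << 1), and queue == -1 resets the table so matching frames are dropped. A sketch of the unicast variant; the 16-byte table span is an assumption:

        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;                        /* drop: clear every entry */
        } else {
                val = 0x1 | (queue << 1);       /* pass bit + target rxq */
                val |= (val << 24) | (val << 16) | (val << 8);  /* 4 entries/reg */
        }

        for (offset = 0; offset <= 0xc; offset += 4)    /* assumed span */
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
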
985 static void mvneta_defaults_set(struct mvneta_port *pp) in mvneta_defaults_set() argument
992 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); in mvneta_defaults_set()
993 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_defaults_set()
994 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_defaults_set()
997 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_defaults_set()
998 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_defaults_set()
999 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_defaults_set()
1000 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); in mvneta_defaults_set()
1003 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); in mvneta_defaults_set()
1009 mvreg_write(pp, MVNETA_CPU_MAP(cpu), in mvneta_defaults_set()
1014 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_defaults_set()
1015 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_defaults_set()
1018 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); in mvneta_defaults_set()
1020 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); in mvneta_defaults_set()
1021 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); in mvneta_defaults_set()
1024 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_defaults_set()
1025 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_defaults_set()
1029 mvreg_write(pp, MVNETA_ACC_MODE, val); in mvneta_defaults_set()
1033 mvreg_write(pp, MVNETA_PORT_CONFIG, val); in mvneta_defaults_set()
1036 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); in mvneta_defaults_set()
1037 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); in mvneta_defaults_set()
1052 mvreg_write(pp, MVNETA_SDMA_CONFIG, val); in mvneta_defaults_set()
1057 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); in mvneta_defaults_set()
1059 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); in mvneta_defaults_set()
1061 if (pp->use_inband_status) { in mvneta_defaults_set()
1062 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_defaults_set()
1069 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_defaults_set()
1070 val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); in mvneta_defaults_set()
1072 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); in mvneta_defaults_set()
1074 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_defaults_set()
1078 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_defaults_set()
1081 mvneta_set_ucast_table(pp, -1); in mvneta_defaults_set()
1082 mvneta_set_special_mcast_table(pp, -1); in mvneta_defaults_set()
1083 mvneta_set_other_mcast_table(pp, -1); in mvneta_defaults_set()
1086 mvreg_write(pp, MVNETA_INTR_ENABLE, in mvneta_defaults_set()
1090 mvneta_mib_counters_clear(pp); in mvneta_defaults_set()
1094 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) in mvneta_txq_max_tx_size_set() argument
1105 val = mvreg_read(pp, MVNETA_TX_MTU); in mvneta_txq_max_tx_size_set()
1108 mvreg_write(pp, MVNETA_TX_MTU, val); in mvneta_txq_max_tx_size_set()
1111 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); in mvneta_txq_max_tx_size_set()
1118 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); in mvneta_txq_max_tx_size_set()
1121 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); in mvneta_txq_max_tx_size_set()
1128 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); in mvneta_txq_max_tx_size_set()
1134 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, in mvneta_set_ucast_addr() argument
1150 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); in mvneta_set_ucast_addr()
1160 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); in mvneta_set_ucast_addr()
1164 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, in mvneta_mac_addr_set() argument
1175 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); in mvneta_mac_addr_set()
1176 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); in mvneta_mac_addr_set()
1180 mvneta_set_ucast_addr(pp, addr[5], queue); in mvneta_mac_addr_set()
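
The writes at kernel lines 1175-1176 use the usual Marvell split of the six MAC bytes across two registers, high bytes first, which mvneta_get_mac_addr (kernel line 2590) reverses on read. A sketch of the packing, assuming addr points at the six-byte address:

        u32 mac_h, mac_l;

        /* Bytes 0..3 land in the high register, bytes 4..5 in the low. */
        mac_h = (addr[0] << 24) | (addr[1] << 16) |
                (addr[2] << 8)  |  addr[3];
        mac_l = (addr[4] << 8)  |  addr[5];

        mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
        mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
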
1186 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, in mvneta_rx_pkts_coal_set() argument
1189 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), in mvneta_rx_pkts_coal_set()
1197 static void mvneta_rx_time_coal_set(struct mvneta_port *pp, in mvneta_rx_time_coal_set() argument
1203 clk_rate = clk_get_rate(pp->clk); in mvneta_rx_time_coal_set()
1206 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); in mvneta_rx_time_coal_set()
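
The clk_get_rate call at kernel line 1203 converts a microsecond coalescing budget into controller clock cycles before programming the per-queue register. A sketch of the conversion, assuming usec holds the requested delay:

        unsigned long clk_rate;
        u32 val;

        /* cycles = cycles-per-usec * requested usecs */
        clk_rate = clk_get_rate(pp->clk);
        val = (clk_rate / 1000000) * usec;

        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
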
1211 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, in mvneta_tx_done_pkts_coal_set() argument
1216 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); in mvneta_tx_done_pkts_coal_set()
1221 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); in mvneta_tx_done_pkts_coal_set()
1235 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, in mvneta_txq_sent_desc_dec() argument
1244 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1249 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); in mvneta_txq_sent_desc_dec()
1253 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, in mvneta_txq_sent_desc_num_get() argument
1259 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); in mvneta_txq_sent_desc_num_get()
1269 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, in mvneta_txq_sent_desc_proc() argument
1275 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); in mvneta_txq_sent_desc_proc()
1279 mvneta_txq_sent_desc_dec(pp, txq, sent_desc); in mvneta_txq_sent_desc_proc()
1314 static void mvneta_rx_error(struct mvneta_port *pp, in mvneta_rx_error() argument
1320 netdev_err(pp->dev, in mvneta_rx_error()
1328 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", in mvneta_rx_error()
1332 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", in mvneta_rx_error()
1336 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", in mvneta_rx_error()
1340 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", in mvneta_rx_error()
1347 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status, in mvneta_rx_csum() argument
1364 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, in mvneta_tx_done_policy() argument
1369 return &pp->txqs[queue]; in mvneta_tx_done_policy()
1373 static void mvneta_txq_bufs_free(struct mvneta_port *pp, in mvneta_txq_bufs_free() argument
1386 dma_unmap_single(pp->dev->dev.parent, in mvneta_txq_bufs_free()
1396 static void mvneta_txq_done(struct mvneta_port *pp, in mvneta_txq_done() argument
1399 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done()
1402 tx_done = mvneta_txq_sent_desc_proc(pp, txq); in mvneta_txq_done()
1406 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done()
1416 static void *mvneta_frag_alloc(const struct mvneta_port *pp) in mvneta_frag_alloc() argument
1418 if (likely(pp->frag_size <= PAGE_SIZE)) in mvneta_frag_alloc()
1419 return netdev_alloc_frag(pp->frag_size); in mvneta_frag_alloc()
1421 return kmalloc(pp->frag_size, GFP_ATOMIC); in mvneta_frag_alloc()
1424 static void mvneta_frag_free(const struct mvneta_port *pp, void *data) in mvneta_frag_free() argument
1426 if (likely(pp->frag_size <= PAGE_SIZE)) in mvneta_frag_free()
1433 static int mvneta_rx_refill(struct mvneta_port *pp, in mvneta_rx_refill() argument
1440 data = mvneta_frag_alloc(pp); in mvneta_rx_refill()
1444 phys_addr = dma_map_single(pp->dev->dev.parent, data, in mvneta_rx_refill()
1445 MVNETA_RX_BUF_SIZE(pp->pkt_size), in mvneta_rx_refill()
1447 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { in mvneta_rx_refill()
1448 mvneta_frag_free(pp, data); in mvneta_rx_refill()
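
Kernel lines 1433-1448 show the refill path's alloc, map, check sequence: allocate a fragment, DMA-map it for the device, and on mapping failure undo the allocation before reporting -ENOMEM. A sketch; the final descriptor fill is shown as an assumed helper storing the DMA and virtual addresses into the descriptor:

        dma_addr_t phys_addr;
        void *data;

        data = mvneta_frag_alloc(pp);
        if (!data)
                return -ENOMEM;

        phys_addr = dma_map_single(pp->dev->dev.parent, data,
                                   MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                   DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
                mvneta_frag_free(pp, data);     /* undo alloc on map failure */
                return -ENOMEM;
        }

        mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);     /* assumed helper */
        return 0;
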
1457 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) in mvneta_skb_tx_csum() argument
1488 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, in mvneta_rxq_drop_pkts() argument
1493 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rxq_drop_pkts()
1498 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, in mvneta_rxq_drop_pkts()
1499 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); in mvneta_rxq_drop_pkts()
1500 mvneta_frag_free(pp, data); in mvneta_rxq_drop_pkts()
1504 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rxq_drop_pkts()
1508 static int mvneta_rx(struct mvneta_port *pp, int rx_todo, in mvneta_rx() argument
1511 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_rx()
1512 struct net_device *dev = pp->dev; in mvneta_rx()
1518 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); in mvneta_rx()
1544 mvneta_rx_error(pp, rx_desc); in mvneta_rx()
1565 mvneta_rx_csum(pp, rx_status, skb); in mvneta_rx()
1576 err = mvneta_rx_refill(pp, rx_desc); in mvneta_rx()
1583 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); in mvneta_rx()
1589 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); in mvneta_rx()
1603 mvneta_rx_csum(pp, rx_status, skb); in mvneta_rx()
1609 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_rx()
1618 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); in mvneta_rx()
1625 struct mvneta_port *pp, struct mvneta_tx_queue *txq) in mvneta_tso_put_hdr() argument
1633 tx_desc->command = mvneta_skb_tx_csum(pp, skb); in mvneta_tso_put_hdr()
1677 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx_tso() local
1706 mvneta_tso_put_hdr(skb, pp, txq); in mvneta_tx_tso()
1734 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_tso()
1744 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, in mvneta_tx_frag_process() argument
1758 dma_map_single(pp->dev->dev.parent, addr, in mvneta_tx_frag_process()
1761 if (dma_mapping_error(pp->dev->dev.parent, in mvneta_tx_frag_process()
1787 dma_unmap_single(pp->dev->dev.parent, in mvneta_tx_frag_process()
1800 struct mvneta_port *pp = netdev_priv(dev); in mvneta_tx() local
1802 struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; in mvneta_tx()
1821 tx_cmd = mvneta_skb_tx_csum(pp, skb); in mvneta_tx()
1848 if (mvneta_tx_frag_process(pp, skb, txq)) { in mvneta_tx()
1861 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); in mvneta_tx()
1865 mvneta_txq_pend_desc_add(pp, txq, frags); in mvneta_tx()
1884 static void mvneta_txq_done_force(struct mvneta_port *pp, in mvneta_txq_done_force() argument
1890 mvneta_txq_bufs_free(pp, txq, tx_done); in mvneta_txq_done_force()
1901 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) in mvneta_tx_done_gbe() argument
1907 txq = mvneta_tx_done_policy(pp, cause_tx_done); in mvneta_tx_done_gbe()
1909 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_tx_done_gbe()
1913 mvneta_txq_done(pp, txq); in mvneta_tx_done_gbe()
1948 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, in mvneta_set_special_mcast_addr() argument
1961 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST in mvneta_set_special_mcast_addr()
1971 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, in mvneta_set_special_mcast_addr()
1983 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, in mvneta_set_other_mcast_addr() argument
1994 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); in mvneta_set_other_mcast_addr()
2004 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); in mvneta_set_other_mcast_addr()
2016 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, in mvneta_mcast_addr_set() argument
2022 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); in mvneta_mcast_addr_set()
2028 if (pp->mcast_count[crc_result] == 0) { in mvneta_mcast_addr_set()
2029 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", in mvneta_mcast_addr_set()
2034 pp->mcast_count[crc_result]--; in mvneta_mcast_addr_set()
2035 if (pp->mcast_count[crc_result] != 0) { in mvneta_mcast_addr_set()
2036 netdev_info(pp->dev, in mvneta_mcast_addr_set()
2038 pp->mcast_count[crc_result], crc_result); in mvneta_mcast_addr_set()
2042 pp->mcast_count[crc_result]++; in mvneta_mcast_addr_set()
2044 mvneta_set_other_mcast_addr(pp, crc_result, queue); in mvneta_mcast_addr_set()
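
The pp->mcast_count[] updates at kernel lines 2028-2042 are reference counts: "other" multicast addresses hash to an 8-bit CRC, several addresses can collide on one table entry, and the entry may only be cleared once the last colliding address is removed. A condensed sketch; mvneta_addr_crc is an assumed name for the hashing helper:

        unsigned char crc_result = mvneta_addr_crc(p_addr);     /* assumed helper */

        if (queue == -1) {
                if (pp->mcast_count[crc_result] == 0)
                        return -EINVAL;         /* nothing registered here */
                if (--pp->mcast_count[crc_result] != 0)
                        return 0;               /* entry still shared, keep it */
        } else {
                pp->mcast_count[crc_result]++;
        }
        mvneta_set_other_mcast_addr(pp, crc_result, queue);
        return 0;
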
2050 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, in mvneta_rx_unicast_promisc_set() argument
2055 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); in mvneta_rx_unicast_promisc_set()
2057 val = mvreg_read(pp, MVNETA_TYPE_PRIO); in mvneta_rx_unicast_promisc_set()
2064 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); in mvneta_rx_unicast_promisc_set()
2065 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); in mvneta_rx_unicast_promisc_set()
2072 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); in mvneta_rx_unicast_promisc_set()
2073 mvreg_write(pp, MVNETA_TYPE_PRIO, val); in mvneta_rx_unicast_promisc_set()
2079 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_rx_mode() local
2084 mvneta_rx_unicast_promisc_set(pp, 1); in mvneta_set_rx_mode()
2085 mvneta_set_ucast_table(pp, rxq_def); in mvneta_set_rx_mode()
2086 mvneta_set_special_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2087 mvneta_set_other_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2090 mvneta_rx_unicast_promisc_set(pp, 0); in mvneta_set_rx_mode()
2091 mvneta_set_ucast_table(pp, -1); in mvneta_set_rx_mode()
2092 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); in mvneta_set_rx_mode()
2096 mvneta_set_special_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2097 mvneta_set_other_mcast_table(pp, rxq_def); in mvneta_set_rx_mode()
2100 mvneta_set_special_mcast_table(pp, -1); in mvneta_set_rx_mode()
2101 mvneta_set_other_mcast_table(pp, -1); in mvneta_set_rx_mode()
2105 mvneta_mcast_addr_set(pp, ha->addr, in mvneta_set_rx_mode()
2118 disable_percpu_irq(port->pp->dev->irq); in mvneta_isr()
2124 static int mvneta_fixed_link_update(struct mvneta_port *pp, in mvneta_fixed_link_update() argument
2129 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); in mvneta_fixed_link_update()
2157 struct mvneta_port *pp = netdev_priv(napi->dev); in mvneta_poll() local
2158 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); in mvneta_poll()
2160 if (!netif_running(pp->dev)) { in mvneta_poll()
2166 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); in mvneta_poll()
2168 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); in mvneta_poll()
2170 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_poll()
2171 if (pp->use_inband_status && (cause_misc & in mvneta_poll()
2175 mvneta_fixed_link_update(pp, pp->phy_dev); in mvneta_poll()
2181 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); in mvneta_poll()
2189 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); in mvneta_poll()
2195 enable_percpu_irq(pp->dev->irq, 0); in mvneta_poll()
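
mvneta_poll (kernel lines 2157-2195) follows the standard NAPI contract: read and acknowledge the cause registers, complete TX work first, run RX against the budget, and re-enable the per-CPU interrupt only when the budget was not exhausted. A reduced skeleton of that flow; the RX mask name and the napi_complete call are assumptions for this kernel era:

        int rx_done = 0;
        u32 cause_rx_tx;

        cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);

        if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)
                mvneta_tx_done_gbe(pp, cause_rx_tx & MVNETA_TX_INTR_MASK_ALL);

        if (cause_rx_tx & MVNETA_RX_INTR_MASK_ALL)      /* assumed mask name */
                rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);

        if (rx_done < budget) {
                /* All work done: leave polling mode, unmask the IRQ. */
                napi_complete(napi);
                enable_percpu_irq(pp->dev->irq, 0);
        }
        return rx_done;
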
2203 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, in mvneta_rxq_fill() argument
2210 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { in mvneta_rxq_fill()
2211 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", in mvneta_rxq_fill()
2220 mvneta_rxq_non_occup_desc_add(pp, rxq, i); in mvneta_rxq_fill()
2226 static void mvneta_tx_reset(struct mvneta_port *pp) in mvneta_tx_reset() argument
2232 mvneta_txq_done_force(pp, &pp->txqs[queue]); in mvneta_tx_reset()
2234 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); in mvneta_tx_reset()
2235 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); in mvneta_tx_reset()
2238 static void mvneta_rx_reset(struct mvneta_port *pp) in mvneta_rx_reset() argument
2240 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); in mvneta_rx_reset()
2241 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); in mvneta_rx_reset()
2247 static int mvneta_rxq_init(struct mvneta_port *pp, in mvneta_rxq_init() argument
2251 rxq->size = pp->rx_ring_size; in mvneta_rxq_init()
2254 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_rxq_init()
2266 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); in mvneta_rxq_init()
2267 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); in mvneta_rxq_init()
2270 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); in mvneta_rxq_init()
2273 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_rxq_init()
2274 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_rxq_init()
2277 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); in mvneta_rxq_init()
2278 mvneta_rxq_bm_disable(pp, rxq); in mvneta_rxq_init()
2279 mvneta_rxq_fill(pp, rxq, rxq->size); in mvneta_rxq_init()
2285 static void mvneta_rxq_deinit(struct mvneta_port *pp, in mvneta_rxq_deinit() argument
2288 mvneta_rxq_drop_pkts(pp, rxq); in mvneta_rxq_deinit()
2291 dma_free_coherent(pp->dev->dev.parent, in mvneta_rxq_deinit()
2303 static int mvneta_txq_init(struct mvneta_port *pp, in mvneta_txq_init() argument
2306 txq->size = pp->tx_ring_size; in mvneta_txq_init()
2317 txq->descs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2330 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); in mvneta_txq_init()
2331 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); in mvneta_txq_init()
2334 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); in mvneta_txq_init()
2335 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); in mvneta_txq_init()
2339 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2346 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2351 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_init()
2356 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_txq_init()
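
The dma_free_coherent calls inside mvneta_txq_init (kernel lines 2339 and 2351) are the error unwind for its staged allocations: descriptor ring, then the per-descriptor bookkeeping array, then TSO header buffers, each failure releasing everything acquired before it. A condensed sketch of the staging, rewritten with goto unwind for brevity; MVNETA_DESC_ALIGNED_SIZE and TSO_HEADER_SIZE are taken on faith from the driver:

        txq->size = pp->tx_ring_size;

        txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                        txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                        &txq->descs_phys, GFP_KERNEL);
        if (!txq->descs)
                return -ENOMEM;

        txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), GFP_KERNEL);
        if (!txq->tx_skb)
                goto err_free_descs;            /* unwind stage 1 */

        txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
                                           txq->size * TSO_HEADER_SIZE,
                                           &txq->tso_hdrs_phys, GFP_KERNEL);
        if (!txq->tso_hdrs)
                goto err_free_skb;              /* unwind stages 1 and 2 */
        return 0;

err_free_skb:
        kfree(txq->tx_skb);
err_free_descs:
        dma_free_coherent(pp->dev->dev.parent,
                          txq->size * MVNETA_DESC_ALIGNED_SIZE,
                          txq->descs, txq->descs_phys);
        return -ENOMEM;
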
2362 static void mvneta_txq_deinit(struct mvneta_port *pp, in mvneta_txq_deinit() argument
2368 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_deinit()
2372 dma_free_coherent(pp->dev->dev.parent, in mvneta_txq_deinit()
2382 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); in mvneta_txq_deinit()
2383 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); in mvneta_txq_deinit()
2386 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); in mvneta_txq_deinit()
2387 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); in mvneta_txq_deinit()
2391 static void mvneta_cleanup_txqs(struct mvneta_port *pp) in mvneta_cleanup_txqs() argument
2396 mvneta_txq_deinit(pp, &pp->txqs[queue]); in mvneta_cleanup_txqs()
2400 static void mvneta_cleanup_rxqs(struct mvneta_port *pp) in mvneta_cleanup_rxqs() argument
2402 mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]); in mvneta_cleanup_rxqs()
2407 static int mvneta_setup_rxqs(struct mvneta_port *pp) in mvneta_setup_rxqs() argument
2409 int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]); in mvneta_setup_rxqs()
2411 netdev_err(pp->dev, "%s: can't create rxq=%d\n", in mvneta_setup_rxqs()
2413 mvneta_cleanup_rxqs(pp); in mvneta_setup_rxqs()
2421 static int mvneta_setup_txqs(struct mvneta_port *pp) in mvneta_setup_txqs() argument
2426 int err = mvneta_txq_init(pp, &pp->txqs[queue]); in mvneta_setup_txqs()
2428 netdev_err(pp->dev, "%s: can't create txq=%d\n", in mvneta_setup_txqs()
2430 mvneta_cleanup_txqs(pp); in mvneta_setup_txqs()
2438 static void mvneta_start_dev(struct mvneta_port *pp) in mvneta_start_dev() argument
2442 mvneta_max_rx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
2443 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); in mvneta_start_dev()
2446 mvneta_port_enable(pp); in mvneta_start_dev()
2450 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
2456 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_start_dev()
2460 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_start_dev()
2465 phy_start(pp->phy_dev); in mvneta_start_dev()
2466 netif_tx_start_all_queues(pp->dev); in mvneta_start_dev()
2469 static void mvneta_stop_dev(struct mvneta_port *pp) in mvneta_stop_dev() argument
2473 phy_stop(pp->phy_dev); in mvneta_stop_dev()
2476 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
2481 netif_carrier_off(pp->dev); in mvneta_stop_dev()
2483 mvneta_port_down(pp); in mvneta_stop_dev()
2484 netif_tx_stop_all_queues(pp->dev); in mvneta_stop_dev()
2487 mvneta_port_disable(pp); in mvneta_stop_dev()
2490 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); in mvneta_stop_dev()
2491 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); in mvneta_stop_dev()
2494 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_stop_dev()
2495 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_stop_dev()
2496 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_stop_dev()
2498 mvneta_tx_reset(pp); in mvneta_stop_dev()
2499 mvneta_rx_reset(pp); in mvneta_stop_dev()
2528 struct mvneta_port *pp = netdev_priv(dev); in mvneta_change_mtu() local
2545 mvneta_stop_dev(pp); in mvneta_change_mtu()
2547 mvneta_cleanup_txqs(pp); in mvneta_change_mtu()
2548 mvneta_cleanup_rxqs(pp); in mvneta_change_mtu()
2550 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); in mvneta_change_mtu()
2551 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + in mvneta_change_mtu()
2554 ret = mvneta_setup_rxqs(pp); in mvneta_change_mtu()
2560 ret = mvneta_setup_txqs(pp); in mvneta_change_mtu()
2566 mvneta_start_dev(pp); in mvneta_change_mtu()
2567 mvneta_port_up(pp); in mvneta_change_mtu()
2577 struct mvneta_port *pp = netdev_priv(dev); in mvneta_fix_features() local
2579 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { in mvneta_fix_features()
2583 pp->tx_csum_limit); in mvneta_fix_features()
2590 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) in mvneta_get_mac_addr() argument
2594 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW); in mvneta_get_mac_addr()
2595 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH); in mvneta_get_mac_addr()
2607 struct mvneta_port *pp = netdev_priv(dev); in mvneta_set_mac_addr() local
2615 mvneta_mac_addr_set(pp, dev->dev_addr, -1); in mvneta_set_mac_addr()
2618 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); in mvneta_set_mac_addr()
2626 struct mvneta_port *pp = netdev_priv(ndev); in mvneta_adjust_link() local
2627 struct phy_device *phydev = pp->phy_dev; in mvneta_adjust_link()
2631 if ((pp->speed != phydev->speed) || in mvneta_adjust_link()
2632 (pp->duplex != phydev->duplex)) { in mvneta_adjust_link()
2635 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); in mvneta_adjust_link()
2648 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); in mvneta_adjust_link()
2650 pp->duplex = phydev->duplex; in mvneta_adjust_link()
2651 pp->speed = phydev->speed; in mvneta_adjust_link()
2655 if (phydev->link != pp->link) { in mvneta_adjust_link()
2657 pp->duplex = -1; in mvneta_adjust_link()
2658 pp->speed = 0; in mvneta_adjust_link()
2661 pp->link = phydev->link; in mvneta_adjust_link()
2667 if (!pp->use_inband_status) { in mvneta_adjust_link()
2668 u32 val = mvreg_read(pp, in mvneta_adjust_link()
2672 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_adjust_link()
2675 mvneta_port_up(pp); in mvneta_adjust_link()
2677 if (!pp->use_inband_status) { in mvneta_adjust_link()
2678 u32 val = mvreg_read(pp, in mvneta_adjust_link()
2682 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, in mvneta_adjust_link()
2685 mvneta_port_down(pp); in mvneta_adjust_link()
2691 static int mvneta_mdio_probe(struct mvneta_port *pp) in mvneta_mdio_probe() argument
2695 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, in mvneta_mdio_probe()
2696 pp->phy_interface); in mvneta_mdio_probe()
2698 netdev_err(pp->dev, "could not find the PHY\n"); in mvneta_mdio_probe()
2705 pp->phy_dev = phy_dev; in mvneta_mdio_probe()
2706 pp->link = 0; in mvneta_mdio_probe()
2707 pp->duplex = 0; in mvneta_mdio_probe()
2708 pp->speed = 0; in mvneta_mdio_probe()
2713 static void mvneta_mdio_remove(struct mvneta_port *pp) in mvneta_mdio_remove() argument
2715 phy_disconnect(pp->phy_dev); in mvneta_mdio_remove()
2716 pp->phy_dev = NULL; in mvneta_mdio_remove()
2721 struct mvneta_port *pp = arg; in mvneta_percpu_enable() local
2723 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); in mvneta_percpu_enable()
2728 struct mvneta_port *pp = arg; in mvneta_percpu_disable() local
2730 disable_percpu_irq(pp->dev->irq); in mvneta_percpu_disable()
2733 static void mvneta_percpu_elect(struct mvneta_port *pp) in mvneta_percpu_elect() argument
2745 pp, true); in mvneta_percpu_elect()
2749 pp, true); in mvneta_percpu_elect()
2757 struct mvneta_port *pp = container_of(nfb, struct mvneta_port, in mvneta_percpu_notifier() local
2760 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_percpu_notifier()
2765 netif_tx_stop_all_queues(pp->dev); in mvneta_percpu_notifier()
2773 per_cpu_ptr(pp->ports, other_cpu); in mvneta_percpu_notifier()
2780 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_percpu_notifier()
2781 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_percpu_notifier()
2782 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_percpu_notifier()
2788 mvneta_percpu_elect(pp); in mvneta_percpu_notifier()
2791 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_percpu_notifier()
2795 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_percpu_notifier()
2799 netif_tx_start_all_queues(pp->dev); in mvneta_percpu_notifier()
2803 netif_tx_stop_all_queues(pp->dev); in mvneta_percpu_notifier()
2805 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); in mvneta_percpu_notifier()
2806 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); in mvneta_percpu_notifier()
2807 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); in mvneta_percpu_notifier()
2815 pp, true); in mvneta_percpu_notifier()
2821 mvneta_percpu_elect(pp); in mvneta_percpu_notifier()
2823 mvreg_write(pp, MVNETA_INTR_NEW_MASK, in mvneta_percpu_notifier()
2827 mvreg_write(pp, MVNETA_INTR_MISC_MASK, in mvneta_percpu_notifier()
2831 netif_tx_start_all_queues(pp->dev); in mvneta_percpu_notifier()
2840 struct mvneta_port *pp = netdev_priv(dev); in mvneta_open() local
2843 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); in mvneta_open()
2844 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + in mvneta_open()
2847 ret = mvneta_setup_rxqs(pp); in mvneta_open()
2851 ret = mvneta_setup_txqs(pp); in mvneta_open()
2856 ret = request_percpu_irq(pp->dev->irq, mvneta_isr, in mvneta_open()
2857 MVNETA_DRIVER_NAME, pp->ports); in mvneta_open()
2859 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq); in mvneta_open()
2869 mvneta_percpu_disable(pp); in mvneta_open()
2872 mvneta_percpu_elect(pp); in mvneta_open()
2877 register_cpu_notifier(&pp->cpu_notifier); in mvneta_open()
2880 netif_carrier_off(pp->dev); in mvneta_open()
2882 ret = mvneta_mdio_probe(pp); in mvneta_open()
2888 mvneta_start_dev(pp); in mvneta_open()
2893 free_percpu_irq(pp->dev->irq, pp->ports); in mvneta_open()
2895 mvneta_cleanup_txqs(pp); in mvneta_open()
2897 mvneta_cleanup_rxqs(pp); in mvneta_open()
2904 struct mvneta_port *pp = netdev_priv(dev); in mvneta_stop() local
2907 mvneta_stop_dev(pp); in mvneta_stop()
2908 mvneta_mdio_remove(pp); in mvneta_stop()
2909 unregister_cpu_notifier(&pp->cpu_notifier); in mvneta_stop()
2911 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); in mvneta_stop()
2912 free_percpu_irq(dev->irq, pp->ports); in mvneta_stop()
2913 mvneta_cleanup_rxqs(pp); in mvneta_stop()
2914 mvneta_cleanup_txqs(pp); in mvneta_stop()
2921 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ioctl() local
2923 if (!pp->phy_dev) in mvneta_ioctl()
2926 return phy_mii_ioctl(pp->phy_dev, ifr, cmd); in mvneta_ioctl()
2934 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_settings() local
2936 if (!pp->phy_dev) in mvneta_ethtool_get_settings()
2939 return phy_ethtool_gset(pp->phy_dev, cmd); in mvneta_ethtool_get_settings()
2945 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_settings() local
2947 if (!pp->phy_dev) in mvneta_ethtool_set_settings()
2950 return phy_ethtool_sset(pp->phy_dev, cmd); in mvneta_ethtool_set_settings()
2957 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_coalesce() local
2961 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_ethtool_set_coalesce()
2964 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); in mvneta_ethtool_set_coalesce()
2965 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); in mvneta_ethtool_set_coalesce()
2969 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_ethtool_set_coalesce()
2971 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); in mvneta_ethtool_set_coalesce()
2981 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_coalesce() local
2983 c->rx_coalesce_usecs = pp->rxqs[0].time_coal; in mvneta_ethtool_get_coalesce()
2984 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal; in mvneta_ethtool_get_coalesce()
2986 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal; in mvneta_ethtool_get_coalesce()
3006 struct mvneta_port *pp = netdev_priv(netdev); in mvneta_ethtool_get_ringparam() local
3010 ring->rx_pending = pp->rx_ring_size; in mvneta_ethtool_get_ringparam()
3011 ring->tx_pending = pp->tx_ring_size; in mvneta_ethtool_get_ringparam()
3017 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_set_ringparam() local
3021 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ? in mvneta_ethtool_set_ringparam()
3024 pp->tx_ring_size = clamp_t(u16, ring->tx_pending, in mvneta_ethtool_set_ringparam()
3026 if (pp->tx_ring_size != ring->tx_pending) in mvneta_ethtool_set_ringparam()
3028 pp->tx_ring_size, ring->tx_pending); in mvneta_ethtool_set_ringparam()
3054 static void mvneta_ethtool_update_stats(struct mvneta_port *pp) in mvneta_ethtool_update_stats() argument
3057 void __iomem *base = pp->base; in mvneta_ethtool_update_stats()
3078 pp->ethtool_stats[i] += val; in mvneta_ethtool_update_stats()
3085 struct mvneta_port *pp = netdev_priv(dev); in mvneta_ethtool_get_stats() local
3088 mvneta_ethtool_update_stats(pp); in mvneta_ethtool_get_stats()
3091 *data++ = pp->ethtool_stats[i]; in mvneta_ethtool_get_stats()
3128 static int mvneta_init(struct device *dev, struct mvneta_port *pp) in mvneta_init() argument
3133 mvneta_port_disable(pp); in mvneta_init()
3136 mvneta_defaults_set(pp); in mvneta_init()
3138 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue), in mvneta_init()
3140 if (!pp->txqs) in mvneta_init()
3145 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_init()
3147 txq->size = pp->tx_ring_size; in mvneta_init()
3151 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue), in mvneta_init()
3153 if (!pp->rxqs) in mvneta_init()
3158 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_init()
3160 rxq->size = pp->rx_ring_size; in mvneta_init()
3169 static void mvneta_conf_mbus_windows(struct mvneta_port *pp, in mvneta_conf_mbus_windows() argument
3177 mvreg_write(pp, MVNETA_WIN_BASE(i), 0); in mvneta_conf_mbus_windows()
3178 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); in mvneta_conf_mbus_windows()
3181 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); in mvneta_conf_mbus_windows()
3189 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | in mvneta_conf_mbus_windows()
3192 mvreg_write(pp, MVNETA_WIN_SIZE(i), in mvneta_conf_mbus_windows()
3199 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); in mvneta_conf_mbus_windows()
3200 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); in mvneta_conf_mbus_windows()
3204 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) in mvneta_port_power_up() argument
3209 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); in mvneta_port_power_up()
3211 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); in mvneta_port_power_up()
3218 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); in mvneta_port_power_up()
3222 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); in mvneta_port_power_up()
3233 if (pp->use_inband_status) in mvneta_port_power_up()
3238 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); in mvneta_port_power_up()
3240 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & in mvneta_port_power_up()
3254 struct mvneta_port *pp; in mvneta_probe() local
3308 pp = netdev_priv(dev); in mvneta_probe()
3309 pp->phy_node = phy_node; in mvneta_probe()
3310 pp->phy_interface = phy_mode; in mvneta_probe()
3313 pp->use_inband_status = (err == 0 && in mvneta_probe()
3315 pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; in mvneta_probe()
3317 pp->clk = devm_clk_get(&pdev->dev, NULL); in mvneta_probe()
3318 if (IS_ERR(pp->clk)) { in mvneta_probe()
3319 err = PTR_ERR(pp->clk); in mvneta_probe()
3323 clk_prepare_enable(pp->clk); in mvneta_probe()
3326 pp->base = devm_ioremap_resource(&pdev->dev, res); in mvneta_probe()
3327 if (IS_ERR(pp->base)) { in mvneta_probe()
3328 err = PTR_ERR(pp->base); in mvneta_probe()
3333 pp->ports = alloc_percpu(struct mvneta_pcpu_port); in mvneta_probe()
3334 if (!pp->ports) { in mvneta_probe()
3340 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats); in mvneta_probe()
3341 if (!pp->stats) { in mvneta_probe()
3351 mvneta_get_mac_addr(pp, hw_mac_addr); in mvneta_probe()
3375 pp->tx_csum_limit = tx_csum_limit; in mvneta_probe()
3377 pp->tx_ring_size = MVNETA_MAX_TXD; in mvneta_probe()
3378 pp->rx_ring_size = MVNETA_MAX_RXD; in mvneta_probe()
3380 pp->dev = dev; in mvneta_probe()
3383 err = mvneta_init(&pdev->dev, pp); in mvneta_probe()
3387 err = mvneta_port_power_up(pp, phy_mode); in mvneta_probe()
3395 mvneta_conf_mbus_windows(pp, dram_target_info); in mvneta_probe()
3398 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
3401 port->pp = pp; in mvneta_probe()
3419 platform_set_drvdata(pdev, pp->dev); in mvneta_probe()
3421 if (pp->use_inband_status) { in mvneta_probe()
3424 mvneta_fixed_link_update(pp, phy); in mvneta_probe()
3432 free_percpu(pp->stats); in mvneta_probe()
3434 free_percpu(pp->ports); in mvneta_probe()
3436 clk_disable_unprepare(pp->clk); in mvneta_probe()
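
The tail of mvneta_probe (kernel lines 3432-3436) is the usual goto-unwind ladder: every resource acquired before a later failure is released in reverse order under its own label. A schematic of the ordering implied by the listing; the label names are assumptions:

err_free_stats:
        free_percpu(pp->stats);                 /* kernel line 3432 */
err_free_ports:
        free_percpu(pp->ports);                 /* kernel line 3434 */
err_clk:
        clk_disable_unprepare(pp->clk);         /* kernel line 3436 */
err_free_netdev:
        free_netdev(dev);                       /* assumed final stage */
        return err;
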
3450 struct mvneta_port *pp = netdev_priv(dev); in mvneta_remove() local
3453 clk_disable_unprepare(pp->clk); in mvneta_remove()
3454 free_percpu(pp->ports); in mvneta_remove()
3455 free_percpu(pp->stats); in mvneta_remove()
3457 of_node_put(pp->phy_node); in mvneta_remove()