root/drivers/net/ethernet/marvell/mvneta.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. mvreg_write
  2. mvreg_read
  3. mvneta_txq_inc_get
  4. mvneta_txq_inc_put
  5. mvneta_mib_counters_clear
  6. mvneta_get_stats64
  7. mvneta_rxq_desc_is_first_last
  8. mvneta_rxq_non_occup_desc_add
  9. mvneta_rxq_busy_desc_num_get
  10. mvneta_rxq_desc_num_update
  11. mvneta_rxq_next_desc_get
  12. mvneta_max_rx_size_set
  13. mvneta_rxq_offset_set
  14. mvneta_txq_pend_desc_add
  15. mvneta_txq_next_desc_get
  16. mvneta_txq_desc_put
  17. mvneta_rxq_buf_size_set
  18. mvneta_rxq_bm_disable
  19. mvneta_rxq_bm_enable
  20. mvneta_rxq_long_pool_set
  21. mvneta_rxq_short_pool_set
  22. mvneta_bm_pool_bufsize_set
  23. mvneta_mbus_io_win_set
  24. mvneta_bm_port_mbus_init
  25. mvneta_bm_port_init
  26. mvneta_bm_update_mtu
  27. mvneta_port_up
  28. mvneta_port_down
  29. mvneta_port_enable
  30. mvneta_port_disable
  31. mvneta_set_ucast_table
  32. mvneta_set_special_mcast_table
  33. mvneta_set_other_mcast_table
  34. mvneta_percpu_unmask_interrupt
  35. mvneta_percpu_mask_interrupt
  36. mvneta_percpu_clear_intr_cause
  37. mvneta_defaults_set
  38. mvneta_txq_max_tx_size_set
  39. mvneta_set_ucast_addr
  40. mvneta_mac_addr_set
  41. mvneta_rx_pkts_coal_set
  42. mvneta_rx_time_coal_set
  43. mvneta_tx_done_pkts_coal_set
  44. mvneta_rx_desc_fill
  45. mvneta_txq_sent_desc_dec
  46. mvneta_txq_sent_desc_num_get
  47. mvneta_txq_sent_desc_proc
  48. mvneta_txq_desc_csum
  49. mvneta_rx_error
  50. mvneta_rx_csum
  51. mvneta_tx_done_policy
  52. mvneta_txq_bufs_free
  53. mvneta_txq_done
  54. mvneta_rx_refill
  55. mvneta_skb_tx_csum
  56. mvneta_rxq_drop_pkts
  57. mvneta_rx_refill_queue
  58. mvneta_rx_swbm
  59. mvneta_rx_hwbm
  60. mvneta_tso_put_hdr
  61. mvneta_tso_put_data
  62. mvneta_tx_tso
  63. mvneta_tx_frag_process
  64. mvneta_tx
  65. mvneta_txq_done_force
  66. mvneta_tx_done_gbe
  67. mvneta_addr_crc
  68. mvneta_set_special_mcast_addr
  69. mvneta_set_other_mcast_addr
  70. mvneta_mcast_addr_set
  71. mvneta_rx_unicast_promisc_set
  72. mvneta_set_rx_mode
  73. mvneta_isr
  74. mvneta_percpu_isr
  75. mvneta_link_change
  76. mvneta_poll
  77. mvneta_rxq_fill
  78. mvneta_tx_reset
  79. mvneta_rx_reset
  80. mvneta_rxq_sw_init
  81. mvneta_rxq_hw_init
  82. mvneta_rxq_init
  83. mvneta_rxq_deinit
  84. mvneta_txq_sw_init
  85. mvneta_txq_hw_init
  86. mvneta_txq_init
  87. mvneta_txq_sw_deinit
  88. mvneta_txq_hw_deinit
  89. mvneta_txq_deinit
  90. mvneta_cleanup_txqs
  91. mvneta_cleanup_rxqs
  92. mvneta_setup_rxqs
  93. mvneta_setup_txqs
  94. mvneta_comphy_init
  95. mvneta_start_dev
  96. mvneta_stop_dev
  97. mvneta_percpu_enable
  98. mvneta_percpu_disable
  99. mvneta_change_mtu
  100. mvneta_fix_features
  101. mvneta_get_mac_addr
  102. mvneta_set_mac_addr
  103. mvneta_validate
  104. mvneta_mac_link_state
  105. mvneta_mac_an_restart
  106. mvneta_mac_config
  107. mvneta_set_eee
  108. mvneta_mac_link_down
  109. mvneta_mac_link_up
  110. mvneta_mdio_probe
  111. mvneta_mdio_remove
  112. mvneta_percpu_elect
  113. mvneta_cpu_online
  114. mvneta_cpu_down_prepare
  115. mvneta_cpu_dead
  116. mvneta_open
  117. mvneta_stop
  118. mvneta_ioctl
  119. mvneta_ethtool_set_link_ksettings
  120. mvneta_ethtool_get_link_ksettings
  121. mvneta_ethtool_nway_reset
  122. mvneta_ethtool_set_coalesce
  123. mvneta_ethtool_get_coalesce
  124. mvneta_ethtool_get_drvinfo
  125. mvneta_ethtool_get_ringparam
  126. mvneta_ethtool_set_ringparam
  127. mvneta_ethtool_get_pauseparam
  128. mvneta_ethtool_set_pauseparam
  129. mvneta_ethtool_get_strings
  130. mvneta_ethtool_update_stats
  131. mvneta_ethtool_get_stats
  132. mvneta_ethtool_get_sset_count
  133. mvneta_ethtool_get_rxfh_indir_size
  134. mvneta_ethtool_get_rxnfc
  135. mvneta_config_rss
  136. mvneta_ethtool_set_rxfh
  137. mvneta_ethtool_get_rxfh
  138. mvneta_ethtool_get_wol
  139. mvneta_ethtool_set_wol
  140. mvneta_ethtool_get_eee
  141. mvneta_ethtool_set_eee
  142. mvneta_init
  143. mvneta_conf_mbus_windows
  144. mvneta_port_power_up
  145. mvneta_probe
  146. mvneta_remove
  147. mvneta_suspend
  148. mvneta_resume
  149. mvneta_driver_init
  150. mvneta_driver_exit

   1 /*
   2  * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
   3  *
   4  * Copyright (C) 2012 Marvell
   5  *
   6  * Rami Rosen <rosenr@marvell.com>
   7  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
   8  *
   9  * This file is licensed under the terms of the GNU General Public
  10  * License version 2. This program is licensed "as is" without any
  11  * warranty of any kind, whether express or implied.
  12  */
  13 
  14 #include <linux/clk.h>
  15 #include <linux/cpu.h>
  16 #include <linux/etherdevice.h>
  17 #include <linux/if_vlan.h>
  18 #include <linux/inetdevice.h>
  19 #include <linux/interrupt.h>
  20 #include <linux/io.h>
  21 #include <linux/kernel.h>
  22 #include <linux/mbus.h>
  23 #include <linux/module.h>
  24 #include <linux/netdevice.h>
  25 #include <linux/of.h>
  26 #include <linux/of_address.h>
  27 #include <linux/of_irq.h>
  28 #include <linux/of_mdio.h>
  29 #include <linux/of_net.h>
  30 #include <linux/phy/phy.h>
  31 #include <linux/phy.h>
  32 #include <linux/phylink.h>
  33 #include <linux/platform_device.h>
  34 #include <linux/skbuff.h>
  35 #include <net/hwbm.h>
  36 #include "mvneta_bm.h"
  37 #include <net/ip.h>
  38 #include <net/ipv6.h>
  39 #include <net/tso.h>
  40 
  41 /* Registers */
  42 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
  43 #define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
  44 #define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT     4
  45 #define      MVNETA_RXQ_SHORT_POOL_ID_MASK      0x30
  46 #define      MVNETA_RXQ_LONG_POOL_ID_SHIFT      6
  47 #define      MVNETA_RXQ_LONG_POOL_ID_MASK       0xc0
  48 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
  49 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
  50 #define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
  51 #define      MVNETA_RXQ_NON_OCCUPIED(v)         ((v) << 16)
  52 #define MVNETA_RXQ_BASE_ADDR_REG(q)             (0x1480 + ((q) << 2))
  53 #define MVNETA_RXQ_SIZE_REG(q)                  (0x14a0 + ((q) << 2))
  54 #define      MVNETA_RXQ_BUF_SIZE_SHIFT          19
  55 #define      MVNETA_RXQ_BUF_SIZE_MASK           (0x1fff << 19)
  56 #define MVNETA_RXQ_STATUS_REG(q)                (0x14e0 + ((q) << 2))
  57 #define      MVNETA_RXQ_OCCUPIED_ALL_MASK       0x3fff
  58 #define MVNETA_RXQ_STATUS_UPDATE_REG(q)         (0x1500 + ((q) << 2))
  59 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT  16
  60 #define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX    255
  61 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)    (0x1700 + ((pool) << 2))
  62 #define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT   3
  63 #define      MVNETA_PORT_POOL_BUFFER_SZ_MASK    0xfff8
  64 #define MVNETA_PORT_RX_RESET                    0x1cc0
  65 #define      MVNETA_PORT_RX_DMA_RESET           BIT(0)
  66 #define MVNETA_PHY_ADDR                         0x2000
  67 #define      MVNETA_PHY_ADDR_MASK               0x1f
  68 #define MVNETA_MBUS_RETRY                       0x2010
  69 #define MVNETA_UNIT_INTR_CAUSE                  0x2080
  70 #define MVNETA_UNIT_CONTROL                     0x20B0
  71 #define      MVNETA_PHY_POLLING_ENABLE          BIT(1)
  72 #define MVNETA_WIN_BASE(w)                      (0x2200 + ((w) << 3))
  73 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
  74 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
  75 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
  76 #define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
  77 #define MVNETA_PORT_CONFIG                      0x2400
  78 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
  79 #define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
  80 #define      MVNETA_DEF_RXQ_ARP(q)              ((q) << 4)
  81 #define      MVNETA_TX_UNSET_ERR_SUM            BIT(12)
  82 #define      MVNETA_DEF_RXQ_TCP(q)              ((q) << 16)
  83 #define      MVNETA_DEF_RXQ_UDP(q)              ((q) << 19)
  84 #define      MVNETA_DEF_RXQ_BPDU(q)             ((q) << 22)
  85 #define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR     BIT(25)
  86 #define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)   (MVNETA_DEF_RXQ(q)       | \
  87                                                  MVNETA_DEF_RXQ_ARP(q)   | \
  88                                                  MVNETA_DEF_RXQ_TCP(q)   | \
  89                                                  MVNETA_DEF_RXQ_UDP(q)   | \
  90                                                  MVNETA_DEF_RXQ_BPDU(q)  | \
  91                                                  MVNETA_TX_UNSET_ERR_SUM | \
  92                                                  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
  93 #define MVNETA_PORT_CONFIG_EXTEND                0x2404
  94 #define MVNETA_MAC_ADDR_LOW                      0x2414
  95 #define MVNETA_MAC_ADDR_HIGH                     0x2418
  96 #define MVNETA_SDMA_CONFIG                       0x241c
  97 #define      MVNETA_SDMA_BRST_SIZE_16            4
  98 #define      MVNETA_RX_BRST_SZ_MASK(burst)       ((burst) << 1)
  99 #define      MVNETA_RX_NO_DATA_SWAP              BIT(4)
 100 #define      MVNETA_TX_NO_DATA_SWAP              BIT(5)
 101 #define      MVNETA_DESC_SWAP                    BIT(6)
 102 #define      MVNETA_TX_BRST_SZ_MASK(burst)       ((burst) << 22)
 103 #define MVNETA_PORT_STATUS                       0x2444
 104 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 105 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 106 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 107 #define MVNETA_SERDES_CFG                        0x24A0
 108 #define      MVNETA_SGMII_SERDES_PROTO           0x0cc7
 109 #define      MVNETA_QSGMII_SERDES_PROTO          0x0667
 110 #define MVNETA_TYPE_PRIO                         0x24bc
 111 #define      MVNETA_FORCE_UNI                    BIT(21)
 112 #define MVNETA_TXQ_CMD_1                         0x24e4
 113 #define MVNETA_TXQ_CMD                           0x2448
 114 #define      MVNETA_TXQ_DISABLE_SHIFT            8
 115 #define      MVNETA_TXQ_ENABLE_MASK              0x000000ff
 116 #define MVNETA_RX_DISCARD_FRAME_COUNT            0x2484
 117 #define MVNETA_OVERRUN_FRAME_COUNT               0x2488
 118 #define MVNETA_GMAC_CLOCK_DIVIDER                0x24f4
 119 #define      MVNETA_GMAC_1MS_CLOCK_ENABLE        BIT(31)
 120 #define MVNETA_ACC_MODE                          0x2500
 121 #define MVNETA_BM_ADDRESS                        0x2504
 122 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 123 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 124 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
 125 #define      MVNETA_CPU_RXQ_ACCESS(rxq)          BIT(rxq)
 126 #define      MVNETA_CPU_TXQ_ACCESS(txq)          BIT(txq + 8)
 127 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 128 
 129 /* Exception Interrupt Port/Queue Cause register
 130  *
 131  * Their behavior depend of the mapping done using the PCPX2Q
 132  * registers. For a given CPU if the bit associated to a queue is not
 133  * set, then for the register a read from this CPU will always return
 134  * 0 and a write won't do anything
 135  */
 136 
 137 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
 138 #define MVNETA_INTR_NEW_MASK                     0x25a4
 139 
 140 /* bits  0..7  = TXQ SENT, one bit per queue.
 141  * bits  8..15 = RXQ OCCUP, one bit per queue.
 142  * bits 16..23 = RXQ FREE, one bit per queue.
 143  * bit  29 = OLD_REG_SUM, see old reg ?
 144  * bit  30 = TX_ERR_SUM, one bit for 4 ports
 145  * bit  31 = MISC_SUM,   one bit for 4 ports
 146  */
 147 #define      MVNETA_TX_INTR_MASK(nr_txqs)        (((1 << nr_txqs) - 1) << 0)
 148 #define      MVNETA_TX_INTR_MASK_ALL             (0xff << 0)
 149 #define      MVNETA_RX_INTR_MASK(nr_rxqs)        (((1 << nr_rxqs) - 1) << 8)
 150 #define      MVNETA_RX_INTR_MASK_ALL             (0xff << 8)
 151 #define      MVNETA_MISCINTR_INTR_MASK           BIT(31)
 152 
 153 #define MVNETA_INTR_OLD_CAUSE                    0x25a8
 154 #define MVNETA_INTR_OLD_MASK                     0x25ac
 155 
 156 /* Data Path Port/Queue Cause Register */
 157 #define MVNETA_INTR_MISC_CAUSE                   0x25b0
 158 #define MVNETA_INTR_MISC_MASK                    0x25b4
 159 
 160 #define      MVNETA_CAUSE_PHY_STATUS_CHANGE      BIT(0)
 161 #define      MVNETA_CAUSE_LINK_CHANGE            BIT(1)
 162 #define      MVNETA_CAUSE_PTP                    BIT(4)
 163 
 164 #define      MVNETA_CAUSE_INTERNAL_ADDR_ERR      BIT(7)
 165 #define      MVNETA_CAUSE_RX_OVERRUN             BIT(8)
 166 #define      MVNETA_CAUSE_RX_CRC_ERROR           BIT(9)
 167 #define      MVNETA_CAUSE_RX_LARGE_PKT           BIT(10)
 168 #define      MVNETA_CAUSE_TX_UNDERUN             BIT(11)
 169 #define      MVNETA_CAUSE_PRBS_ERR               BIT(12)
 170 #define      MVNETA_CAUSE_PSC_SYNC_CHANGE        BIT(13)
 171 #define      MVNETA_CAUSE_SERDES_SYNC_ERR        BIT(14)
 172 
 173 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT    16
 174 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
 175 #define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
 176 
 177 #define      MVNETA_CAUSE_TXQ_ERROR_SHIFT        24
 178 #define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK     (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
 179 #define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)      (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
 180 
 181 #define MVNETA_INTR_ENABLE                       0x25b8
 182 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK     0x0000ff00
 183 #define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK     0x000000ff
 184 
 185 #define MVNETA_RXQ_CMD                           0x2680
 186 #define      MVNETA_RXQ_DISABLE_SHIFT            8
 187 #define      MVNETA_RXQ_ENABLE_MASK              0x000000ff
 188 #define MVETH_TXQ_TOKEN_COUNT_REG(q)             (0x2700 + ((q) << 4))
 189 #define MVETH_TXQ_TOKEN_CFG_REG(q)               (0x2704 + ((q) << 4))
 190 #define MVNETA_GMAC_CTRL_0                       0x2c00
 191 #define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT       2
 192 #define      MVNETA_GMAC_MAX_RX_SIZE_MASK        0x7ffc
 193 #define      MVNETA_GMAC0_PORT_1000BASE_X        BIT(1)
 194 #define      MVNETA_GMAC0_PORT_ENABLE            BIT(0)
 195 #define MVNETA_GMAC_CTRL_2                       0x2c08
 196 #define      MVNETA_GMAC2_INBAND_AN_ENABLE       BIT(0)
 197 #define      MVNETA_GMAC2_PCS_ENABLE             BIT(3)
 198 #define      MVNETA_GMAC2_PORT_RGMII             BIT(4)
 199 #define      MVNETA_GMAC2_PORT_RESET             BIT(6)
 200 #define MVNETA_GMAC_STATUS                       0x2c10
 201 #define      MVNETA_GMAC_LINK_UP                 BIT(0)
 202 #define      MVNETA_GMAC_SPEED_1000              BIT(1)
 203 #define      MVNETA_GMAC_SPEED_100               BIT(2)
 204 #define      MVNETA_GMAC_FULL_DUPLEX             BIT(3)
 205 #define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE     BIT(4)
 206 #define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE     BIT(5)
 207 #define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE     BIT(6)
 208 #define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE     BIT(7)
 209 #define      MVNETA_GMAC_AN_COMPLETE             BIT(11)
 210 #define      MVNETA_GMAC_SYNC_OK                 BIT(14)
 211 #define MVNETA_GMAC_AUTONEG_CONFIG               0x2c0c
 212 #define      MVNETA_GMAC_FORCE_LINK_DOWN         BIT(0)
 213 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 214 #define      MVNETA_GMAC_INBAND_AN_ENABLE        BIT(2)
 215 #define      MVNETA_GMAC_AN_BYPASS_ENABLE        BIT(3)
 216 #define      MVNETA_GMAC_INBAND_RESTART_AN       BIT(4)
 217 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 218 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
 219 #define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 220 #define      MVNETA_GMAC_CONFIG_FLOW_CTRL        BIT(8)
 221 #define      MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL    BIT(9)
 222 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN         BIT(11)
 223 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
 224 #define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 225 #define MVNETA_GMAC_CTRL_4                       0x2c90
 226 #define      MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE  BIT(1)
 227 #define MVNETA_MIB_COUNTERS_BASE                 0x3000
 228 #define      MVNETA_MIB_LATE_COLLISION           0x7c
 229 #define MVNETA_DA_FILT_SPEC_MCAST                0x3400
 230 #define MVNETA_DA_FILT_OTH_MCAST                 0x3500
 231 #define MVNETA_DA_FILT_UCAST_BASE                0x3600
 232 #define MVNETA_TXQ_BASE_ADDR_REG(q)              (0x3c00 + ((q) << 2))
 233 #define MVNETA_TXQ_SIZE_REG(q)                   (0x3c20 + ((q) << 2))
 234 #define      MVNETA_TXQ_SENT_THRESH_ALL_MASK     0x3fff0000
 235 #define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 236 #define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 237 #define      MVNETA_TXQ_DEC_SENT_SHIFT           16
 238 #define      MVNETA_TXQ_DEC_SENT_MASK            0xff
 239 #define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 240 #define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 241 #define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
 242 #define MVNETA_PORT_TX_RESET                     0x3cf0
 243 #define      MVNETA_PORT_TX_DMA_RESET            BIT(0)
 244 #define MVNETA_TX_MTU                            0x3e0c
 245 #define MVNETA_TX_TOKEN_SIZE                     0x3e14
 246 #define      MVNETA_TX_TOKEN_SIZE_MAX            0xffffffff
 247 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)             (0x3e40 + ((q) << 2))
 248 #define      MVNETA_TXQ_TOKEN_SIZE_MAX           0x7fffffff
 249 
 250 #define MVNETA_LPI_CTRL_0                        0x2cc0
 251 #define MVNETA_LPI_CTRL_1                        0x2cc4
 252 #define      MVNETA_LPI_REQUEST_ENABLE           BIT(0)
 253 #define MVNETA_LPI_CTRL_2                        0x2cc8
 254 #define MVNETA_LPI_STATUS                        0x2ccc
 255 
 256 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK      0xff
 257 
 258 /* Descriptor ring Macros */
 259 #define MVNETA_QUEUE_NEXT_DESC(q, index)        \
 260         (((index) < (q)->last_desc) ? ((index) + 1) : 0)
 261 
 262 /* Various constants */
 263 
 264 /* Coalescing */
 265 #define MVNETA_TXDONE_COAL_PKTS         0       /* interrupt per packet */
 266 #define MVNETA_RX_COAL_PKTS             32
 267 #define MVNETA_RX_COAL_USEC             100
 268 
 269 /* The two bytes Marvell header. Either contains a special value used
 270  * by Marvell switches when a specific hardware mode is enabled (not
 271  * supported by this driver) or is filled automatically by zeroes on
 272  * the RX side. Those two bytes being at the front of the Ethernet
 273  * header, they allow to have the IP header aligned on a 4 bytes
 274  * boundary automatically: the hardware skips those two bytes on its
 275  * own.
 276  */
 277 #define MVNETA_MH_SIZE                  2
 278 
 279 #define MVNETA_VLAN_TAG_LEN             4
 280 
 281 #define MVNETA_TX_CSUM_DEF_SIZE         1600
 282 #define MVNETA_TX_CSUM_MAX_SIZE         9800
 283 #define MVNETA_ACC_MODE_EXT1            1
 284 #define MVNETA_ACC_MODE_EXT2            2
 285 
 286 #define MVNETA_MAX_DECODE_WIN           6
 287 
 288 /* Timeout constants */
 289 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC  1000
 290 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC  1000
 291 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT    10000
 292 
 293 #define MVNETA_TX_MTU_MAX               0x3ffff
 294 
 295 /* The RSS lookup table actually has 256 entries but we do not use
 296  * them yet
 297  */
 298 #define MVNETA_RSS_LU_TABLE_SIZE        1
 299 
 300 /* Max number of Rx descriptors */
 301 #define MVNETA_MAX_RXD 512
 302 
 303 /* Max number of Tx descriptors */
 304 #define MVNETA_MAX_TXD 1024
 305 
 306 /* Max number of allowed TCP segments for software TSO */
 307 #define MVNETA_MAX_TSO_SEGS 100
 308 
 309 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
 310 
 311 /* descriptor aligned size */
 312 #define MVNETA_DESC_ALIGNED_SIZE        32
 313 
 314 /* Number of bytes to be taken into account by HW when putting incoming data
 315  * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 316  * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 317  */
 318 #define MVNETA_RX_PKT_OFFSET_CORRECTION         64
 319 
 320 #define MVNETA_RX_PKT_SIZE(mtu) \
 321         ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
 322               ETH_HLEN + ETH_FCS_LEN,                        \
 323               cache_line_size())
 324 
 325 #define IS_TSO_HEADER(txq, addr) \
 326         ((addr >= txq->tso_hdrs_phys) && \
 327          (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
 328 
 329 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
 330         (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
 331 
 332 enum {
 333         ETHTOOL_STAT_EEE_WAKEUP,
 334         ETHTOOL_STAT_SKB_ALLOC_ERR,
 335         ETHTOOL_STAT_REFILL_ERR,
 336         ETHTOOL_MAX_STATS,
 337 };
 338 
 339 struct mvneta_statistic {
 340         unsigned short offset;
 341         unsigned short type;
 342         const char name[ETH_GSTRING_LEN];
 343 };
 344 
 345 #define T_REG_32        32
 346 #define T_REG_64        64
 347 #define T_SW            1
 348 
 349 static const struct mvneta_statistic mvneta_statistics[] = {
 350         { 0x3000, T_REG_64, "good_octets_received", },
 351         { 0x3010, T_REG_32, "good_frames_received", },
 352         { 0x3008, T_REG_32, "bad_octets_received", },
 353         { 0x3014, T_REG_32, "bad_frames_received", },
 354         { 0x3018, T_REG_32, "broadcast_frames_received", },
 355         { 0x301c, T_REG_32, "multicast_frames_received", },
 356         { 0x3050, T_REG_32, "unrec_mac_control_received", },
 357         { 0x3058, T_REG_32, "good_fc_received", },
 358         { 0x305c, T_REG_32, "bad_fc_received", },
 359         { 0x3060, T_REG_32, "undersize_received", },
 360         { 0x3064, T_REG_32, "fragments_received", },
 361         { 0x3068, T_REG_32, "oversize_received", },
 362         { 0x306c, T_REG_32, "jabber_received", },
 363         { 0x3070, T_REG_32, "mac_receive_error", },
 364         { 0x3074, T_REG_32, "bad_crc_event", },
 365         { 0x3078, T_REG_32, "collision", },
 366         { 0x307c, T_REG_32, "late_collision", },
 367         { 0x2484, T_REG_32, "rx_discard", },
 368         { 0x2488, T_REG_32, "rx_overrun", },
 369         { 0x3020, T_REG_32, "frames_64_octets", },
 370         { 0x3024, T_REG_32, "frames_65_to_127_octets", },
 371         { 0x3028, T_REG_32, "frames_128_to_255_octets", },
 372         { 0x302c, T_REG_32, "frames_256_to_511_octets", },
 373         { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
 374         { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
 375         { 0x3038, T_REG_64, "good_octets_sent", },
 376         { 0x3040, T_REG_32, "good_frames_sent", },
 377         { 0x3044, T_REG_32, "excessive_collision", },
 378         { 0x3048, T_REG_32, "multicast_frames_sent", },
 379         { 0x304c, T_REG_32, "broadcast_frames_sent", },
 380         { 0x3054, T_REG_32, "fc_sent", },
 381         { 0x300c, T_REG_32, "internal_mac_transmit_err", },
 382         { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
 383         { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
 384         { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
 385 };
 386 
 387 struct mvneta_pcpu_stats {
 388         struct  u64_stats_sync syncp;
 389         u64     rx_packets;
 390         u64     rx_bytes;
 391         u64     rx_dropped;
 392         u64     rx_errors;
 393         u64     tx_packets;
 394         u64     tx_bytes;
 395 };
 396 
 397 struct mvneta_pcpu_port {
 398         /* Pointer to the shared port */
 399         struct mvneta_port      *pp;
 400 
 401         /* Pointer to the CPU-local NAPI struct */
 402         struct napi_struct      napi;
 403 
 404         /* Cause of the previous interrupt */
 405         u32                     cause_rx_tx;
 406 };
 407 
 408 struct mvneta_port {
 409         u8 id;
 410         struct mvneta_pcpu_port __percpu        *ports;
 411         struct mvneta_pcpu_stats __percpu       *stats;
 412 
 413         int pkt_size;
 414         void __iomem *base;
 415         struct mvneta_rx_queue *rxqs;
 416         struct mvneta_tx_queue *txqs;
 417         struct net_device *dev;
 418         struct hlist_node node_online;
 419         struct hlist_node node_dead;
 420         int rxq_def;
 421         /* Protect the access to the percpu interrupt registers,
 422          * ensuring that the configuration remains coherent.
 423          */
 424         spinlock_t lock;
 425         bool is_stopped;
 426 
 427         u32 cause_rx_tx;
 428         struct napi_struct napi;
 429 
 430         /* Core clock */
 431         struct clk *clk;
 432         /* AXI clock */
 433         struct clk *clk_bus;
 434         u8 mcast_count[256];
 435         u16 tx_ring_size;
 436         u16 rx_ring_size;
 437 
 438         phy_interface_t phy_interface;
 439         struct device_node *dn;
 440         unsigned int tx_csum_limit;
 441         struct phylink *phylink;
 442         struct phylink_config phylink_config;
 443         struct phy *comphy;
 444 
 445         struct mvneta_bm *bm_priv;
 446         struct mvneta_bm_pool *pool_long;
 447         struct mvneta_bm_pool *pool_short;
 448         int bm_win_id;
 449 
 450         bool eee_enabled;
 451         bool eee_active;
 452         bool tx_lpi_enabled;
 453 
 454         u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
 455 
 456         u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 457 
 458         /* Flags for special SoC configurations */
 459         bool neta_armada3700;
 460         u16 rx_offset_correction;
 461         const struct mbus_dram_target_info *dram_target_info;
 462 };
 463 
 464 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 465  * layout of the transmit and reception DMA descriptors, and their
 466  * layout is therefore defined by the hardware design
 467  */
 468 
 469 #define MVNETA_TX_L3_OFF_SHIFT  0
 470 #define MVNETA_TX_IP_HLEN_SHIFT 8
 471 #define MVNETA_TX_L4_UDP        BIT(16)
 472 #define MVNETA_TX_L3_IP6        BIT(17)
 473 #define MVNETA_TXD_IP_CSUM      BIT(18)
 474 #define MVNETA_TXD_Z_PAD        BIT(19)
 475 #define MVNETA_TXD_L_DESC       BIT(20)
 476 #define MVNETA_TXD_F_DESC       BIT(21)
 477 #define MVNETA_TXD_FLZ_DESC     (MVNETA_TXD_Z_PAD  | \
 478                                  MVNETA_TXD_L_DESC | \
 479                                  MVNETA_TXD_F_DESC)
 480 #define MVNETA_TX_L4_CSUM_FULL  BIT(30)
 481 #define MVNETA_TX_L4_CSUM_NOT   BIT(31)
 482 
 483 #define MVNETA_RXD_ERR_CRC              0x0
 484 #define MVNETA_RXD_BM_POOL_SHIFT        13
 485 #define MVNETA_RXD_BM_POOL_MASK         (BIT(13) | BIT(14))
 486 #define MVNETA_RXD_ERR_SUMMARY          BIT(16)
 487 #define MVNETA_RXD_ERR_OVERRUN          BIT(17)
 488 #define MVNETA_RXD_ERR_LEN              BIT(18)
 489 #define MVNETA_RXD_ERR_RESOURCE         (BIT(17) | BIT(18))
 490 #define MVNETA_RXD_ERR_CODE_MASK        (BIT(17) | BIT(18))
 491 #define MVNETA_RXD_L3_IP4               BIT(25)
 492 #define MVNETA_RXD_LAST_DESC            BIT(26)
 493 #define MVNETA_RXD_FIRST_DESC           BIT(27)
 494 #define MVNETA_RXD_FIRST_LAST_DESC      (MVNETA_RXD_FIRST_DESC | \
 495                                          MVNETA_RXD_LAST_DESC)
 496 #define MVNETA_RXD_L4_CSUM_OK           BIT(30)
 497 
 498 #if defined(__LITTLE_ENDIAN)
 499 struct mvneta_tx_desc {
 500         u32  command;           /* Options used by HW for packet transmitting.*/
 501         u16  reserved1;         /* csum_l4 (for future use)             */
 502         u16  data_size;         /* Data size of transmitted packet in bytes */
 503         u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 504         u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 505         u32  reserved3[4];      /* Reserved - (for future use)          */
 506 };
 507 
 508 struct mvneta_rx_desc {
 509         u32  status;            /* Info about received packet           */
 510         u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 511         u16  data_size;         /* Size of received packet in bytes     */
 512 
 513         u32  buf_phys_addr;     /* Physical address of the buffer       */
 514         u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 515 
 516         u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 517         u16  reserved3;         /* prefetch_cmd, for future use         */
 518         u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 519 
 520         u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 521         u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 522 };
 523 #else
 524 struct mvneta_tx_desc {
 525         u16  data_size;         /* Data size of transmitted packet in bytes */
 526         u16  reserved1;         /* csum_l4 (for future use)             */
 527         u32  command;           /* Options used by HW for packet transmitting.*/
 528         u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
 529         u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
 530         u32  reserved3[4];      /* Reserved - (for future use)          */
 531 };
 532 
 533 struct mvneta_rx_desc {
 534         u16  data_size;         /* Size of received packet in bytes     */
 535         u16  reserved1;         /* pnc_info - (for future use, PnC)     */
 536         u32  status;            /* Info about received packet           */
 537 
 538         u32  reserved2;         /* pnc_flow_id  (for future use, PnC)   */
 539         u32  buf_phys_addr;     /* Physical address of the buffer       */
 540 
 541         u16  reserved4;         /* csum_l4 - (for future use, PnC)      */
 542         u16  reserved3;         /* prefetch_cmd, for future use         */
 543         u32  buf_cookie;        /* cookie for access to RX buffer in rx path */
 544 
 545         u32  reserved5;         /* pnc_extra PnC (for future use, PnC)  */
 546         u32  reserved6;         /* hw_cmd (for future use, PnC and HWF) */
 547 };
 548 #endif
 549 
 550 struct mvneta_tx_queue {
 551         /* Number of this TX queue, in the range 0-7 */
 552         u8 id;
 553 
 554         /* Number of TX DMA descriptors in the descriptor ring */
 555         int size;
 556 
 557         /* Number of currently used TX DMA descriptor in the
 558          * descriptor ring
 559          */
 560         int count;
 561         int pending;
 562         int tx_stop_threshold;
 563         int tx_wake_threshold;
 564 
 565         /* Array of transmitted skb */
 566         struct sk_buff **tx_skb;
 567 
 568         /* Index of last TX DMA descriptor that was inserted */
 569         int txq_put_index;
 570 
 571         /* Index of the TX DMA descriptor to be cleaned up */
 572         int txq_get_index;
 573 
 574         u32 done_pkts_coal;
 575 
 576         /* Virtual address of the TX DMA descriptors array */
 577         struct mvneta_tx_desc *descs;
 578 
 579         /* DMA address of the TX DMA descriptors array */
 580         dma_addr_t descs_phys;
 581 
 582         /* Index of the last TX DMA descriptor */
 583         int last_desc;
 584 
 585         /* Index of the next TX DMA descriptor to process */
 586         int next_desc_to_proc;
 587 
 588         /* DMA buffers for TSO headers */
 589         char *tso_hdrs;
 590 
 591         /* DMA address of TSO headers */
 592         dma_addr_t tso_hdrs_phys;
 593 
 594         /* Affinity mask for CPUs*/
 595         cpumask_t affinity_mask;
 596 };
 597 
 598 struct mvneta_rx_queue {
 599         /* rx queue number, in the range 0-7 */
 600         u8 id;
 601 
 602         /* num of rx descriptors in the rx descriptor ring */
 603         int size;
 604 
 605         u32 pkts_coal;
 606         u32 time_coal;
 607 
 608         /* Virtual address of the RX buffer */
 609         void  **buf_virt_addr;
 610 
 611         /* Virtual address of the RX DMA descriptors array */
 612         struct mvneta_rx_desc *descs;
 613 
 614         /* DMA address of the RX DMA descriptors array */
 615         dma_addr_t descs_phys;
 616 
 617         /* Index of the last RX DMA descriptor */
 618         int last_desc;
 619 
 620         /* Index of the next RX DMA descriptor to process */
 621         int next_desc_to_proc;
 622 
 623         /* Index of first RX DMA descriptor to refill */
 624         int first_to_refill;
 625         u32 refill_num;
 626 
 627         /* pointer to uncomplete skb buffer */
 628         struct sk_buff *skb;
 629         int left_size;
 630 
 631         /* error counters */
 632         u32 skb_alloc_err;
 633         u32 refill_err;
 634 };
 635 
 636 static enum cpuhp_state online_hpstate;
 637 /* The hardware supports eight (8) rx queues, but we are only allowing
 638  * the first one to be used. Therefore, let's just allocate one queue.
 639  */
 640 static int rxq_number = 8;
 641 static int txq_number = 8;
 642 
 643 static int rxq_def;
 644 
 645 static int rx_copybreak __read_mostly = 256;
 646 static int rx_header_size __read_mostly = 128;
 647 
 648 /* HW BM need that each port be identify by a unique ID */
 649 static int global_port_id;
 650 
 651 #define MVNETA_DRIVER_NAME "mvneta"
 652 #define MVNETA_DRIVER_VERSION "1.0"
 653 
 654 /* Utility/helper methods */
 655 
 656 /* Write helper method */
 657 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
 658 {
 659         writel(data, pp->base + offset);
 660 }
 661 
 662 /* Read helper method */
 663 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
 664 {
 665         return readl(pp->base + offset);
 666 }
 667 
 668 /* Increment txq get counter */
 669 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
 670 {
 671         txq->txq_get_index++;
 672         if (txq->txq_get_index == txq->size)
 673                 txq->txq_get_index = 0;
 674 }
 675 
 676 /* Increment txq put counter */
 677 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
 678 {
 679         txq->txq_put_index++;
 680         if (txq->txq_put_index == txq->size)
 681                 txq->txq_put_index = 0;
 682 }
 683 
 684 
 685 /* Clear all MIB counters */
 686 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 687 {
 688         int i;
 689         u32 dummy;
 690 
 691         /* Perform dummy reads from MIB counters */
 692         for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
 693                 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
 694         dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
 695         dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
 696 }
 697 
 698 /* Get System Network Statistics */
 699 static void
 700 mvneta_get_stats64(struct net_device *dev,
 701                    struct rtnl_link_stats64 *stats)
 702 {
 703         struct mvneta_port *pp = netdev_priv(dev);
 704         unsigned int start;
 705         int cpu;
 706 
 707         for_each_possible_cpu(cpu) {
 708                 struct mvneta_pcpu_stats *cpu_stats;
 709                 u64 rx_packets;
 710                 u64 rx_bytes;
 711                 u64 rx_dropped;
 712                 u64 rx_errors;
 713                 u64 tx_packets;
 714                 u64 tx_bytes;
 715 
 716                 cpu_stats = per_cpu_ptr(pp->stats, cpu);
 717                 do {
 718                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 719                         rx_packets = cpu_stats->rx_packets;
 720                         rx_bytes   = cpu_stats->rx_bytes;
 721                         rx_dropped = cpu_stats->rx_dropped;
 722                         rx_errors  = cpu_stats->rx_errors;
 723                         tx_packets = cpu_stats->tx_packets;
 724                         tx_bytes   = cpu_stats->tx_bytes;
 725                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 726 
 727                 stats->rx_packets += rx_packets;
 728                 stats->rx_bytes   += rx_bytes;
 729                 stats->rx_dropped += rx_dropped;
 730                 stats->rx_errors  += rx_errors;
 731                 stats->tx_packets += tx_packets;
 732                 stats->tx_bytes   += tx_bytes;
 733         }
 734 
 735         stats->tx_dropped       = dev->stats.tx_dropped;
 736 }
 737 
 738 /* Rx descriptors helper methods */
 739 
 740 /* Checks whether the RX descriptor having this status is both the first
 741  * and the last descriptor for the RX packet. Each RX packet is currently
 742  * received through a single RX descriptor, so not having each RX
 743  * descriptor with its first and last bits set is an error
 744  */
 745 static int mvneta_rxq_desc_is_first_last(u32 status)
 746 {
 747         return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
 748                 MVNETA_RXD_FIRST_LAST_DESC;
 749 }
 750 
 751 /* Add number of descriptors ready to receive new packets */
 752 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
 753                                           struct mvneta_rx_queue *rxq,
 754                                           int ndescs)
 755 {
 756         /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
 757          * be added at once
 758          */
 759         while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
 760                 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 761                             (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
 762                              MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 763                 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
 764         }
 765 
 766         mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
 767                     (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
 768 }
 769 
 770 /* Get number of RX descriptors occupied by received packets */
 771 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
 772                                         struct mvneta_rx_queue *rxq)
 773 {
 774         u32 val;
 775 
 776         val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
 777         return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
 778 }
 779 
 780 /* Update num of rx desc called upon return from rx path or
 781  * from mvneta_rxq_drop_pkts().
 782  */
 783 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
 784                                        struct mvneta_rx_queue *rxq,
 785                                        int rx_done, int rx_filled)
 786 {
 787         u32 val;
 788 
 789         if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
 790                 val = rx_done |
 791                   (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
 792                 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 793                 return;
 794         }
 795 
 796         /* Only 255 descriptors can be added at once */
 797         while ((rx_done > 0) || (rx_filled > 0)) {
 798                 if (rx_done <= 0xff) {
 799                         val = rx_done;
 800                         rx_done = 0;
 801                 } else {
 802                         val = 0xff;
 803                         rx_done -= 0xff;
 804                 }
 805                 if (rx_filled <= 0xff) {
 806                         val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 807                         rx_filled = 0;
 808                 } else {
 809                         val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
 810                         rx_filled -= 0xff;
 811                 }
 812                 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
 813         }
 814 }
 815 
 816 /* Get pointer to next RX descriptor to be processed by SW */
 817 static struct mvneta_rx_desc *
 818 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
 819 {
 820         int rx_desc = rxq->next_desc_to_proc;
 821 
 822         rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
 823         prefetch(rxq->descs + rxq->next_desc_to_proc);
 824         return rxq->descs + rx_desc;
 825 }
 826 
 827 /* Change maximum receive size of the port. */
 828 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
 829 {
 830         u32 val;
 831 
 832         val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 833         val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
 834         val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
 835                 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
 836         mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 837 }
 838 
 839 
 840 /* Set rx queue offset */
 841 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
 842                                   struct mvneta_rx_queue *rxq,
 843                                   int offset)
 844 {
 845         u32 val;
 846 
 847         val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 848         val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
 849 
 850         /* Offset is in */
 851         val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
 852         mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 853 }
 854 
 855 
 856 /* Tx descriptors helper methods */
 857 
 858 /* Update HW with number of TX descriptors to be sent */
 859 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 860                                      struct mvneta_tx_queue *txq,
 861                                      int pend_desc)
 862 {
 863         u32 val;
 864 
 865         pend_desc += txq->pending;
 866 
 867         /* Only 255 Tx descriptors can be added at once */
 868         do {
 869                 val = min(pend_desc, 255);
 870                 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
 871                 pend_desc -= val;
 872         } while (pend_desc > 0);
 873         txq->pending = 0;
 874 }
 875 
 876 /* Get pointer to next TX descriptor to be processed (send) by HW */
 877 static struct mvneta_tx_desc *
 878 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
 879 {
 880         int tx_desc = txq->next_desc_to_proc;
 881 
 882         txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
 883         return txq->descs + tx_desc;
 884 }
 885 
 886 /* Release the last allocated TX descriptor. Useful to handle DMA
 887  * mapping failures in the TX path.
 888  */
 889 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
 890 {
 891         if (txq->next_desc_to_proc == 0)
 892                 txq->next_desc_to_proc = txq->last_desc - 1;
 893         else
 894                 txq->next_desc_to_proc--;
 895 }
 896 
 897 /* Set rxq buf size */
 898 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
 899                                     struct mvneta_rx_queue *rxq,
 900                                     int buf_size)
 901 {
 902         u32 val;
 903 
 904         val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
 905 
 906         val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
 907         val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
 908 
 909         mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
 910 }
 911 
 912 /* Disable buffer management (BM) */
 913 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
 914                                   struct mvneta_rx_queue *rxq)
 915 {
 916         u32 val;
 917 
 918         val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 919         val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
 920         mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 921 }
 922 
 923 /* Enable buffer management (BM) */
 924 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
 925                                  struct mvneta_rx_queue *rxq)
 926 {
 927         u32 val;
 928 
 929         val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 930         val |= MVNETA_RXQ_HW_BUF_ALLOC;
 931         mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 932 }
 933 
 934 /* Notify HW about port's assignment of pool for bigger packets */
 935 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
 936                                      struct mvneta_rx_queue *rxq)
 937 {
 938         u32 val;
 939 
 940         val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 941         val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
 942         val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
 943 
 944         mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 945 }
 946 
 947 /* Notify HW about port's assignment of pool for smaller packets */
 948 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
 949                                       struct mvneta_rx_queue *rxq)
 950 {
 951         u32 val;
 952 
 953         val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
 954         val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
 955         val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
 956 
 957         mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
 958 }
 959 
 960 /* Set port's receive buffer size for assigned BM pool */
 961 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
 962                                               int buf_size,
 963                                               u8 pool_id)
 964 {
 965         u32 val;
 966 
 967         if (!IS_ALIGNED(buf_size, 8)) {
 968                 dev_warn(pp->dev->dev.parent,
 969                          "illegal buf_size value %d, round to %d\n",
 970                          buf_size, ALIGN(buf_size, 8));
 971                 buf_size = ALIGN(buf_size, 8);
 972         }
 973 
 974         val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
 975         val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
 976         mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
 977 }
 978 
 979 /* Configure MBUS window in order to enable access BM internal SRAM */
 980 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
 981                                   u8 target, u8 attr)
 982 {
 983         u32 win_enable, win_protect;
 984         int i;
 985 
 986         win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
 987 
 988         if (pp->bm_win_id < 0) {
 989                 /* Find first not occupied window */
 990                 for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
 991                         if (win_enable & (1 << i)) {
 992                                 pp->bm_win_id = i;
 993                                 break;
 994                         }
 995                 }
 996                 if (i == MVNETA_MAX_DECODE_WIN)
 997                         return -ENOMEM;
 998         } else {
 999                 i = pp->bm_win_id;
1000         }
1001 
1002         mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1003         mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1004 
1005         if (i < 4)
1006                 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1007 
1008         mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1009                     (attr << 8) | target);
1010 
1011         mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1012 
1013         win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1014         win_protect |= 3 << (2 * i);
1015         mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1016 
1017         win_enable &= ~(1 << i);
1018         mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1019 
1020         return 0;
1021 }
1022 
1023 static  int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1024 {
1025         u32 wsize;
1026         u8 target, attr;
1027         int err;
1028 
1029         /* Get BM window information */
1030         err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1031                                          &target, &attr);
1032         if (err < 0)
1033                 return err;
1034 
1035         pp->bm_win_id = -1;
1036 
1037         /* Open NETA -> BM window */
1038         err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1039                                      target, attr);
1040         if (err < 0) {
1041                 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1042                 return err;
1043         }
1044         return 0;
1045 }
1046 
1047 /* Assign and initialize pools for port. In case of fail
1048  * buffer manager will remain disabled for current port.
1049  */
1050 static int mvneta_bm_port_init(struct platform_device *pdev,
1051                                struct mvneta_port *pp)
1052 {
1053         struct device_node *dn = pdev->dev.of_node;
1054         u32 long_pool_id, short_pool_id;
1055 
1056         if (!pp->neta_armada3700) {
1057                 int ret;
1058 
1059                 ret = mvneta_bm_port_mbus_init(pp);
1060                 if (ret)
1061                         return ret;
1062         }
1063 
1064         if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1065                 netdev_info(pp->dev, "missing long pool id\n");
1066                 return -EINVAL;
1067         }
1068 
1069         /* Create port's long pool depending on mtu */
1070         pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1071                                            MVNETA_BM_LONG, pp->id,
1072                                            MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1073         if (!pp->pool_long) {
1074                 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1075                 return -ENOMEM;
1076         }
1077 
1078         pp->pool_long->port_map |= 1 << pp->id;
1079 
1080         mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1081                                    pp->pool_long->id);
1082 
1083         /* If short pool id is not defined, assume using single pool */
1084         if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1085                 short_pool_id = long_pool_id;
1086 
1087         /* Create port's short pool */
1088         pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1089                                             MVNETA_BM_SHORT, pp->id,
1090                                             MVNETA_BM_SHORT_PKT_SIZE);
1091         if (!pp->pool_short) {
1092                 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1093                 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1094                 return -ENOMEM;
1095         }
1096 
1097         if (short_pool_id != long_pool_id) {
1098                 pp->pool_short->port_map |= 1 << pp->id;
1099                 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1100                                            pp->pool_short->id);
1101         }
1102 
1103         return 0;
1104 }
1105 
1106 /* Update settings of a pool for bigger packets */
1107 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1108 {
1109         struct mvneta_bm_pool *bm_pool = pp->pool_long;
1110         struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1111         int num;
1112 
1113         /* Release all buffers from long pool */
1114         mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1115         if (hwbm_pool->buf_num) {
1116                 WARN(1, "cannot free all buffers in pool %d\n",
1117                      bm_pool->id);
1118                 goto bm_mtu_err;
1119         }
1120 
1121         bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1122         bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1123         hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1124                         SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1125 
1126         /* Fill entire long pool */
1127         num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1128         if (num != hwbm_pool->size) {
1129                 WARN(1, "pool %d: %d of %d allocated\n",
1130                      bm_pool->id, num, hwbm_pool->size);
1131                 goto bm_mtu_err;
1132         }
1133         mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1134 
1135         return;
1136 
1137 bm_mtu_err:
1138         mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1139         mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1140 
1141         pp->bm_priv = NULL;
1142         mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1143         netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1144 }
1145 
1146 /* Start the Ethernet port RX and TX activity */
1147 static void mvneta_port_up(struct mvneta_port *pp)
1148 {
1149         int queue;
1150         u32 q_map;
1151 
1152         /* Enable all initialized TXs. */
1153         q_map = 0;
1154         for (queue = 0; queue < txq_number; queue++) {
1155                 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1156                 if (txq->descs)
1157                         q_map |= (1 << queue);
1158         }
1159         mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1160 
1161         q_map = 0;
1162         /* Enable all initialized RXQs. */
1163         for (queue = 0; queue < rxq_number; queue++) {
1164                 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1165 
1166                 if (rxq->descs)
1167                         q_map |= (1 << queue);
1168         }
1169         mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1170 }
1171 
1172 /* Stop the Ethernet port activity */
1173 static void mvneta_port_down(struct mvneta_port *pp)
1174 {
1175         u32 val;
1176         int count;
1177 
1178         /* Stop Rx port activity. Check port Rx activity. */
1179         val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1180 
1181         /* Issue stop command for active channels only */
1182         if (val != 0)
1183                 mvreg_write(pp, MVNETA_RXQ_CMD,
1184                             val << MVNETA_RXQ_DISABLE_SHIFT);
1185 
1186         /* Wait for all Rx activity to terminate. */
1187         count = 0;
1188         do {
1189                 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1190                         netdev_warn(pp->dev,
1191                                     "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1192                                     val);
1193                         break;
1194                 }
1195                 mdelay(1);
1196 
1197                 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1198         } while (val & MVNETA_RXQ_ENABLE_MASK);
1199 
1200         /* Stop Tx port activity. Check port Tx activity. Issue stop
1201          * command for active channels only
1202          */
1203         val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1204 
1205         if (val != 0)
1206                 mvreg_write(pp, MVNETA_TXQ_CMD,
1207                             (val << MVNETA_TXQ_DISABLE_SHIFT));
1208 
1209         /* Wait for all Tx activity to terminate. */
1210         count = 0;
1211         do {
1212                 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1213                         netdev_warn(pp->dev,
1214                                     "TIMEOUT for TX stopped status=0x%08x\n",
1215                                     val);
1216                         break;
1217                 }
1218                 mdelay(1);
1219 
1220                 /* Check TX Command reg that all Txqs are stopped */
1221                 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1222 
1223         } while (val & MVNETA_TXQ_ENABLE_MASK);
1224 
1225         /* Double check to verify that TX FIFO is empty */
1226         count = 0;
1227         do {
1228                 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1229                         netdev_warn(pp->dev,
1230                                     "TX FIFO empty timeout status=0x%08x\n",
1231                                     val);
1232                         break;
1233                 }
1234                 mdelay(1);
1235 
1236                 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1237         } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1238                  (val & MVNETA_TX_IN_PRGRS));
1239 
1240         udelay(200);
1241 }
1242 
1243 /* Enable the port by setting the port enable bit of the MAC control register */
1244 static void mvneta_port_enable(struct mvneta_port *pp)
1245 {
1246         u32 val;
1247 
1248         /* Enable port */
1249         val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1250         val |= MVNETA_GMAC0_PORT_ENABLE;
1251         mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1252 }
1253 
1254 /* Disable the port and wait for about 200 usec before retuning */
1255 static void mvneta_port_disable(struct mvneta_port *pp)
1256 {
1257         u32 val;
1258 
1259         /* Reset the Enable bit in the Serial Control Register */
1260         val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1261         val &= ~MVNETA_GMAC0_PORT_ENABLE;
1262         mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1263 
1264         udelay(200);
1265 }
1266 
1267 /* Multicast tables methods */
1268 
1269 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1270 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1271 {
1272         int offset;
1273         u32 val;
1274 
1275         if (queue == -1) {
1276                 val = 0;
1277         } else {
1278                 val = 0x1 | (queue << 1);
1279                 val |= (val << 24) | (val << 16) | (val << 8);
1280         }
1281 
1282         for (offset = 0; offset <= 0xc; offset += 4)
1283                 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1284 }
1285 
1286 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1287 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1288 {
1289         int offset;
1290         u32 val;
1291 
1292         if (queue == -1) {
1293                 val = 0;
1294         } else {
1295                 val = 0x1 | (queue << 1);
1296                 val |= (val << 24) | (val << 16) | (val << 8);
1297         }
1298 
1299         for (offset = 0; offset <= 0xfc; offset += 4)
1300                 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1301 
1302 }
1303 
1304 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1305 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1306 {
1307         int offset;
1308         u32 val;
1309 
1310         if (queue == -1) {
1311                 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1312                 val = 0;
1313         } else {
1314                 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1315                 val = 0x1 | (queue << 1);
1316                 val |= (val << 24) | (val << 16) | (val << 8);
1317         }
1318 
1319         for (offset = 0; offset <= 0xfc; offset += 4)
1320                 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1321 }
1322 
1323 static void mvneta_percpu_unmask_interrupt(void *arg)
1324 {
1325         struct mvneta_port *pp = arg;
1326 
1327         /* All the queue are unmasked, but actually only the ones
1328          * mapped to this CPU will be unmasked
1329          */
1330         mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1331                     MVNETA_RX_INTR_MASK_ALL |
1332                     MVNETA_TX_INTR_MASK_ALL |
1333                     MVNETA_MISCINTR_INTR_MASK);
1334 }
1335 
1336 static void mvneta_percpu_mask_interrupt(void *arg)
1337 {
1338         struct mvneta_port *pp = arg;
1339 
1340         /* All the queue are masked, but actually only the ones
1341          * mapped to this CPU will be masked
1342          */
1343         mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1344         mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1345         mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1346 }
1347 
1348 static void mvneta_percpu_clear_intr_cause(void *arg)
1349 {
1350         struct mvneta_port *pp = arg;
1351 
1352         /* All the queue are cleared, but actually only the ones
1353          * mapped to this CPU will be cleared
1354          */
1355         mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1356         mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1357         mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1358 }
1359 
1360 /* This method sets defaults to the NETA port:
1361  *      Clears interrupt Cause and Mask registers.
1362  *      Clears all MAC tables.
1363  *      Sets defaults to all registers.
1364  *      Resets RX and TX descriptor rings.
1365  *      Resets PHY.
1366  * This method can be called after mvneta_port_down() to return the port
1367  *      settings to defaults.
1368  */
1369 static void mvneta_defaults_set(struct mvneta_port *pp)
1370 {
1371         int cpu;
1372         int queue;
1373         u32 val;
1374         int max_cpu = num_present_cpus();
1375 
1376         /* Clear all Cause registers */
1377         on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1378 
1379         /* Mask all interrupts */
1380         on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1381         mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1382 
1383         /* Enable MBUS Retry bit16 */
1384         mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1385 
1386         /* Set CPU queue access map. CPUs are assigned to the RX and
1387          * TX queues modulo their number. If there is only one TX
1388          * queue then it is assigned to the CPU associated to the
1389          * default RX queue.
1390          */
1391         for_each_present_cpu(cpu) {
1392                 int rxq_map = 0, txq_map = 0;
1393                 int rxq, txq;
1394                 if (!pp->neta_armada3700) {
1395                         for (rxq = 0; rxq < rxq_number; rxq++)
1396                                 if ((rxq % max_cpu) == cpu)
1397                                         rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1398 
1399                         for (txq = 0; txq < txq_number; txq++)
1400                                 if ((txq % max_cpu) == cpu)
1401                                         txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1402 
1403                         /* With only one TX queue we configure a special case
1404                          * which will allow to get all the irq on a single
1405                          * CPU
1406                          */
1407                         if (txq_number == 1)
1408                                 txq_map = (cpu == pp->rxq_def) ?
1409                                         MVNETA_CPU_TXQ_ACCESS(1) : 0;
1410 
1411                 } else {
1412                         txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1413                         rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1414                 }
1415 
1416                 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1417         }
1418 
1419         /* Reset RX and TX DMAs */
1420         mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1421         mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1422 
1423         /* Disable Legacy WRR, Disable EJP, Release from reset */
1424         mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1425         for (queue = 0; queue < txq_number; queue++) {
1426                 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1427                 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1428         }
1429 
1430         mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1431         mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1432 
1433         /* Set Port Acceleration Mode */
1434         if (pp->bm_priv)
1435                 /* HW buffer management + legacy parser */
1436                 val = MVNETA_ACC_MODE_EXT2;
1437         else
1438                 /* SW buffer management + legacy parser */
1439                 val = MVNETA_ACC_MODE_EXT1;
1440         mvreg_write(pp, MVNETA_ACC_MODE, val);
1441 
1442         if (pp->bm_priv)
1443                 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1444 
1445         /* Update val of portCfg register accordingly with all RxQueue types */
1446         val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1447         mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1448 
1449         val = 0;
1450         mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1451         mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1452 
1453         /* Build PORT_SDMA_CONFIG_REG */
1454         val = 0;
1455 
1456         /* Default burst size */
1457         val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1458         val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1459         val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1460 
1461 #if defined(__BIG_ENDIAN)
1462         val |= MVNETA_DESC_SWAP;
1463 #endif
1464 
1465         /* Assign port SDMA configuration */
1466         mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1467 
1468         /* Disable PHY polling in hardware, since we're using the
1469          * kernel phylib to do this.
1470          */
1471         val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1472         val &= ~MVNETA_PHY_POLLING_ENABLE;
1473         mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1474 
1475         mvneta_set_ucast_table(pp, -1);
1476         mvneta_set_special_mcast_table(pp, -1);
1477         mvneta_set_other_mcast_table(pp, -1);
1478 
1479         /* Set port interrupt enable register - default enable all */
1480         mvreg_write(pp, MVNETA_INTR_ENABLE,
1481                     (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1482                      | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1483 
1484         mvneta_mib_counters_clear(pp);
1485 }
1486 
1487 /* Set max sizes for tx queues */
1488 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1489 
1490 {
1491         u32 val, size, mtu;
1492         int queue;
1493 
1494         mtu = max_tx_size * 8;
1495         if (mtu > MVNETA_TX_MTU_MAX)
1496                 mtu = MVNETA_TX_MTU_MAX;
1497 
1498         /* Set MTU */
1499         val = mvreg_read(pp, MVNETA_TX_MTU);
1500         val &= ~MVNETA_TX_MTU_MAX;
1501         val |= mtu;
1502         mvreg_write(pp, MVNETA_TX_MTU, val);
1503 
1504         /* TX token size and all TXQs token size must be larger that MTU */
1505         val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1506 
1507         size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1508         if (size < mtu) {
1509                 size = mtu;
1510                 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1511                 val |= size;
1512                 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1513         }
1514         for (queue = 0; queue < txq_number; queue++) {
1515                 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1516 
1517                 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1518                 if (size < mtu) {
1519                         size = mtu;
1520                         val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1521                         val |= size;
1522                         mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1523                 }
1524         }
1525 }
1526 
1527 /* Set unicast address */
1528 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1529                                   int queue)
1530 {
1531         unsigned int unicast_reg;
1532         unsigned int tbl_offset;
1533         unsigned int reg_offset;
1534 
1535         /* Locate the Unicast table entry */
1536         last_nibble = (0xf & last_nibble);
1537 
1538         /* offset from unicast tbl base */
1539         tbl_offset = (last_nibble / 4) * 4;
1540 
1541         /* offset within the above reg  */
1542         reg_offset = last_nibble % 4;
1543 
1544         unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1545 
1546         if (queue == -1) {
1547                 /* Clear accepts frame bit at specified unicast DA tbl entry */
1548                 unicast_reg &= ~(0xff << (8 * reg_offset));
1549         } else {
1550                 unicast_reg &= ~(0xff << (8 * reg_offset));
1551                 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1552         }
1553 
1554         mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1555 }
1556 
1557 /* Set mac address */
1558 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1559                                 int queue)
1560 {
1561         unsigned int mac_h;
1562         unsigned int mac_l;
1563 
1564         if (queue != -1) {
1565                 mac_l = (addr[4] << 8) | (addr[5]);
1566                 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1567                         (addr[2] << 8) | (addr[3] << 0);
1568 
1569                 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1570                 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1571         }
1572 
1573         /* Accept frames of this address */
1574         mvneta_set_ucast_addr(pp, addr[5], queue);
1575 }
1576 
1577 /* Set the number of packets that will be received before RX interrupt
1578  * will be generated by HW.
1579  */
1580 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1581                                     struct mvneta_rx_queue *rxq, u32 value)
1582 {
1583         mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1584                     value | MVNETA_RXQ_NON_OCCUPIED(0));
1585 }
1586 
1587 /* Set the time delay in usec before RX interrupt will be generated by
1588  * HW.
1589  */
1590 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1591                                     struct mvneta_rx_queue *rxq, u32 value)
1592 {
1593         u32 val;
1594         unsigned long clk_rate;
1595 
1596         clk_rate = clk_get_rate(pp->clk);
1597         val = (clk_rate / 1000000) * value;
1598 
1599         mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1600 }
1601 
1602 /* Set threshold for TX_DONE pkts coalescing */
1603 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1604                                          struct mvneta_tx_queue *txq, u32 value)
1605 {
1606         u32 val;
1607 
1608         val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1609 
1610         val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1611         val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1612 
1613         mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1614 }
1615 
1616 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1617 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1618                                 u32 phys_addr, void *virt_addr,
1619                                 struct mvneta_rx_queue *rxq)
1620 {
1621         int i;
1622 
1623         rx_desc->buf_phys_addr = phys_addr;
1624         i = rx_desc - rxq->descs;
1625         rxq->buf_virt_addr[i] = virt_addr;
1626 }
1627 
1628 /* Decrement sent descriptors counter */
1629 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1630                                      struct mvneta_tx_queue *txq,
1631                                      int sent_desc)
1632 {
1633         u32 val;
1634 
1635         /* Only 255 TX descriptors can be updated at once */
1636         while (sent_desc > 0xff) {
1637                 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1638                 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1639                 sent_desc = sent_desc - 0xff;
1640         }
1641 
1642         val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1643         mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1644 }
1645 
1646 /* Get number of TX descriptors already sent by HW */
1647 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1648                                         struct mvneta_tx_queue *txq)
1649 {
1650         u32 val;
1651         int sent_desc;
1652 
1653         val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1654         sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1655                 MVNETA_TXQ_SENT_DESC_SHIFT;
1656 
1657         return sent_desc;
1658 }
1659 
1660 /* Get number of sent descriptors and decrement counter.
1661  *  The number of sent descriptors is returned.
1662  */
1663 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1664                                      struct mvneta_tx_queue *txq)
1665 {
1666         int sent_desc;
1667 
1668         /* Get number of sent descriptors */
1669         sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1670 
1671         /* Decrement sent descriptors counter */
1672         if (sent_desc)
1673                 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1674 
1675         return sent_desc;
1676 }
1677 
1678 /* Set TXQ descriptors fields relevant for CSUM calculation */
1679 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1680                                 int ip_hdr_len, int l4_proto)
1681 {
1682         u32 command;
1683 
1684         /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1685          * G_L4_chk, L4_type; required only for checksum
1686          * calculation
1687          */
1688         command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
1689         command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1690 
1691         if (l3_proto == htons(ETH_P_IP))
1692                 command |= MVNETA_TXD_IP_CSUM;
1693         else
1694                 command |= MVNETA_TX_L3_IP6;
1695 
1696         if (l4_proto == IPPROTO_TCP)
1697                 command |=  MVNETA_TX_L4_CSUM_FULL;
1698         else if (l4_proto == IPPROTO_UDP)
1699                 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1700         else
1701                 command |= MVNETA_TX_L4_CSUM_NOT;
1702 
1703         return command;
1704 }
1705 
1706 
1707 /* Display more error info */
1708 static void mvneta_rx_error(struct mvneta_port *pp,
1709                             struct mvneta_rx_desc *rx_desc)
1710 {
1711         struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1712         u32 status = rx_desc->status;
1713 
1714         /* update per-cpu counter */
1715         u64_stats_update_begin(&stats->syncp);
1716         stats->rx_errors++;
1717         u64_stats_update_end(&stats->syncp);
1718 
1719         switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1720         case MVNETA_RXD_ERR_CRC:
1721                 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1722                            status, rx_desc->data_size);
1723                 break;
1724         case MVNETA_RXD_ERR_OVERRUN:
1725                 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1726                            status, rx_desc->data_size);
1727                 break;
1728         case MVNETA_RXD_ERR_LEN:
1729                 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1730                            status, rx_desc->data_size);
1731                 break;
1732         case MVNETA_RXD_ERR_RESOURCE:
1733                 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1734                            status, rx_desc->data_size);
1735                 break;
1736         }
1737 }
1738 
1739 /* Handle RX checksum offload based on the descriptor's status */
1740 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1741                            struct sk_buff *skb)
1742 {
1743         if ((pp->dev->features & NETIF_F_RXCSUM) &&
1744             (status & MVNETA_RXD_L3_IP4) &&
1745             (status & MVNETA_RXD_L4_CSUM_OK)) {
1746                 skb->csum = 0;
1747                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1748                 return;
1749         }
1750 
1751         skb->ip_summed = CHECKSUM_NONE;
1752 }
1753 
1754 /* Return tx queue pointer (find last set bit) according to <cause> returned
1755  * form tx_done reg. <cause> must not be null. The return value is always a
1756  * valid queue for matching the first one found in <cause>.
1757  */
1758 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1759                                                      u32 cause)
1760 {
1761         int queue = fls(cause) - 1;
1762 
1763         return &pp->txqs[queue];
1764 }
1765 
1766 /* Free tx queue skbuffs */
1767 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1768                                  struct mvneta_tx_queue *txq, int num,
1769                                  struct netdev_queue *nq)
1770 {
1771         unsigned int bytes_compl = 0, pkts_compl = 0;
1772         int i;
1773 
1774         for (i = 0; i < num; i++) {
1775                 struct mvneta_tx_desc *tx_desc = txq->descs +
1776                         txq->txq_get_index;
1777                 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1778 
1779                 if (skb) {
1780                         bytes_compl += skb->len;
1781                         pkts_compl++;
1782                 }
1783 
1784                 mvneta_txq_inc_get(txq);
1785 
1786                 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1787                         dma_unmap_single(pp->dev->dev.parent,
1788                                          tx_desc->buf_phys_addr,
1789                                          tx_desc->data_size, DMA_TO_DEVICE);
1790                 if (!skb)
1791                         continue;
1792                 dev_kfree_skb_any(skb);
1793         }
1794 
1795         netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1796 }
1797 
1798 /* Handle end of transmission */
1799 static void mvneta_txq_done(struct mvneta_port *pp,
1800                            struct mvneta_tx_queue *txq)
1801 {
1802         struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1803         int tx_done;
1804 
1805         tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1806         if (!tx_done)
1807                 return;
1808 
1809         mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1810 
1811         txq->count -= tx_done;
1812 
1813         if (netif_tx_queue_stopped(nq)) {
1814                 if (txq->count <= txq->tx_wake_threshold)
1815                         netif_tx_wake_queue(nq);
1816         }
1817 }
1818 
1819 /* Refill processing for SW buffer management */
1820 /* Allocate page per descriptor */
1821 static int mvneta_rx_refill(struct mvneta_port *pp,
1822                             struct mvneta_rx_desc *rx_desc,
1823                             struct mvneta_rx_queue *rxq,
1824                             gfp_t gfp_mask)
1825 {
1826         dma_addr_t phys_addr;
1827         struct page *page;
1828 
1829         page = __dev_alloc_page(gfp_mask);
1830         if (!page)
1831                 return -ENOMEM;
1832 
1833         /* map page for use */
1834         phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
1835                                  DMA_FROM_DEVICE);
1836         if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1837                 __free_page(page);
1838                 return -ENOMEM;
1839         }
1840 
1841         phys_addr += pp->rx_offset_correction;
1842         mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1843         return 0;
1844 }
1845 
1846 /* Handle tx checksum */
1847 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1848 {
1849         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1850                 int ip_hdr_len = 0;
1851                 __be16 l3_proto = vlan_get_protocol(skb);
1852                 u8 l4_proto;
1853 
1854                 if (l3_proto == htons(ETH_P_IP)) {
1855                         struct iphdr *ip4h = ip_hdr(skb);
1856 
1857                         /* Calculate IPv4 checksum and L4 checksum */
1858                         ip_hdr_len = ip4h->ihl;
1859                         l4_proto = ip4h->protocol;
1860                 } else if (l3_proto == htons(ETH_P_IPV6)) {
1861                         struct ipv6hdr *ip6h = ipv6_hdr(skb);
1862 
1863                         /* Read l4_protocol from one of IPv6 extra headers */
1864                         if (skb_network_header_len(skb) > 0)
1865                                 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1866                         l4_proto = ip6h->nexthdr;
1867                 } else
1868                         return MVNETA_TX_L4_CSUM_NOT;
1869 
1870                 return mvneta_txq_desc_csum(skb_network_offset(skb),
1871                                             l3_proto, ip_hdr_len, l4_proto);
1872         }
1873 
1874         return MVNETA_TX_L4_CSUM_NOT;
1875 }
1876 
1877 /* Drop packets received by the RXQ and free buffers */
1878 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1879                                  struct mvneta_rx_queue *rxq)
1880 {
1881         int rx_done, i;
1882 
1883         rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1884         if (rx_done)
1885                 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1886 
1887         if (pp->bm_priv) {
1888                 for (i = 0; i < rx_done; i++) {
1889                         struct mvneta_rx_desc *rx_desc =
1890                                                   mvneta_rxq_next_desc_get(rxq);
1891                         u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1892                         struct mvneta_bm_pool *bm_pool;
1893 
1894                         bm_pool = &pp->bm_priv->bm_pools[pool_id];
1895                         /* Return dropped buffer to the pool */
1896                         mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1897                                               rx_desc->buf_phys_addr);
1898                 }
1899                 return;
1900         }
1901 
1902         for (i = 0; i < rxq->size; i++) {
1903                 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1904                 void *data = rxq->buf_virt_addr[i];
1905                 if (!data || !(rx_desc->buf_phys_addr))
1906                         continue;
1907 
1908                 dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1909                                PAGE_SIZE, DMA_FROM_DEVICE);
1910                 __free_page(data);
1911         }
1912 }
1913 
1914 static inline
1915 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1916 {
1917         struct mvneta_rx_desc *rx_desc;
1918         int curr_desc = rxq->first_to_refill;
1919         int i;
1920 
1921         for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
1922                 rx_desc = rxq->descs + curr_desc;
1923                 if (!(rx_desc->buf_phys_addr)) {
1924                         if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
1925                                 pr_err("Can't refill queue %d. Done %d from %d\n",
1926                                        rxq->id, i, rxq->refill_num);
1927                                 rxq->refill_err++;
1928                                 break;
1929                         }
1930                 }
1931                 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
1932         }
1933         rxq->refill_num -= i;
1934         rxq->first_to_refill = curr_desc;
1935 
1936         return i;
1937 }
1938 
1939 /* Main rx processing when using software buffer management */
1940 static int mvneta_rx_swbm(struct napi_struct *napi,
1941                           struct mvneta_port *pp, int budget,
1942                           struct mvneta_rx_queue *rxq)
1943 {
1944         struct net_device *dev = pp->dev;
1945         int rx_todo, rx_proc;
1946         int refill = 0;
1947         u32 rcvd_pkts = 0;
1948         u32 rcvd_bytes = 0;
1949 
1950         /* Get number of received packets */
1951         rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
1952         rx_proc = 0;
1953 
1954         /* Fairness NAPI loop */
1955         while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
1956                 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1957                 unsigned char *data;
1958                 struct page *page;
1959                 dma_addr_t phys_addr;
1960                 u32 rx_status, index;
1961                 int rx_bytes, skb_size, copy_size;
1962                 int frag_num, frag_size, frag_offset;
1963 
1964                 index = rx_desc - rxq->descs;
1965                 page = (struct page *)rxq->buf_virt_addr[index];
1966                 data = page_address(page);
1967                 /* Prefetch header */
1968                 prefetch(data);
1969 
1970                 phys_addr = rx_desc->buf_phys_addr;
1971                 rx_status = rx_desc->status;
1972                 rx_proc++;
1973                 rxq->refill_num++;
1974 
1975                 if (rx_status & MVNETA_RXD_FIRST_DESC) {
1976                         /* Check errors only for FIRST descriptor */
1977                         if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
1978                                 mvneta_rx_error(pp, rx_desc);
1979                                 /* leave the descriptor untouched */
1980                                 continue;
1981                         }
1982                         rx_bytes = rx_desc->data_size -
1983                                    (ETH_FCS_LEN + MVNETA_MH_SIZE);
1984 
1985                         /* Allocate small skb for each new packet */
1986                         skb_size = max(rx_copybreak, rx_header_size);
1987                         rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
1988                         if (unlikely(!rxq->skb)) {
1989                                 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1990 
1991                                 netdev_err(dev,
1992                                            "Can't allocate skb on queue %d\n",
1993                                            rxq->id);
1994 
1995                                 rxq->skb_alloc_err++;
1996 
1997                                 u64_stats_update_begin(&stats->syncp);
1998                                 stats->rx_dropped++;
1999                                 u64_stats_update_end(&stats->syncp);
2000                                 continue;
2001                         }
2002                         copy_size = min(skb_size, rx_bytes);
2003 
2004                         /* Copy data from buffer to SKB, skip Marvell header */
2005                         memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
2006                                copy_size);
2007                         skb_put(rxq->skb, copy_size);
2008                         rxq->left_size = rx_bytes - copy_size;
2009 
2010                         mvneta_rx_csum(pp, rx_status, rxq->skb);
2011                         if (rxq->left_size == 0) {
2012                                 int size = copy_size + MVNETA_MH_SIZE;
2013 
2014                                 dma_sync_single_range_for_cpu(dev->dev.parent,
2015                                                               phys_addr, 0,
2016                                                               size,
2017                                                               DMA_FROM_DEVICE);
2018 
2019                                 /* leave the descriptor and buffer untouched */
2020                         } else {
2021                                 /* refill descriptor with new buffer later */
2022                                 rx_desc->buf_phys_addr = 0;
2023 
2024                                 frag_num = 0;
2025                                 frag_offset = copy_size + MVNETA_MH_SIZE;
2026                                 frag_size = min(rxq->left_size,
2027                                                 (int)(PAGE_SIZE - frag_offset));
2028                                 skb_add_rx_frag(rxq->skb, frag_num, page,
2029                                                 frag_offset, frag_size,
2030                                                 PAGE_SIZE);
2031                                 dma_unmap_page(dev->dev.parent, phys_addr,
2032                                                PAGE_SIZE, DMA_FROM_DEVICE);
2033                                 rxq->left_size -= frag_size;
2034                         }
2035                 } else {
2036                         /* Middle or Last descriptor */
2037                         if (unlikely(!rxq->skb)) {
2038                                 pr_debug("no skb for rx_status 0x%x\n",
2039                                          rx_status);
2040                                 continue;
2041                         }
2042                         if (!rxq->left_size) {
2043                                 /* last descriptor has only FCS */
2044                                 /* and can be discarded */
2045                                 dma_sync_single_range_for_cpu(dev->dev.parent,
2046                                                               phys_addr, 0,
2047                                                               ETH_FCS_LEN,
2048                                                               DMA_FROM_DEVICE);
2049                                 /* leave the descriptor and buffer untouched */
2050                         } else {
2051                                 /* refill descriptor with new buffer later */
2052                                 rx_desc->buf_phys_addr = 0;
2053 
2054                                 frag_num = skb_shinfo(rxq->skb)->nr_frags;
2055                                 frag_offset = 0;
2056                                 frag_size = min(rxq->left_size,
2057                                                 (int)(PAGE_SIZE - frag_offset));
2058                                 skb_add_rx_frag(rxq->skb, frag_num, page,
2059                                                 frag_offset, frag_size,
2060                                                 PAGE_SIZE);
2061 
2062                                 dma_unmap_page(dev->dev.parent, phys_addr,
2063                                                PAGE_SIZE, DMA_FROM_DEVICE);
2064 
2065                                 rxq->left_size -= frag_size;
2066                         }
2067                 } /* Middle or Last descriptor */
2068 
2069                 if (!(rx_status & MVNETA_RXD_LAST_DESC))
2070                         /* no last descriptor this time */
2071                         continue;
2072 
2073                 if (rxq->left_size) {
2074                         pr_err("get last desc, but left_size (%d) != 0\n",
2075                                rxq->left_size);
2076                         dev_kfree_skb_any(rxq->skb);
2077                         rxq->left_size = 0;
2078                         rxq->skb = NULL;
2079                         continue;
2080                 }
2081                 rcvd_pkts++;
2082                 rcvd_bytes += rxq->skb->len;
2083 
2084                 /* Linux processing */
2085                 rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
2086 
2087                 napi_gro_receive(napi, rxq->skb);
2088 
2089                 /* clean uncomplete skb pointer in queue */
2090                 rxq->skb = NULL;
2091                 rxq->left_size = 0;
2092         }
2093 
2094         if (rcvd_pkts) {
2095                 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2096 
2097                 u64_stats_update_begin(&stats->syncp);
2098                 stats->rx_packets += rcvd_pkts;
2099                 stats->rx_bytes   += rcvd_bytes;
2100                 u64_stats_update_end(&stats->syncp);
2101         }
2102 
2103         /* return some buffers to hardware queue, one at a time is too slow */
2104         refill = mvneta_rx_refill_queue(pp, rxq);
2105 
2106         /* Update rxq management counters */
2107         mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2108 
2109         return rcvd_pkts;
2110 }
2111 
2112 /* Main rx processing when using hardware buffer management */
2113 static int mvneta_rx_hwbm(struct napi_struct *napi,
2114                           struct mvneta_port *pp, int rx_todo,
2115                           struct mvneta_rx_queue *rxq)
2116 {
2117         struct net_device *dev = pp->dev;
2118         int rx_done;
2119         u32 rcvd_pkts = 0;
2120         u32 rcvd_bytes = 0;
2121 
2122         /* Get number of received packets */
2123         rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2124 
2125         if (rx_todo > rx_done)
2126                 rx_todo = rx_done;
2127 
2128         rx_done = 0;
2129 
2130         /* Fairness NAPI loop */
2131         while (rx_done < rx_todo) {
2132                 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2133                 struct mvneta_bm_pool *bm_pool = NULL;
2134                 struct sk_buff *skb;
2135                 unsigned char *data;
2136                 dma_addr_t phys_addr;
2137                 u32 rx_status, frag_size;
2138                 int rx_bytes, err;
2139                 u8 pool_id;
2140 
2141                 rx_done++;
2142                 rx_status = rx_desc->status;
2143                 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2144                 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2145                 phys_addr = rx_desc->buf_phys_addr;
2146                 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2147                 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2148 
2149                 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2150                     (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2151 err_drop_frame_ret_pool:
2152                         /* Return the buffer to the pool */
2153                         mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2154                                               rx_desc->buf_phys_addr);
2155 err_drop_frame:
2156                         mvneta_rx_error(pp, rx_desc);
2157                         /* leave the descriptor untouched */
2158                         continue;
2159                 }
2160 
2161                 if (rx_bytes <= rx_copybreak) {
2162                         /* better copy a small frame and not unmap the DMA region */
2163                         skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2164                         if (unlikely(!skb))
2165                                 goto err_drop_frame_ret_pool;
2166 
2167                         dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2168                                                       rx_desc->buf_phys_addr,
2169                                                       MVNETA_MH_SIZE + NET_SKB_PAD,
2170                                                       rx_bytes,
2171                                                       DMA_FROM_DEVICE);
2172                         skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2173                                      rx_bytes);
2174 
2175                         skb->protocol = eth_type_trans(skb, dev);
2176                         mvneta_rx_csum(pp, rx_status, skb);
2177                         napi_gro_receive(napi, skb);
2178 
2179                         rcvd_pkts++;
2180                         rcvd_bytes += rx_bytes;
2181 
2182                         /* Return the buffer to the pool */
2183                         mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2184                                               rx_desc->buf_phys_addr);
2185 
2186                         /* leave the descriptor and buffer untouched */
2187                         continue;
2188                 }
2189 
2190                 /* Refill processing */
2191                 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2192                 if (err) {
2193                         netdev_err(dev, "Linux processing - Can't refill\n");
2194                         rxq->refill_err++;
2195                         goto err_drop_frame_ret_pool;
2196                 }
2197 
2198                 frag_size = bm_pool->hwbm_pool.frag_size;
2199 
2200                 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2201 
2202                 /* After refill old buffer has to be unmapped regardless
2203                  * the skb is successfully built or not.
2204                  */
2205                 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2206                                  bm_pool->buf_size, DMA_FROM_DEVICE);
2207                 if (!skb)
2208                         goto err_drop_frame;
2209 
2210                 rcvd_pkts++;
2211                 rcvd_bytes += rx_bytes;
2212 
2213                 /* Linux processing */
2214                 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2215                 skb_put(skb, rx_bytes);
2216 
2217                 skb->protocol = eth_type_trans(skb, dev);
2218 
2219                 mvneta_rx_csum(pp, rx_status, skb);
2220 
2221                 napi_gro_receive(napi, skb);
2222         }
2223 
2224         if (rcvd_pkts) {
2225                 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2226 
2227                 u64_stats_update_begin(&stats->syncp);
2228                 stats->rx_packets += rcvd_pkts;
2229                 stats->rx_bytes   += rcvd_bytes;
2230                 u64_stats_update_end(&stats->syncp);
2231         }
2232 
2233         /* Update rxq management counters */
2234         mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2235 
2236         return rx_done;
2237 }
2238 
2239 static inline void
2240 mvneta_tso_put_hdr(struct sk_buff *skb,
2241                    struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2242 {
2243         struct mvneta_tx_desc *tx_desc;
2244         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2245 
2246         txq->tx_skb[txq->txq_put_index] = NULL;
2247         tx_desc = mvneta_txq_next_desc_get(txq);
2248         tx_desc->data_size = hdr_len;
2249         tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2250         tx_desc->command |= MVNETA_TXD_F_DESC;
2251         tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2252                                  txq->txq_put_index * TSO_HEADER_SIZE;
2253         mvneta_txq_inc_put(txq);
2254 }
2255 
2256 static inline int
2257 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2258                     struct sk_buff *skb, char *data, int size,
2259                     bool last_tcp, bool is_last)
2260 {
2261         struct mvneta_tx_desc *tx_desc;
2262 
2263         tx_desc = mvneta_txq_next_desc_get(txq);
2264         tx_desc->data_size = size;
2265         tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2266                                                 size, DMA_TO_DEVICE);
2267         if (unlikely(dma_mapping_error(dev->dev.parent,
2268                      tx_desc->buf_phys_addr))) {
2269                 mvneta_txq_desc_put(txq);
2270                 return -ENOMEM;
2271         }
2272 
2273         tx_desc->command = 0;
2274         txq->tx_skb[txq->txq_put_index] = NULL;
2275 
2276         if (last_tcp) {
2277                 /* last descriptor in the TCP packet */
2278                 tx_desc->command = MVNETA_TXD_L_DESC;
2279 
2280                 /* last descriptor in SKB */
2281                 if (is_last)
2282                         txq->tx_skb[txq->txq_put_index] = skb;
2283         }
2284         mvneta_txq_inc_put(txq);
2285         return 0;
2286 }
2287 
2288 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2289                          struct mvneta_tx_queue *txq)
2290 {
2291         int total_len, data_left;
2292         int desc_count = 0;
2293         struct mvneta_port *pp = netdev_priv(dev);
2294         struct tso_t tso;
2295         int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2296         int i;
2297 
2298         /* Count needed descriptors */
2299         if ((txq->count + tso_count_descs(skb)) >= txq->size)
2300                 return 0;
2301 
2302         if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2303                 pr_info("*** Is this even  possible???!?!?\n");
2304                 return 0;
2305         }
2306 
2307         /* Initialize the TSO handler, and prepare the first payload */
2308         tso_start(skb, &tso);
2309 
2310         total_len = skb->len - hdr_len;
2311         while (total_len > 0) {
2312                 char *hdr;
2313 
2314                 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2315                 total_len -= data_left;
2316                 desc_count++;
2317 
2318                 /* prepare packet headers: MAC + IP + TCP */
2319                 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2320                 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2321 
2322                 mvneta_tso_put_hdr(skb, pp, txq);
2323 
2324                 while (data_left > 0) {
2325                         int size;
2326                         desc_count++;
2327 
2328                         size = min_t(int, tso.size, data_left);
2329 
2330                         if (mvneta_tso_put_data(dev, txq, skb,
2331                                                  tso.data, size,
2332                                                  size == data_left,
2333                                                  total_len == 0))
2334                                 goto err_release;
2335                         data_left -= size;
2336 
2337                         tso_build_data(skb, &tso, size);
2338                 }
2339         }
2340 
2341         return desc_count;
2342 
2343 err_release:
2344         /* Release all used data descriptors; header descriptors must not
2345          * be DMA-unmapped.
2346          */
2347         for (i = desc_count - 1; i >= 0; i--) {
2348                 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2349                 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2350                         dma_unmap_single(pp->dev->dev.parent,
2351                                          tx_desc->buf_phys_addr,
2352                                          tx_desc->data_size,
2353                                          DMA_TO_DEVICE);
2354                 mvneta_txq_desc_put(txq);
2355         }
2356         return 0;
2357 }
2358 
2359 /* Handle tx fragmentation processing */
2360 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2361                                   struct mvneta_tx_queue *txq)
2362 {
2363         struct mvneta_tx_desc *tx_desc;
2364         int i, nr_frags = skb_shinfo(skb)->nr_frags;
2365 
2366         for (i = 0; i < nr_frags; i++) {
2367                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2368                 void *addr = skb_frag_address(frag);
2369 
2370                 tx_desc = mvneta_txq_next_desc_get(txq);
2371                 tx_desc->data_size = skb_frag_size(frag);
2372 
2373                 tx_desc->buf_phys_addr =
2374                         dma_map_single(pp->dev->dev.parent, addr,
2375                                        tx_desc->data_size, DMA_TO_DEVICE);
2376 
2377                 if (dma_mapping_error(pp->dev->dev.parent,
2378                                       tx_desc->buf_phys_addr)) {
2379                         mvneta_txq_desc_put(txq);
2380                         goto error;
2381                 }
2382 
2383                 if (i == nr_frags - 1) {
2384                         /* Last descriptor */
2385                         tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2386                         txq->tx_skb[txq->txq_put_index] = skb;
2387                 } else {
2388                         /* Descriptor in the middle: Not First, Not Last */
2389                         tx_desc->command = 0;
2390                         txq->tx_skb[txq->txq_put_index] = NULL;
2391                 }
2392                 mvneta_txq_inc_put(txq);
2393         }
2394 
2395         return 0;
2396 
2397 error:
2398         /* Release all descriptors that were used to map fragments of
2399          * this packet, as well as the corresponding DMA mappings
2400          */
2401         for (i = i - 1; i >= 0; i--) {
2402                 tx_desc = txq->descs + i;
2403                 dma_unmap_single(pp->dev->dev.parent,
2404                                  tx_desc->buf_phys_addr,
2405                                  tx_desc->data_size,
2406                                  DMA_TO_DEVICE);
2407                 mvneta_txq_desc_put(txq);
2408         }
2409 
2410         return -ENOMEM;
2411 }
2412 
2413 /* Main tx processing */
2414 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2415 {
2416         struct mvneta_port *pp = netdev_priv(dev);
2417         u16 txq_id = skb_get_queue_mapping(skb);
2418         struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2419         struct mvneta_tx_desc *tx_desc;
2420         int len = skb->len;
2421         int frags = 0;
2422         u32 tx_cmd;
2423 
2424         if (!netif_running(dev))
2425                 goto out;
2426 
2427         if (skb_is_gso(skb)) {
2428                 frags = mvneta_tx_tso(skb, dev, txq);
2429                 goto out;
2430         }
2431 
2432         frags = skb_shinfo(skb)->nr_frags + 1;
2433 
2434         /* Get a descriptor for the first part of the packet */
2435         tx_desc = mvneta_txq_next_desc_get(txq);
2436 
2437         tx_cmd = mvneta_skb_tx_csum(pp, skb);
2438 
2439         tx_desc->data_size = skb_headlen(skb);
2440 
2441         tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2442                                                 tx_desc->data_size,
2443                                                 DMA_TO_DEVICE);
2444         if (unlikely(dma_mapping_error(dev->dev.parent,
2445                                        tx_desc->buf_phys_addr))) {
2446                 mvneta_txq_desc_put(txq);
2447                 frags = 0;
2448                 goto out;
2449         }
2450 
2451         if (frags == 1) {
2452                 /* First and Last descriptor */
2453                 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2454                 tx_desc->command = tx_cmd;
2455                 txq->tx_skb[txq->txq_put_index] = skb;
2456                 mvneta_txq_inc_put(txq);
2457         } else {
2458                 /* First but not Last */
2459                 tx_cmd |= MVNETA_TXD_F_DESC;
2460                 txq->tx_skb[txq->txq_put_index] = NULL;
2461                 mvneta_txq_inc_put(txq);
2462                 tx_desc->command = tx_cmd;
2463                 /* Continue with other skb fragments */
2464                 if (mvneta_tx_frag_process(pp, skb, txq)) {
2465                         dma_unmap_single(dev->dev.parent,
2466                                          tx_desc->buf_phys_addr,
2467                                          tx_desc->data_size,
2468                                          DMA_TO_DEVICE);
2469                         mvneta_txq_desc_put(txq);
2470                         frags = 0;
2471                         goto out;
2472                 }
2473         }
2474 
2475 out:
2476         if (frags > 0) {
2477                 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2478                 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2479 
2480                 netdev_tx_sent_queue(nq, len);
2481 
2482                 txq->count += frags;
2483                 if (txq->count >= txq->tx_stop_threshold)
2484                         netif_tx_stop_queue(nq);
2485 
2486                 if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2487                     txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2488                         mvneta_txq_pend_desc_add(pp, txq, frags);
2489                 else
2490                         txq->pending += frags;
2491 
2492                 u64_stats_update_begin(&stats->syncp);
2493                 stats->tx_packets++;
2494                 stats->tx_bytes  += len;
2495                 u64_stats_update_end(&stats->syncp);
2496         } else {
2497                 dev->stats.tx_dropped++;
2498                 dev_kfree_skb_any(skb);
2499         }
2500 
2501         return NETDEV_TX_OK;
2502 }
2503 
2504 
2505 /* Free tx resources, when resetting a port */
2506 static void mvneta_txq_done_force(struct mvneta_port *pp,
2507                                   struct mvneta_tx_queue *txq)
2508 
2509 {
2510         struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2511         int tx_done = txq->count;
2512 
2513         mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2514 
2515         /* reset txq */
2516         txq->count = 0;
2517         txq->txq_put_index = 0;
2518         txq->txq_get_index = 0;
2519 }
2520 
2521 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2522  * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2523  */
2524 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2525 {
2526         struct mvneta_tx_queue *txq;
2527         struct netdev_queue *nq;
2528         int cpu = smp_processor_id();
2529 
2530         while (cause_tx_done) {
2531                 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2532 
2533                 nq = netdev_get_tx_queue(pp->dev, txq->id);
2534                 __netif_tx_lock(nq, cpu);
2535 
2536                 if (txq->count)
2537                         mvneta_txq_done(pp, txq);
2538 
2539                 __netif_tx_unlock(nq);
2540                 cause_tx_done &= ~((1 << txq->id));
2541         }
2542 }
2543 
2544 /* Compute crc8 of the specified address, using a unique algorithm ,
2545  * according to hw spec, different than generic crc8 algorithm
2546  */
2547 static int mvneta_addr_crc(unsigned char *addr)
2548 {
2549         int crc = 0;
2550         int i;
2551 
2552         for (i = 0; i < ETH_ALEN; i++) {
2553                 int j;
2554 
2555                 crc = (crc ^ addr[i]) << 8;
2556                 for (j = 7; j >= 0; j--) {
2557                         if (crc & (0x100 << j))
2558                                 crc ^= 0x107 << j;
2559                 }
2560         }
2561 
2562         return crc;
2563 }
2564 
2565 /* This method controls the net device special MAC multicast support.
2566  * The Special Multicast Table for MAC addresses supports MAC of the form
2567  * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2568  * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2569  * Table entries in the DA-Filter table. This method set the Special
2570  * Multicast Table appropriate entry.
2571  */
2572 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2573                                           unsigned char last_byte,
2574                                           int queue)
2575 {
2576         unsigned int smc_table_reg;
2577         unsigned int tbl_offset;
2578         unsigned int reg_offset;
2579 
2580         /* Register offset from SMC table base    */
2581         tbl_offset = (last_byte / 4);
2582         /* Entry offset within the above reg */
2583         reg_offset = last_byte % 4;
2584 
2585         smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2586                                         + tbl_offset * 4));
2587 
2588         if (queue == -1)
2589                 smc_table_reg &= ~(0xff << (8 * reg_offset));
2590         else {
2591                 smc_table_reg &= ~(0xff << (8 * reg_offset));
2592                 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2593         }
2594 
2595         mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2596                     smc_table_reg);
2597 }
2598 
2599 /* This method controls the network device Other MAC multicast support.
2600  * The Other Multicast Table is used for multicast of another type.
2601  * A CRC-8 is used as an index to the Other Multicast Table entries
2602  * in the DA-Filter table.
2603  * The method gets the CRC-8 value from the calling routine and
2604  * sets the Other Multicast Table appropriate entry according to the
2605  * specified CRC-8 .
2606  */
2607 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2608                                         unsigned char crc8,
2609                                         int queue)
2610 {
2611         unsigned int omc_table_reg;
2612         unsigned int tbl_offset;
2613         unsigned int reg_offset;
2614 
2615         tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2616         reg_offset = crc8 % 4;       /* Entry offset within the above reg   */
2617 
2618         omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2619 
2620         if (queue == -1) {
2621                 /* Clear accepts frame bit at specified Other DA table entry */
2622                 omc_table_reg &= ~(0xff << (8 * reg_offset));
2623         } else {
2624                 omc_table_reg &= ~(0xff << (8 * reg_offset));
2625                 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2626         }
2627 
2628         mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2629 }
2630 
2631 /* The network device supports multicast using two tables:
2632  *    1) Special Multicast Table for MAC addresses of the form
2633  *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2634  *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2635  *       Table entries in the DA-Filter table.
2636  *    2) Other Multicast Table for multicast of another type. A CRC-8 value
2637  *       is used as an index to the Other Multicast Table entries in the
2638  *       DA-Filter table.
2639  */
2640 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2641                                  int queue)
2642 {
2643         unsigned char crc_result = 0;
2644 
2645         if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2646                 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2647                 return 0;
2648         }
2649 
2650         crc_result = mvneta_addr_crc(p_addr);
2651         if (queue == -1) {
2652                 if (pp->mcast_count[crc_result] == 0) {
2653                         netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2654                                     crc_result);
2655                         return -EINVAL;
2656                 }
2657 
2658                 pp->mcast_count[crc_result]--;
2659                 if (pp->mcast_count[crc_result] != 0) {
2660                         netdev_info(pp->dev,
2661                                     "After delete there are %d valid Mcast for crc8=0x%02x\n",
2662                                     pp->mcast_count[crc_result], crc_result);
2663                         return -EINVAL;
2664                 }
2665         } else
2666                 pp->mcast_count[crc_result]++;
2667 
2668         mvneta_set_other_mcast_addr(pp, crc_result, queue);
2669 
2670         return 0;
2671 }
2672 
2673 /* Configure Fitering mode of Ethernet port */
2674 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2675                                           int is_promisc)
2676 {
2677         u32 port_cfg_reg, val;
2678 
2679         port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2680 
2681         val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2682 
2683         /* Set / Clear UPM bit in port configuration register */
2684         if (is_promisc) {
2685                 /* Accept all Unicast addresses */
2686                 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2687                 val |= MVNETA_FORCE_UNI;
2688                 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2689                 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2690         } else {
2691                 /* Reject all Unicast addresses */
2692                 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2693                 val &= ~MVNETA_FORCE_UNI;
2694         }
2695 
2696         mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2697         mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2698 }
2699 
2700 /* register unicast and multicast addresses */
2701 static void mvneta_set_rx_mode(struct net_device *dev)
2702 {
2703         struct mvneta_port *pp = netdev_priv(dev);
2704         struct netdev_hw_addr *ha;
2705 
2706         if (dev->flags & IFF_PROMISC) {
2707                 /* Accept all: Multicast + Unicast */
2708                 mvneta_rx_unicast_promisc_set(pp, 1);
2709                 mvneta_set_ucast_table(pp, pp->rxq_def);
2710                 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2711                 mvneta_set_other_mcast_table(pp, pp->rxq_def);
2712         } else {
2713                 /* Accept single Unicast */
2714                 mvneta_rx_unicast_promisc_set(pp, 0);
2715                 mvneta_set_ucast_table(pp, -1);
2716                 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
2717 
2718                 if (dev->flags & IFF_ALLMULTI) {
2719                         /* Accept all multicast */
2720                         mvneta_set_special_mcast_table(pp, pp->rxq_def);
2721                         mvneta_set_other_mcast_table(pp, pp->rxq_def);
2722                 } else {
2723                         /* Accept only initialized multicast */
2724                         mvneta_set_special_mcast_table(pp, -1);
2725                         mvneta_set_other_mcast_table(pp, -1);
2726 
2727                         if (!netdev_mc_empty(dev)) {
2728                                 netdev_for_each_mc_addr(ha, dev) {
2729                                         mvneta_mcast_addr_set(pp, ha->addr,
2730                                                               pp->rxq_def);
2731                                 }
2732                         }
2733                 }
2734         }
2735 }
2736 
2737 /* Interrupt handling - the callback for request_irq() */
2738 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2739 {
2740         struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2741 
2742         mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2743         napi_schedule(&pp->napi);
2744 
2745         return IRQ_HANDLED;
2746 }
2747 
2748 /* Interrupt handling - the callback for request_percpu_irq() */
2749 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
2750 {
2751         struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
2752 
2753         disable_percpu_irq(port->pp->dev->irq);
2754         napi_schedule(&port->napi);
2755 
2756         return IRQ_HANDLED;
2757 }
2758 
2759 static void mvneta_link_change(struct mvneta_port *pp)
2760 {
2761         u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2762 
2763         phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
2764 }
2765 
2766 /* NAPI handler
2767  * Bits 0 - 7 of the causeRxTx register indicate that are transmitted
2768  * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
2769  * Bits 8 -15 of the cause Rx Tx register indicate that are received
2770  * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
2771  * Each CPU has its own causeRxTx register
2772  */
2773 static int mvneta_poll(struct napi_struct *napi, int budget)
2774 {
2775         int rx_done = 0;
2776         u32 cause_rx_tx;
2777         int rx_queue;
2778         struct mvneta_port *pp = netdev_priv(napi->dev);
2779         struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
2780 
2781         if (!netif_running(pp->dev)) {
2782                 napi_complete(napi);
2783                 return rx_done;
2784         }
2785 
2786         /* Read cause register */
2787         cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2788         if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2789                 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2790 
2791                 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2792 
2793                 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2794                                   MVNETA_CAUSE_LINK_CHANGE))
2795                         mvneta_link_change(pp);
2796         }
2797 
2798         /* Release Tx descriptors */
2799         if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2800                 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2801                 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2802         }
2803 
2804         /* For the case where the last mvneta_poll did not process all
2805          * RX packets
2806          */
2807         cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2808                 port->cause_rx_tx;
2809 
2810         rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2811         if (rx_queue) {
2812                 rx_queue = rx_queue - 1;
2813                 if (pp->bm_priv)
2814                         rx_done = mvneta_rx_hwbm(napi, pp, budget,
2815                                                  &pp->rxqs[rx_queue]);
2816                 else
2817                         rx_done = mvneta_rx_swbm(napi, pp, budget,
2818                                                  &pp->rxqs[rx_queue]);
2819         }
2820 
2821         if (rx_done < budget) {
2822                 cause_rx_tx = 0;
2823                 napi_complete_done(napi, rx_done);
2824 
2825                 if (pp->neta_armada3700) {
2826                         unsigned long flags;
2827 
2828                         local_irq_save(flags);
2829                         mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2830                                     MVNETA_RX_INTR_MASK(rxq_number) |
2831                                     MVNETA_TX_INTR_MASK(txq_number) |
2832                                     MVNETA_MISCINTR_INTR_MASK);
2833                         local_irq_restore(flags);
2834                 } else {
2835                         enable_percpu_irq(pp->dev->irq, 0);
2836                 }
2837         }
2838 
2839         if (pp->neta_armada3700)
2840                 pp->cause_rx_tx = cause_rx_tx;
2841         else
2842                 port->cause_rx_tx = cause_rx_tx;
2843 
2844         return rx_done;
2845 }
2846 
2847 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2848 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2849                            int num)
2850 {
2851         int i;
2852 
2853         for (i = 0; i < num; i++) {
2854                 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2855                 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
2856                                      GFP_KERNEL) != 0) {
2857                         netdev_err(pp->dev,
2858                                    "%s:rxq %d, %d of %d buffs  filled\n",
2859                                    __func__, rxq->id, i, num);
2860                         break;
2861                 }
2862         }
2863 
2864         /* Add this number of RX descriptors as non occupied (ready to
2865          * get packets)
2866          */
2867         mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2868 
2869         return i;
2870 }
2871 
2872 /* Free all packets pending transmit from all TXQs and reset TX port */
2873 static void mvneta_tx_reset(struct mvneta_port *pp)
2874 {
2875         int queue;
2876 
2877         /* free the skb's in the tx ring */
2878         for (queue = 0; queue < txq_number; queue++)
2879                 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2880 
2881         mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2882         mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2883 }
2884 
2885 static void mvneta_rx_reset(struct mvneta_port *pp)
2886 {
2887         mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2888         mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2889 }
2890 
2891 /* Rx/Tx queue initialization/cleanup methods */
2892 
2893 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
2894                               struct mvneta_rx_queue *rxq)
2895 {
2896         rxq->size = pp->rx_ring_size;
2897 
2898         /* Allocate memory for RX descriptors */
2899         rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2900                                         rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2901                                         &rxq->descs_phys, GFP_KERNEL);
2902         if (!rxq->descs)
2903                 return -ENOMEM;
2904 
2905         rxq->last_desc = rxq->size - 1;
2906 
2907         return 0;
2908 }
2909 
2910 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
2911                                struct mvneta_rx_queue *rxq)
2912 {
2913         /* Set Rx descriptors queue starting address */
2914         mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2915         mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2916 
2917         /* Set coalescing pkts and time */
2918         mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2919         mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2920 
2921         if (!pp->bm_priv) {
2922                 /* Set Offset */
2923                 mvneta_rxq_offset_set(pp, rxq, 0);
2924                 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
2925                                         PAGE_SIZE :
2926                                         MVNETA_RX_BUF_SIZE(pp->pkt_size));
2927                 mvneta_rxq_bm_disable(pp, rxq);
2928                 mvneta_rxq_fill(pp, rxq, rxq->size);
2929         } else {
2930                 /* Set Offset */
2931                 mvneta_rxq_offset_set(pp, rxq,
2932                                       NET_SKB_PAD - pp->rx_offset_correction);
2933 
2934                 mvneta_rxq_bm_enable(pp, rxq);
2935                 /* Fill RXQ with buffers from RX pool */
2936                 mvneta_rxq_long_pool_set(pp, rxq);
2937                 mvneta_rxq_short_pool_set(pp, rxq);
2938                 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
2939         }
2940 }
2941 
2942 /* Create a specified RX queue */
2943 static int mvneta_rxq_init(struct mvneta_port *pp,
2944                            struct mvneta_rx_queue *rxq)
2945 
2946 {
2947         int ret;
2948 
2949         ret = mvneta_rxq_sw_init(pp, rxq);
2950         if (ret < 0)
2951                 return ret;
2952 
2953         mvneta_rxq_hw_init(pp, rxq);
2954 
2955         return 0;
2956 }
2957 
2958 /* Cleanup Rx queue */
2959 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2960                               struct mvneta_rx_queue *rxq)
2961 {
2962         mvneta_rxq_drop_pkts(pp, rxq);
2963 
2964         if (rxq->skb)
2965                 dev_kfree_skb_any(rxq->skb);
2966 
2967         if (rxq->descs)
2968                 dma_free_coherent(pp->dev->dev.parent,
2969                                   rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2970                                   rxq->descs,
2971                                   rxq->descs_phys);
2972 
2973         rxq->descs             = NULL;
2974         rxq->last_desc         = 0;
2975         rxq->next_desc_to_proc = 0;
2976         rxq->descs_phys        = 0;
2977         rxq->first_to_refill   = 0;
2978         rxq->refill_num        = 0;
2979         rxq->skb               = NULL;
2980         rxq->left_size         = 0;
2981 }
2982 
2983 static int mvneta_txq_sw_init(struct mvneta_port *pp,
2984                               struct mvneta_tx_queue *txq)
2985 {
2986         int cpu;
2987 
2988         txq->size = pp->tx_ring_size;
2989 
2990         /* A queue must always have room for at least one skb.
2991          * Therefore, stop the queue when the free entries reaches
2992          * the maximum number of descriptors per skb.
2993          */
2994         txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2995         txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2996 
2997         /* Allocate memory for TX descriptors */
2998         txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2999                                         txq->size * MVNETA_DESC_ALIGNED_SIZE,
3000                                         &txq->descs_phys, GFP_KERNEL);
3001         if (!txq->descs)
3002                 return -ENOMEM;
3003 
3004         txq->last_desc = txq->size - 1;
3005 
3006         txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
3007                                     GFP_KERNEL);
3008         if (!txq->tx_skb) {
3009                 dma_free_coherent(pp->dev->dev.parent,
3010                                   txq->size * MVNETA_DESC_ALIGNED_SIZE,
3011                                   txq->descs, txq->descs_phys);
3012                 return -ENOMEM;
3013         }
3014 
3015         /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3016         txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3017                                            txq->size * TSO_HEADER_SIZE,
3018                                            &txq->tso_hdrs_phys, GFP_KERNEL);
3019         if (!txq->tso_hdrs) {
3020                 kfree(txq->tx_skb);
3021                 dma_free_coherent(pp->dev->dev.parent,
3022                                   txq->size * MVNETA_DESC_ALIGNED_SIZE,
3023                                   txq->descs, txq->descs_phys);
3024                 return -ENOMEM;
3025         }
3026 
3027         /* Setup XPS mapping */
3028         if (txq_number > 1)
3029                 cpu = txq->id % num_present_cpus();
3030         else
3031                 cpu = pp->rxq_def % num_present_cpus();
3032         cpumask_set_cpu(cpu, &txq->affinity_mask);
3033         netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3034 
3035         return 0;
3036 }
3037 
3038 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3039                                struct mvneta_tx_queue *txq)
3040 {
3041         /* Set maximum bandwidth for enabled TXQs */
3042         mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3043         mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3044 
3045         /* Set Tx descriptors queue starting address */
3046         mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3047         mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3048 
3049         mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3050 }
3051 
3052 /* Create and initialize a tx queue */
3053 static int mvneta_txq_init(struct mvneta_port *pp,
3054                            struct mvneta_tx_queue *txq)
3055 {
3056         int ret;
3057 
3058         ret = mvneta_txq_sw_init(pp, txq);
3059         if (ret < 0)
3060                 return ret;
3061 
3062         mvneta_txq_hw_init(pp, txq);
3063 
3064         return 0;
3065 }
3066 
3067 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
3068 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3069                                  struct mvneta_tx_queue *txq)
3070 {
3071         struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3072 
3073         kfree(txq->tx_skb);
3074 
3075         if (txq->tso_hdrs)
3076                 dma_free_coherent(pp->dev->dev.parent,
3077                                   txq->size * TSO_HEADER_SIZE,
3078                                   txq->tso_hdrs, txq->tso_hdrs_phys);
3079         if (txq->descs)
3080                 dma_free_coherent(pp->dev->dev.parent,
3081                                   txq->size * MVNETA_DESC_ALIGNED_SIZE,
3082                                   txq->descs, txq->descs_phys);
3083 
3084         netdev_tx_reset_queue(nq);
3085 
3086         txq->descs             = NULL;
3087         txq->last_desc         = 0;
3088         txq->next_desc_to_proc = 0;
3089         txq->descs_phys        = 0;
3090 }
3091 
3092 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3093                                  struct mvneta_tx_queue *txq)
3094 {
3095         /* Set minimum bandwidth for disabled TXQs */
3096         mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3097         mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3098 
3099         /* Set Tx descriptors queue starting address and size */
3100         mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3101         mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3102 }
3103 
3104 static void mvneta_txq_deinit(struct mvneta_port *pp,
3105                               struct mvneta_tx_queue *txq)
3106 {
3107         mvneta_txq_sw_deinit(pp, txq);
3108         mvneta_txq_hw_deinit(pp, txq);
3109 }
3110 
3111 /* Cleanup all Tx queues */
3112 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3113 {
3114         int queue;
3115 
3116         for (queue = 0; queue < txq_number; queue++)
3117                 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3118 }
3119 
3120 /* Cleanup all Rx queues */
3121 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3122 {
3123         int queue;
3124 
3125         for (queue = 0; queue < rxq_number; queue++)
3126                 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3127 }
3128 
3129 
3130 /* Init all Rx queues */
3131 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3132 {
3133         int queue;
3134 
3135         for (queue = 0; queue < rxq_number; queue++) {
3136                 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3137 
3138                 if (err) {
3139                         netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3140                                    __func__, queue);
3141                         mvneta_cleanup_rxqs(pp);
3142                         return err;
3143                 }
3144         }
3145 
3146         return 0;
3147 }
3148 
3149 /* Init all tx queues */
3150 static int mvneta_setup_txqs(struct mvneta_port *pp)
3151 {
3152         int queue;
3153 
3154         for (queue = 0; queue < txq_number; queue++) {
3155                 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3156                 if (err) {
3157                         netdev_err(pp->dev, "%s: can't create txq=%d\n",
3158                                    __func__, queue);
3159                         mvneta_cleanup_txqs(pp);
3160                         return err;
3161                 }
3162         }
3163 
3164         return 0;
3165 }
3166 
3167 static int mvneta_comphy_init(struct mvneta_port *pp)
3168 {
3169         int ret;
3170 
3171         if (!pp->comphy)
3172                 return 0;
3173 
3174         ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
3175                                pp->phy_interface);
3176         if (ret)
3177                 return ret;
3178 
3179         return phy_power_on(pp->comphy);
3180 }
3181 
3182 static void mvneta_start_dev(struct mvneta_port *pp)
3183 {
3184         int cpu;
3185 
3186         WARN_ON(mvneta_comphy_init(pp));
3187 
3188         mvneta_max_rx_size_set(pp, pp->pkt_size);
3189         mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3190 
3191         /* start the Rx/Tx activity */
3192         mvneta_port_enable(pp);
3193 
3194         if (!pp->neta_armada3700) {
3195                 /* Enable polling on the port */
3196                 for_each_online_cpu(cpu) {
3197                         struct mvneta_pcpu_port *port =
3198                                 per_cpu_ptr(pp->ports, cpu);
3199 
3200                         napi_enable(&port->napi);
3201                 }
3202         } else {
3203                 napi_enable(&pp->napi);
3204         }
3205 
3206         /* Unmask interrupts. It has to be done from each CPU */
3207         on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3208 
3209         mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3210                     MVNETA_CAUSE_PHY_STATUS_CHANGE |
3211                     MVNETA_CAUSE_LINK_CHANGE);
3212 
3213         phylink_start(pp->phylink);
3214         netif_tx_start_all_queues(pp->dev);
3215 }
3216 
3217 static void mvneta_stop_dev(struct mvneta_port *pp)
3218 {
3219         unsigned int cpu;
3220 
3221         phylink_stop(pp->phylink);
3222 
3223         if (!pp->neta_armada3700) {
3224                 for_each_online_cpu(cpu) {
3225                         struct mvneta_pcpu_port *port =
3226                                 per_cpu_ptr(pp->ports, cpu);
3227 
3228                         napi_disable(&port->napi);
3229                 }
3230         } else {
3231                 napi_disable(&pp->napi);
3232         }
3233 
3234         netif_carrier_off(pp->dev);
3235 
3236         mvneta_port_down(pp);
3237         netif_tx_stop_all_queues(pp->dev);
3238 
3239         /* Stop the port activity */
3240         mvneta_port_disable(pp);
3241 
3242         /* Clear all ethernet port interrupts */
3243         on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3244 
3245         /* Mask all ethernet port interrupts */
3246         on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3247 
3248         mvneta_tx_reset(pp);
3249         mvneta_rx_reset(pp);
3250 
3251         WARN_ON(phy_power_off(pp->comphy));
3252 }
3253 
3254 static void mvneta_percpu_enable(void *arg)
3255 {
3256         struct mvneta_port *pp = arg;
3257 
3258         enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3259 }
3260 
3261 static void mvneta_percpu_disable(void *arg)
3262 {
3263         struct mvneta_port *pp = arg;
3264 
3265         disable_percpu_irq(pp->dev->irq);
3266 }
3267 
3268 /* Change the device mtu */
3269 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3270 {
3271         struct mvneta_port *pp = netdev_priv(dev);
3272         int ret;
3273 
3274         if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3275                 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3276                             mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3277                 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3278         }
3279 
3280         dev->mtu = mtu;
3281 
3282         if (!netif_running(dev)) {
3283                 if (pp->bm_priv)
3284                         mvneta_bm_update_mtu(pp, mtu);
3285 
3286                 netdev_update_features(dev);
3287                 return 0;
3288         }
3289 
3290         /* The interface is running, so we have to force a
3291          * reallocation of the queues
3292          */
3293         mvneta_stop_dev(pp);
3294         on_each_cpu(mvneta_percpu_disable, pp, true);
3295 
3296         mvneta_cleanup_txqs(pp);
3297         mvneta_cleanup_rxqs(pp);
3298 
3299         if (pp->bm_priv)
3300                 mvneta_bm_update_mtu(pp, mtu);
3301 
3302         pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3303 
3304         ret = mvneta_setup_rxqs(pp);
3305         if (ret) {
3306                 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3307                 return ret;
3308         }
3309 
3310         ret = mvneta_setup_txqs(pp);
3311         if (ret) {
3312                 netdev_err(dev, "unable to setup txqs after MTU change\n");
3313                 return ret;
3314         }
3315 
3316         on_each_cpu(mvneta_percpu_enable, pp, true);
3317         mvneta_start_dev(pp);
3318 
3319         netdev_update_features(dev);
3320 
3321         return 0;
3322 }
3323 
3324 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3325                                              netdev_features_t features)
3326 {
3327         struct mvneta_port *pp = netdev_priv(dev);
3328 
3329         if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3330                 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3331                 netdev_info(dev,
3332                             "Disable IP checksum for MTU greater than %dB\n",
3333                             pp->tx_csum_limit);
3334         }
3335 
3336         return features;
3337 }
3338 
3339 /* Get mac address */
3340 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3341 {
3342         u32 mac_addr_l, mac_addr_h;
3343 
3344         mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3345         mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3346         addr[0] = (mac_addr_h >> 24) & 0xFF;
3347         addr[1] = (mac_addr_h >> 16) & 0xFF;
3348         addr[2] = (mac_addr_h >> 8) & 0xFF;
3349         addr[3] = mac_addr_h & 0xFF;
3350         addr[4] = (mac_addr_l >> 8) & 0xFF;
3351         addr[5] = mac_addr_l & 0xFF;
3352 }
3353 
3354 /* Handle setting mac address */
3355 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3356 {
3357         struct mvneta_port *pp = netdev_priv(dev);
3358         struct sockaddr *sockaddr = addr;
3359         int ret;
3360 
3361         ret = eth_prepare_mac_addr_change(dev, addr);
3362         if (ret < 0)
3363                 return ret;
3364         /* Remove previous address table entry */
3365         mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3366 
3367         /* Set new addr in hw */
3368         mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3369 
3370         eth_commit_mac_addr_change(dev, addr);
3371         return 0;
3372 }
3373 
3374 static void mvneta_validate(struct phylink_config *config,
3375                             unsigned long *supported,
3376                             struct phylink_link_state *state)
3377 {
3378         struct net_device *ndev = to_net_dev(config->dev);
3379         struct mvneta_port *pp = netdev_priv(ndev);
3380         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3381 
3382         /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
3383         if (state->interface != PHY_INTERFACE_MODE_NA &&
3384             state->interface != PHY_INTERFACE_MODE_QSGMII &&
3385             state->interface != PHY_INTERFACE_MODE_SGMII &&
3386             !phy_interface_mode_is_8023z(state->interface) &&
3387             !phy_interface_mode_is_rgmii(state->interface)) {
3388                 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3389                 return;
3390         }
3391 
3392         /* Allow all the expected bits */
3393         phylink_set(mask, Autoneg);
3394         phylink_set_port_modes(mask);
3395 
3396         /* Asymmetric pause is unsupported */
3397         phylink_set(mask, Pause);
3398 
3399         /* Half-duplex at speeds higher than 100Mbit is unsupported */
3400         if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3401                 phylink_set(mask, 1000baseT_Full);
3402                 phylink_set(mask, 1000baseX_Full);
3403         }
3404         if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3405                 phylink_set(mask, 2500baseT_Full);
3406                 phylink_set(mask, 2500baseX_Full);
3407         }
3408 
3409         if (!phy_interface_mode_is_8023z(state->interface)) {
3410                 /* 10M and 100M are only supported in non-802.3z mode */
3411                 phylink_set(mask, 10baseT_Half);
3412                 phylink_set(mask, 10baseT_Full);
3413                 phylink_set(mask, 100baseT_Half);
3414                 phylink_set(mask, 100baseT_Full);
3415         }
3416 
3417         bitmap_and(supported, supported, mask,
3418                    __ETHTOOL_LINK_MODE_MASK_NBITS);
3419         bitmap_and(state->advertising, state->advertising, mask,
3420                    __ETHTOOL_LINK_MODE_MASK_NBITS);
3421 
3422         /* We can only operate at 2500BaseX or 1000BaseX.  If requested
3423          * to advertise both, only report advertising at 2500BaseX.
3424          */
3425         phylink_helper_basex_speed(state);
3426 }
3427 
3428 static int mvneta_mac_link_state(struct phylink_config *config,
3429                                  struct phylink_link_state *state)
3430 {
3431         struct net_device *ndev = to_net_dev(config->dev);
3432         struct mvneta_port *pp = netdev_priv(ndev);
3433         u32 gmac_stat;
3434 
3435         gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3436 
3437         if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3438                 state->speed =
3439                         state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3440                         SPEED_2500 : SPEED_1000;
3441         else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3442                 state->speed = SPEED_100;
3443         else
3444                 state->speed = SPEED_10;
3445 
3446         state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3447         state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3448         state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3449 
3450         state->pause = 0;
3451         if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3452                 state->pause |= MLO_PAUSE_RX;
3453         if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3454                 state->pause |= MLO_PAUSE_TX;
3455 
3456         return 1;
3457 }
3458 
3459 static void mvneta_mac_an_restart(struct phylink_config *config)
3460 {
3461         struct net_device *ndev = to_net_dev(config->dev);
3462         struct mvneta_port *pp = netdev_priv(ndev);
3463         u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3464 
3465         mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3466                     gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3467         mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3468                     gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3469 }
3470 
3471 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3472                               const struct phylink_link_state *state)
3473 {
3474         struct net_device *ndev = to_net_dev(config->dev);
3475         struct mvneta_port *pp = netdev_priv(ndev);
3476         u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3477         u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3478         u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3479         u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3480         u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3481 
3482         new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3483         new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3484                                    MVNETA_GMAC2_PORT_RESET);
3485         new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
3486         new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3487         new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3488                              MVNETA_GMAC_INBAND_RESTART_AN |
3489                              MVNETA_GMAC_CONFIG_MII_SPEED |
3490                              MVNETA_GMAC_CONFIG_GMII_SPEED |
3491                              MVNETA_GMAC_AN_SPEED_EN |
3492                              MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3493                              MVNETA_GMAC_CONFIG_FLOW_CTRL |
3494                              MVNETA_GMAC_AN_FLOW_CTRL_EN |
3495                              MVNETA_GMAC_CONFIG_FULL_DUPLEX |
3496                              MVNETA_GMAC_AN_DUPLEX_EN);
3497 
3498         /* Even though it might look weird, when we're configured in
3499          * SGMII or QSGMII mode, the RGMII bit needs to be set.
3500          */
3501         new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3502 
3503         if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3504             state->interface == PHY_INTERFACE_MODE_SGMII ||
3505             phy_interface_mode_is_8023z(state->interface))
3506                 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3507 
3508         if (phylink_test(state->advertising, Pause))
3509                 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3510         if (state->pause & MLO_PAUSE_TXRX_MASK)
3511                 new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
3512 
3513         if (!phylink_autoneg_inband(mode)) {
3514                 /* Phy or fixed speed */
3515                 if (state->duplex)
3516                         new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3517 
3518                 if (state->speed == SPEED_1000 || state->speed == SPEED_2500)
3519                         new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3520                 else if (state->speed == SPEED_100)
3521                         new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
3522         } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3523                 /* SGMII mode receives the state from the PHY */
3524                 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3525                 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3526                 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3527                                      MVNETA_GMAC_FORCE_LINK_PASS)) |
3528                          MVNETA_GMAC_INBAND_AN_ENABLE |
3529                          MVNETA_GMAC_AN_SPEED_EN |
3530                          MVNETA_GMAC_AN_DUPLEX_EN;
3531         } else {
3532                 /* 802.3z negotiation - only 1000base-X */
3533                 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3534                 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3535                 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3536                                      MVNETA_GMAC_FORCE_LINK_PASS)) |
3537                          MVNETA_GMAC_INBAND_AN_ENABLE |
3538                          MVNETA_GMAC_CONFIG_GMII_SPEED |
3539                          /* The MAC only supports FD mode */
3540                          MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3541 
3542                 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3543                         new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3544         }
3545 
3546         /* Armada 370 documentation says we can only change the port mode
3547          * and in-band enable when the link is down, so force it down
3548          * while making these changes. We also do this for GMAC_CTRL2 */
3549         if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3550             (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
3551             (new_an  ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3552                 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3553                             (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3554                             MVNETA_GMAC_FORCE_LINK_DOWN);
3555         }
3556 
3557 
3558         /* When at 2.5G, the link partner can send frames with shortened
3559          * preambles.
3560          */
3561         if (state->speed == SPEED_2500)
3562                 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
3563 
3564         if (pp->comphy && pp->phy_interface != state->interface &&
3565             (state->interface == PHY_INTERFACE_MODE_SGMII ||
3566              state->interface == PHY_INTERFACE_MODE_1000BASEX ||
3567              state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
3568                 pp->phy_interface = state->interface;
3569 
3570                 WARN_ON(phy_power_off(pp->comphy));
3571                 WARN_ON(mvneta_comphy_init(pp));
3572         }
3573 
3574         if (new_ctrl0 != gmac_ctrl0)
3575                 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
3576         if (new_ctrl2 != gmac_ctrl2)
3577                 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
3578         if (new_ctrl4 != gmac_ctrl4)
3579                 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
3580         if (new_clk != gmac_clk)
3581                 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
3582         if (new_an != gmac_an)
3583                 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
3584 
3585         if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
3586                 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3587                         MVNETA_GMAC2_PORT_RESET) != 0)
3588                         continue;
3589         }
3590 }
3591 
3592 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
3593 {
3594         u32 lpi_ctl1;
3595 
3596         lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
3597         if (enable)
3598                 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
3599         else
3600                 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
3601         mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
3602 }
3603 
3604 static void mvneta_mac_link_down(struct phylink_config *config,
3605                                  unsigned int mode, phy_interface_t interface)
3606 {
3607         struct net_device *ndev = to_net_dev(config->dev);
3608         struct mvneta_port *pp = netdev_priv(ndev);
3609         u32 val;
3610 
3611         mvneta_port_down(pp);
3612 
3613         if (!phylink_autoneg_inband(mode)) {
3614                 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3615                 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3616                 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3617                 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3618         }
3619 
3620         pp->eee_active = false;
3621         mvneta_set_eee(pp, false);
3622 }
3623 
3624 static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
3625                                phy_interface_t interface,
3626                                struct phy_device *phy)
3627 {
3628         struct net_device *ndev = to_net_dev(config->dev);
3629         struct mvneta_port *pp = netdev_priv(ndev);
3630         u32 val;
3631 
3632         if (!phylink_autoneg_inband(mode)) {
3633                 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3634                 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
3635                 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3636                 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3637         }
3638 
3639         mvneta_port_up(pp);
3640 
3641         if (phy && pp->eee_enabled) {
3642                 pp->eee_active = phy_init_eee(phy, 0) >= 0;
3643                 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
3644         }
3645 }
3646 
3647 static const struct phylink_mac_ops mvneta_phylink_ops = {
3648         .validate = mvneta_validate,
3649         .mac_link_state = mvneta_mac_link_state,
3650         .mac_an_restart = mvneta_mac_an_restart,
3651         .mac_config = mvneta_mac_config,
3652         .mac_link_down = mvneta_mac_link_down,
3653         .mac_link_up = mvneta_mac_link_up,
3654 };
3655 
3656 static int mvneta_mdio_probe(struct mvneta_port *pp)
3657 {
3658         struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
3659         int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
3660 
3661         if (err)
3662                 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
3663 
3664         phylink_ethtool_get_wol(pp->phylink, &wol);
3665         device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
3666 
3667         return err;
3668 }
3669 
3670 static void mvneta_mdio_remove(struct mvneta_port *pp)
3671 {
3672         phylink_disconnect_phy(pp->phylink);
3673 }
3674 
3675 /* Electing a CPU must be done in an atomic way: it should be done
3676  * after or before the removal/insertion of a CPU and this function is
3677  * not reentrant.
3678  */
3679 static void mvneta_percpu_elect(struct mvneta_port *pp)
3680 {
3681         int elected_cpu = 0, max_cpu, cpu, i = 0;
3682 
3683         /* Use the cpu associated to the rxq when it is online, in all
3684          * the other cases, use the cpu 0 which can't be offline.
3685          */
3686         if (cpu_online(pp->rxq_def))
3687                 elected_cpu = pp->rxq_def;
3688 
3689         max_cpu = num_present_cpus();
3690 
3691         for_each_online_cpu(cpu) {
3692                 int rxq_map = 0, txq_map = 0;
3693                 int rxq;
3694 
3695                 for (rxq = 0; rxq < rxq_number; rxq++)
3696                         if ((rxq % max_cpu) == cpu)
3697                                 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
3698 
3699                 if (cpu == elected_cpu)
3700                         /* Map the default receive queue queue to the
3701                          * elected CPU
3702                          */
3703                         rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
3704 
3705                 /* We update the TX queue map only if we have one
3706                  * queue. In this case we associate the TX queue to
3707                  * the CPU bound to the default RX queue
3708                  */
3709                 if (txq_number == 1)
3710                         txq_map = (cpu == elected_cpu) ?
3711                                 MVNETA_CPU_TXQ_ACCESS(1) : 0;
3712                 else
3713                         txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
3714                                 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
3715 
3716                 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
3717 
3718                 /* Update the interrupt mask on each CPU according the
3719                  * new mapping
3720                  */
3721                 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
3722                                          pp, true);
3723                 i++;
3724 
3725         }
3726 };
3727 
3728 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3729 {
3730         int other_cpu;
3731         struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3732                                                   node_online);
3733         struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3734 
3735 
3736         spin_lock(&pp->lock);
3737         /*
3738          * Configuring the driver for a new CPU while the driver is
3739          * stopping is racy, so just avoid it.
3740          */
3741         if (pp->is_stopped) {
3742                 spin_unlock(&pp->lock);
3743                 return 0;
3744         }
3745         netif_tx_stop_all_queues(pp->dev);
3746 
3747         /*
3748          * We have to synchronise on tha napi of each CPU except the one
3749          * just being woken up
3750          */
3751         for_each_online_cpu(other_cpu) {
3752                 if (other_cpu != cpu) {
3753                         struct mvneta_pcpu_port *other_port =
3754                                 per_cpu_ptr(pp->ports, other_cpu);
3755 
3756                         napi_synchronize(&other_port->napi);
3757                 }
3758         }
3759 
3760         /* Mask all ethernet port interrupts */
3761         on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3762         napi_enable(&port->napi);
3763 
3764         /*
3765          * Enable per-CPU interrupts on the CPU that is
3766          * brought up.
3767          */
3768         mvneta_percpu_enable(pp);
3769 
3770         /*
3771          * Enable per-CPU interrupt on the one CPU we care
3772          * about.
3773          */
3774         mvneta_percpu_elect(pp);
3775 
3776         /* Unmask all ethernet port interrupts */
3777         on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3778         mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3779                     MVNETA_CAUSE_PHY_STATUS_CHANGE |
3780                     MVNETA_CAUSE_LINK_CHANGE);
3781         netif_tx_start_all_queues(pp->dev);
3782         spin_unlock(&pp->lock);
3783         return 0;
3784 }
3785 
3786 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3787 {
3788         struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3789                                                   node_online);
3790         struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3791 
3792         /*
3793          * Thanks to this lock we are sure that any pending cpu election is
3794          * done.
3795          */
3796         spin_lock(&pp->lock);
3797         /* Mask all ethernet port interrupts */
3798         on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3799         spin_unlock(&pp->lock);
3800 
3801         napi_synchronize(&port->napi);
3802         napi_disable(&port->napi);
3803         /* Disable per-CPU interrupts on the CPU that is brought down. */
3804         mvneta_percpu_disable(pp);
3805         return 0;
3806 }
3807 
3808 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3809 {
3810         struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3811                                                   node_dead);
3812 
3813         /* Check if a new CPU must be elected now this on is down */
3814         spin_lock(&pp->lock);
3815         mvneta_percpu_elect(pp);
3816         spin_unlock(&pp->lock);
3817         /* Unmask all ethernet port interrupts */
3818         on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3819         mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3820                     MVNETA_CAUSE_PHY_STATUS_CHANGE |
3821                     MVNETA_CAUSE_LINK_CHANGE);
3822         netif_tx_start_all_queues(pp->dev);
3823         return 0;
3824 }
3825 
3826 static int mvneta_open(struct net_device *dev)
3827 {
3828         struct mvneta_port *pp = netdev_priv(dev);
3829         int ret;
3830 
3831         pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
3832 
3833         ret = mvneta_setup_rxqs(pp);
3834         if (ret)
3835                 return ret;
3836 
3837         ret = mvneta_setup_txqs(pp);
3838         if (ret)
3839                 goto err_cleanup_rxqs;
3840 
3841         /* Connect to port interrupt line */
3842         if (pp->neta_armada3700)
3843                 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
3844                                   dev->name, pp);
3845         else
3846                 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
3847                                          dev->name, pp->ports);
3848         if (ret) {
3849                 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3850                 goto err_cleanup_txqs;
3851         }
3852 
3853         if (!pp->neta_armada3700) {
3854                 /* Enable per-CPU interrupt on all the CPU to handle our RX
3855                  * queue interrupts
3856                  */
3857                 on_each_cpu(mvneta_percpu_enable, pp, true);
3858 
3859                 pp->is_stopped = false;
3860                 /* Register a CPU notifier to handle the case where our CPU
3861                  * might be taken offline.
3862                  */
3863                 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3864                                                        &pp->node_online);
3865                 if (ret)
3866                         goto err_free_irq;
3867 
3868                 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3869                                                        &pp->node_dead);
3870                 if (ret)
3871                         goto err_free_online_hp;
3872         }
3873 
3874         ret = mvneta_mdio_probe(pp);
3875         if (ret < 0) {
3876                 netdev_err(dev, "cannot probe MDIO bus\n");
3877                 goto err_free_dead_hp;
3878         }
3879 
3880         mvneta_start_dev(pp);
3881 
3882         return 0;
3883 
3884 err_free_dead_hp:
3885         if (!pp->neta_armada3700)
3886                 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3887                                                     &pp->node_dead);
3888 err_free_online_hp:
3889         if (!pp->neta_armada3700)
3890                 cpuhp_state_remove_instance_nocalls(online_hpstate,
3891                                                     &pp->node_online);
3892 err_free_irq:
3893         if (pp->neta_armada3700) {
3894                 free_irq(pp->dev->irq, pp);
3895         } else {
3896                 on_each_cpu(mvneta_percpu_disable, pp, true);
3897                 free_percpu_irq(pp->dev->irq, pp->ports);
3898         }
3899 err_cleanup_txqs:
3900         mvneta_cleanup_txqs(pp);
3901 err_cleanup_rxqs:
3902         mvneta_cleanup_rxqs(pp);
3903         return ret;
3904 }
3905 
3906 /* Stop the port, free port interrupt line */
3907 static int mvneta_stop(struct net_device *dev)
3908 {
3909         struct mvneta_port *pp = netdev_priv(dev);
3910 
3911         if (!pp->neta_armada3700) {
3912                 /* Inform that we are stopping so we don't want to setup the
3913                  * driver for new CPUs in the notifiers. The code of the
3914                  * notifier for CPU online is protected by the same spinlock,
3915                  * so when we get the lock, the notifer work is done.
3916                  */
3917                 spin_lock(&pp->lock);
3918                 pp->is_stopped = true;
3919                 spin_unlock(&pp->lock);
3920 
3921                 mvneta_stop_dev(pp);
3922                 mvneta_mdio_remove(pp);
3923 
3924                 cpuhp_state_remove_instance_nocalls(online_hpstate,
3925                                                     &pp->node_online);
3926                 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3927                                                     &pp->node_dead);
3928                 on_each_cpu(mvneta_percpu_disable, pp, true);
3929                 free_percpu_irq(dev->irq, pp->ports);
3930         } else {
3931                 mvneta_stop_dev(pp);
3932                 mvneta_mdio_remove(pp);
3933                 free_irq(dev->irq, pp);
3934         }
3935 
3936         mvneta_cleanup_rxqs(pp);
3937         mvneta_cleanup_txqs(pp);
3938 
3939         return 0;
3940 }
3941 
3942 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3943 {
3944         struct mvneta_port *pp = netdev_priv(dev);
3945 
3946         return phylink_mii_ioctl(pp->phylink, ifr, cmd);
3947 }
3948 
3949 /* Ethtool methods */
3950 
3951 /* Set link ksettings (phy address, speed) for ethtools */
3952 static int
3953 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
3954                                   const struct ethtool_link_ksettings *cmd)
3955 {
3956         struct mvneta_port *pp = netdev_priv(ndev);
3957 
3958         return phylink_ethtool_ksettings_set(pp->phylink, cmd);
3959 }
3960 
3961 /* Get link ksettings for ethtools */
3962 static int
3963 mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
3964                                   struct ethtool_link_ksettings *cmd)
3965 {
3966         struct mvneta_port *pp = netdev_priv(ndev);
3967 
3968         return phylink_ethtool_ksettings_get(pp->phylink, cmd);
3969 }
3970 
3971 static int mvneta_ethtool_nway_reset(struct net_device *dev)
3972 {
3973         struct mvneta_port *pp = netdev_priv(dev);
3974 
3975         return phylink_ethtool_nway_reset(pp->phylink);
3976 }
3977 
3978 /* Set interrupt coalescing for ethtools */
3979 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3980                                        struct ethtool_coalesce *c)
3981 {
3982         struct mvneta_port *pp = netdev_priv(dev);
3983         int queue;
3984 
3985         for (queue = 0; queue < rxq_number; queue++) {
3986                 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3987                 rxq->time_coal = c->rx_coalesce_usecs;
3988                 rxq->pkts_coal = c->rx_max_coalesced_frames;
3989                 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3990                 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3991         }
3992 
3993         for (queue = 0; queue < txq_number; queue++) {
3994                 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3995                 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3996                 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3997         }
3998 
3999         return 0;
4000 }
4001 
4002 /* get coalescing for ethtools */
4003 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
4004                                        struct ethtool_coalesce *c)
4005 {
4006         struct mvneta_port *pp = netdev_priv(dev);
4007 
4008         c->rx_coalesce_usecs        = pp->rxqs[0].time_coal;
4009         c->rx_max_coalesced_frames  = pp->rxqs[0].pkts_coal;
4010 
4011         c->tx_max_coalesced_frames =  pp->txqs[0].done_pkts_coal;
4012         return 0;
4013 }
4014 
4015 
4016 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4017                                     struct ethtool_drvinfo *drvinfo)
4018 {
4019         strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4020                 sizeof(drvinfo->driver));
4021         strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4022                 sizeof(drvinfo->version));
4023         strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4024                 sizeof(drvinfo->bus_info));
4025 }
4026 
4027 
4028 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
4029                                          struct ethtool_ringparam *ring)
4030 {
4031         struct mvneta_port *pp = netdev_priv(netdev);
4032 
4033         ring->rx_max_pending = MVNETA_MAX_RXD;
4034         ring->tx_max_pending = MVNETA_MAX_TXD;
4035         ring->rx_pending = pp->rx_ring_size;
4036         ring->tx_pending = pp->tx_ring_size;
4037 }
4038 
4039 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
4040                                         struct ethtool_ringparam *ring)
4041 {
4042         struct mvneta_port *pp = netdev_priv(dev);
4043 
4044         if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4045                 return -EINVAL;
4046         pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4047                 ring->rx_pending : MVNETA_MAX_RXD;
4048 
4049         pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4050                                    MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4051         if (pp->tx_ring_size != ring->tx_pending)
4052                 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4053                             pp->tx_ring_size, ring->tx_pending);
4054 
4055         if (netif_running(dev)) {
4056                 mvneta_stop(dev);
4057                 if (mvneta_open(dev)) {
4058                         netdev_err(dev,
4059                                    "error on opening device after ring param change\n");
4060                         return -ENOMEM;
4061                 }
4062         }
4063 
4064         return 0;
4065 }
4066 
4067 static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4068                                           struct ethtool_pauseparam *pause)
4069 {
4070         struct mvneta_port *pp = netdev_priv(dev);
4071 
4072         phylink_ethtool_get_pauseparam(pp->phylink, pause);
4073 }
4074 
4075 static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4076                                          struct ethtool_pauseparam *pause)
4077 {
4078         struct mvneta_port *pp = netdev_priv(dev);
4079 
4080         return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4081 }
4082 
4083 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4084                                        u8 *data)
4085 {
4086         if (sset == ETH_SS_STATS) {
4087                 int i;
4088 
4089                 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4090                         memcpy(data + i * ETH_GSTRING_LEN,
4091                                mvneta_statistics[i].name, ETH_GSTRING_LEN);
4092         }
4093 }
4094 
4095 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4096 {
4097         const struct mvneta_statistic *s;
4098         void __iomem *base = pp->base;
4099         u32 high, low;
4100         u64 val;
4101         int i;
4102 
4103         for (i = 0, s = mvneta_statistics;
4104              s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4105              s++, i++) {
4106                 val = 0;
4107 
4108                 switch (s->type) {
4109                 case T_REG_32:
4110                         val = readl_relaxed(base + s->offset);
4111                         break;
4112                 case T_REG_64:
4113                         /* Docs say to read low 32-bit then high */
4114                         low = readl_relaxed(base + s->offset);
4115                         high = readl_relaxed(base + s->offset + 4);
4116                         val = (u64)high << 32 | low;
4117                         break;
4118                 case T_SW:
4119                         switch (s->offset) {
4120                         case ETHTOOL_STAT_EEE_WAKEUP:
4121                                 val = phylink_get_eee_err(pp->phylink);
4122                                 break;
4123                         case ETHTOOL_STAT_SKB_ALLOC_ERR:
4124                                 val = pp->rxqs[0].skb_alloc_err;
4125                                 break;
4126                         case ETHTOOL_STAT_REFILL_ERR:
4127                                 val = pp->rxqs[0].refill_err;
4128                                 break;
4129                         }
4130                         break;
4131                 }
4132 
4133                 pp->ethtool_stats[i] += val;
4134         }
4135 }
4136 
4137 static void mvneta_ethtool_get_stats(struct net_device *dev,
4138                                      struct ethtool_stats *stats, u64 *data)
4139 {
4140         struct mvneta_port *pp = netdev_priv(dev);
4141         int i;
4142 
4143         mvneta_ethtool_update_stats(pp);
4144 
4145         for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4146                 *data++ = pp->ethtool_stats[i];
4147 }
4148 
4149 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4150 {
4151         if (sset == ETH_SS_STATS)
4152                 return ARRAY_SIZE(mvneta_statistics);
4153         return -EOPNOTSUPP;
4154 }
4155 
4156 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4157 {
4158         return MVNETA_RSS_LU_TABLE_SIZE;
4159 }
4160 
4161 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4162                                     struct ethtool_rxnfc *info,
4163                                     u32 *rules __always_unused)
4164 {
4165         switch (info->cmd) {
4166         case ETHTOOL_GRXRINGS:
4167                 info->data =  rxq_number;
4168                 return 0;
4169         case ETHTOOL_GRXFH:
4170                 return -EOPNOTSUPP;
4171         default:
4172                 return -EOPNOTSUPP;
4173         }
4174 }
4175 
4176 static int  mvneta_config_rss(struct mvneta_port *pp)
4177 {
4178         int cpu;
4179         u32 val;
4180 
4181         netif_tx_stop_all_queues(pp->dev);
4182 
4183         on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4184 
4185         if (!pp->neta_armada3700) {
4186                 /* We have to synchronise on the napi of each CPU */
4187                 for_each_online_cpu(cpu) {
4188                         struct mvneta_pcpu_port *pcpu_port =
4189                                 per_cpu_ptr(pp->ports, cpu);
4190 
4191                         napi_synchronize(&pcpu_port->napi);
4192                         napi_disable(&pcpu_port->napi);
4193                 }
4194         } else {
4195                 napi_synchronize(&pp->napi);
4196                 napi_disable(&pp->napi);
4197         }
4198 
4199         pp->rxq_def = pp->indir[0];
4200 
4201         /* Update unicast mapping */
4202         mvneta_set_rx_mode(pp->dev);
4203 
4204         /* Update val of portCfg register accordingly with all RxQueue types */
4205         val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4206         mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4207 
4208         /* Update the elected CPU matching the new rxq_def */
4209         spin_lock(&pp->lock);
4210         mvneta_percpu_elect(pp);
4211         spin_unlock(&pp->lock);
4212 
4213         if (!pp->neta_armada3700) {
4214                 /* We have to synchronise on the napi of each CPU */
4215                 for_each_online_cpu(cpu) {
4216                         struct mvneta_pcpu_port *pcpu_port =
4217                                 per_cpu_ptr(pp->ports, cpu);
4218 
4219                         napi_enable(&pcpu_port->napi);
4220                 }
4221         } else {
4222                 napi_enable(&pp->napi);
4223         }
4224 
4225         netif_tx_start_all_queues(pp->dev);
4226 
4227         return 0;
4228 }
4229 
4230 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4231                                    const u8 *key, const u8 hfunc)
4232 {
4233         struct mvneta_port *pp = netdev_priv(dev);
4234 
4235         /* Current code for Armada 3700 doesn't support RSS features yet */
4236         if (pp->neta_armada3700)
4237                 return -EOPNOTSUPP;
4238 
4239         /* We require at least one supported parameter to be changed
4240          * and no change in any of the unsupported parameters
4241          */
4242         if (key ||
4243             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
4244                 return -EOPNOTSUPP;
4245 
4246         if (!indir)
4247                 return 0;
4248 
4249         memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4250 
4251         return mvneta_config_rss(pp);
4252 }
4253 
4254 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4255                                    u8 *hfunc)
4256 {
4257         struct mvneta_port *pp = netdev_priv(dev);
4258 
4259         /* Current code for Armada 3700 doesn't support RSS features yet */
4260         if (pp->neta_armada3700)
4261                 return -EOPNOTSUPP;
4262 
4263         if (hfunc)
4264                 *hfunc = ETH_RSS_HASH_TOP;
4265 
4266         if (!indir)
4267                 return 0;
4268 
4269         memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4270 
4271         return 0;
4272 }
4273 
4274 static void mvneta_ethtool_get_wol(struct net_device *dev,
4275                                    struct ethtool_wolinfo *wol)
4276 {
4277         struct mvneta_port *pp = netdev_priv(dev);
4278 
4279         phylink_ethtool_get_wol(pp->phylink, wol);
4280 }
4281 
4282 static int mvneta_ethtool_set_wol(struct net_device *dev,
4283                                   struct ethtool_wolinfo *wol)
4284 {
4285         struct mvneta_port *pp = netdev_priv(dev);
4286         int ret;
4287 
4288         ret = phylink_ethtool_set_wol(pp->phylink, wol);
4289         if (!ret)
4290                 device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
4291 
4292         return ret;
4293 }
4294 
4295 static int mvneta_ethtool_get_eee(struct net_device *dev,
4296                                   struct ethtool_eee *eee)
4297 {
4298         struct mvneta_port *pp = netdev_priv(dev);
4299         u32 lpi_ctl0;
4300 
4301         lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4302 
4303         eee->eee_enabled = pp->eee_enabled;
4304         eee->eee_active = pp->eee_active;
4305         eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4306         eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
4307 
4308         return phylink_ethtool_get_eee(pp->phylink, eee);
4309 }
4310 
4311 static int mvneta_ethtool_set_eee(struct net_device *dev,
4312                                   struct ethtool_eee *eee)
4313 {
4314         struct mvneta_port *pp = netdev_priv(dev);
4315         u32 lpi_ctl0;
4316 
4317         /* The Armada 37x documents do not give limits for this other than
4318          * it being an 8-bit register. */
4319         if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4320                 return -EINVAL;
4321 
4322         lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4323         lpi_ctl0 &= ~(0xff << 8);
4324         lpi_ctl0 |= eee->tx_lpi_timer << 8;
4325         mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
4326 
4327         pp->eee_enabled = eee->eee_enabled;
4328         pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4329 
4330         mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4331 
4332         return phylink_ethtool_set_eee(pp->phylink, eee);
4333 }
4334 
4335 static const struct net_device_ops mvneta_netdev_ops = {
4336         .ndo_open            = mvneta_open,
4337         .ndo_stop            = mvneta_stop,
4338         .ndo_start_xmit      = mvneta_tx,
4339         .ndo_set_rx_mode     = mvneta_set_rx_mode,
4340         .ndo_set_mac_address = mvneta_set_mac_addr,
4341         .ndo_change_mtu      = mvneta_change_mtu,
4342         .ndo_fix_features    = mvneta_fix_features,
4343         .ndo_get_stats64     = mvneta_get_stats64,
4344         .ndo_do_ioctl        = mvneta_ioctl,
4345 };
4346 
4347 static const struct ethtool_ops mvneta_eth_tool_ops = {
4348         .nway_reset     = mvneta_ethtool_nway_reset,
4349         .get_link       = ethtool_op_get_link,
4350         .set_coalesce   = mvneta_ethtool_set_coalesce,
4351         .get_coalesce   = mvneta_ethtool_get_coalesce,
4352         .get_drvinfo    = mvneta_ethtool_get_drvinfo,
4353         .get_ringparam  = mvneta_ethtool_get_ringparam,
4354         .set_ringparam  = mvneta_ethtool_set_ringparam,
4355         .get_pauseparam = mvneta_ethtool_get_pauseparam,
4356         .set_pauseparam = mvneta_ethtool_set_pauseparam,
4357         .get_strings    = mvneta_ethtool_get_strings,
4358         .get_ethtool_stats = mvneta_ethtool_get_stats,
4359         .get_sset_count = mvneta_ethtool_get_sset_count,
4360         .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
4361         .get_rxnfc      = mvneta_ethtool_get_rxnfc,
4362         .get_rxfh       = mvneta_ethtool_get_rxfh,
4363         .set_rxfh       = mvneta_ethtool_set_rxfh,
4364         .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
4365         .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
4366         .get_wol        = mvneta_ethtool_get_wol,
4367         .set_wol        = mvneta_ethtool_set_wol,
4368         .get_eee        = mvneta_ethtool_get_eee,
4369         .set_eee        = mvneta_ethtool_set_eee,
4370 };
4371 
4372 /* Initialize hw */
4373 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
4374 {
4375         int queue;
4376 
4377         /* Disable port */
4378         mvneta_port_disable(pp);
4379 
4380         /* Set port default values */
4381         mvneta_defaults_set(pp);
4382 
4383         pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4384         if (!pp->txqs)
4385                 return -ENOMEM;
4386 
4387         /* Initialize TX descriptor rings */
4388         for (queue = 0; queue < txq_number; queue++) {
4389                 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4390                 txq->id = queue;
4391                 txq->size = pp->tx_ring_size;
4392                 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4393         }
4394 
4395         pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4396         if (!pp->rxqs)
4397                 return -ENOMEM;
4398 
4399         /* Create Rx descriptor rings */
4400         for (queue = 0; queue < rxq_number; queue++) {
4401                 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4402                 rxq->id = queue;
4403                 rxq->size = pp->rx_ring_size;
4404                 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4405                 rxq->time_coal = MVNETA_RX_COAL_USEC;
4406                 rxq->buf_virt_addr
4407                         = devm_kmalloc_array(pp->dev->dev.parent,
4408                                              rxq->size,
4409                                              sizeof(*rxq->buf_virt_addr),
4410                                              GFP_KERNEL);
4411                 if (!rxq->buf_virt_addr)
4412                         return -ENOMEM;
4413         }
4414 
4415         return 0;
4416 }
4417 
4418 /* platform glue : initialize decoding windows */
4419 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4420                                      const struct mbus_dram_target_info *dram)
4421 {
4422         u32 win_enable;
4423         u32 win_protect;
4424         int i;
4425 
4426         for (i = 0; i < 6; i++) {
4427                 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4428                 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4429 
4430                 if (i < 4)
4431                         mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4432         }
4433 
4434         win_enable = 0x3f;
4435         win_protect = 0;
4436 
4437         if (dram) {
4438                 for (i = 0; i < dram->num_cs; i++) {
4439                         const struct mbus_dram_window *cs = dram->cs + i;
4440 
4441                         mvreg_write(pp, MVNETA_WIN_BASE(i),
4442                                     (cs->base & 0xffff0000) |
4443                                     (cs->mbus_attr << 8) |
4444                                     dram->mbus_dram_target_id);
4445 
4446                         mvreg_write(pp, MVNETA_WIN_SIZE(i),
4447                                     (cs->size - 1) & 0xffff0000);
4448 
4449                         win_enable &= ~(1 << i);
4450                         win_protect |= 3 << (2 * i);
4451                 }
4452         } else {
4453                 /* For Armada3700 open default 4GB Mbus window, leaving
4454                  * arbitration of target/attribute to a different layer
4455                  * of configuration.
4456                  */
4457                 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4458                 win_enable &= ~BIT(0);
4459                 win_protect = 3;
4460         }
4461 
4462         mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4463         mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
4464 }
4465 
4466 /* Power up the port */
4467 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4468 {
4469         /* MAC Cause register should be cleared */
4470         mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4471 
4472         if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
4473                 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4474         else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
4475                  phy_interface_mode_is_8023z(phy_mode))
4476                 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4477         else if (!phy_interface_mode_is_rgmii(phy_mode))
4478                 return -EINVAL;
4479 
4480         return 0;
4481 }
4482 
4483 /* Device initialization routine */
4484 static int mvneta_probe(struct platform_device *pdev)
4485 {
4486         struct device_node *dn = pdev->dev.of_node;
4487         struct device_node *bm_node;
4488         struct mvneta_port *pp;
4489         struct net_device *dev;
4490         struct phylink *phylink;
4491         struct phy *comphy;
4492         const char *dt_mac_addr;
4493         char hw_mac_addr[ETH_ALEN];
4494         const char *mac_from;
4495         int tx_csum_limit;
4496         int phy_mode;
4497         int err;
4498         int cpu;
4499 
4500         dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
4501                                       txq_number, rxq_number);
4502         if (!dev)
4503                 return -ENOMEM;
4504 
4505         dev->irq = irq_of_parse_and_map(dn, 0);
4506         if (dev->irq == 0)
4507                 return -EINVAL;
4508 
4509         phy_mode = of_get_phy_mode(dn);
4510         if (phy_mode < 0) {
4511                 dev_err(&pdev->dev, "incorrect phy-mode\n");
4512                 err = -EINVAL;
4513                 goto err_free_irq;
4514         }
4515 
4516         comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
4517         if (comphy == ERR_PTR(-EPROBE_DEFER)) {
4518                 err = -EPROBE_DEFER;
4519                 goto err_free_irq;
4520         } else if (IS_ERR(comphy)) {
4521                 comphy = NULL;
4522         }
4523 
4524         pp = netdev_priv(dev);
4525         spin_lock_init(&pp->lock);
4526 
4527         pp->phylink_config.dev = &dev->dev;
4528         pp->phylink_config.type = PHYLINK_NETDEV;
4529 
4530         phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
4531                                  phy_mode, &mvneta_phylink_ops);
4532         if (IS_ERR(phylink)) {
4533                 err = PTR_ERR(phylink);
4534                 goto err_free_irq;
4535         }
4536 
4537         dev->tx_queue_len = MVNETA_MAX_TXD;
4538         dev->watchdog_timeo = 5 * HZ;
4539         dev->netdev_ops = &mvneta_netdev_ops;
4540 
4541         dev->ethtool_ops = &mvneta_eth_tool_ops;
4542 
4543         pp->phylink = phylink;
4544         pp->comphy = comphy;
4545         pp->phy_interface = phy_mode;
4546         pp->dn = dn;
4547 
4548         pp->rxq_def = rxq_def;
4549         pp->indir[0] = rxq_def;
4550 
4551         /* Get special SoC configurations */
4552         if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4553                 pp->neta_armada3700 = true;
4554 
4555         pp->clk = devm_clk_get(&pdev->dev, "core");
4556         if (IS_ERR(pp->clk))
4557                 pp->clk = devm_clk_get(&pdev->dev, NULL);
4558         if (IS_ERR(pp->clk)) {
4559                 err = PTR_ERR(pp->clk);
4560                 goto err_free_phylink;
4561         }
4562 
4563         clk_prepare_enable(pp->clk);
4564 
4565         pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4566         if (!IS_ERR(pp->clk_bus))
4567                 clk_prepare_enable(pp->clk_bus);
4568 
4569         pp->base = devm_platform_ioremap_resource(pdev, 0);
4570         if (IS_ERR(pp->base)) {
4571                 err = PTR_ERR(pp->base);
4572                 goto err_clk;
4573         }
4574 
4575         /* Alloc per-cpu port structure */
4576         pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4577         if (!pp->ports) {
4578                 err = -ENOMEM;
4579                 goto err_clk;
4580         }
4581 
4582         /* Alloc per-cpu stats */
4583         pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
4584         if (!pp->stats) {
4585                 err = -ENOMEM;
4586                 goto err_free_ports;
4587         }
4588 
4589         dt_mac_addr = of_get_mac_address(dn);
4590         if (!IS_ERR(dt_mac_addr)) {
4591                 mac_from = "device tree";
4592                 ether_addr_copy(dev->dev_addr, dt_mac_addr);
4593         } else {
4594                 mvneta_get_mac_addr(pp, hw_mac_addr);
4595                 if (is_valid_ether_addr(hw_mac_addr)) {
4596                         mac_from = "hardware";
4597                         memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4598                 } else {
4599                         mac_from = "random";
4600                         eth_hw_addr_random(dev);
4601                 }
4602         }
4603 
4604         if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4605                 if (tx_csum_limit < 0 ||
4606                     tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4607                         tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4608                         dev_info(&pdev->dev,
4609                                  "Wrong TX csum limit in DT, set to %dB\n",
4610                                  MVNETA_TX_CSUM_DEF_SIZE);
4611                 }
4612         } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4613                 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4614         } else {
4615                 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4616         }
4617 
4618         pp->tx_csum_limit = tx_csum_limit;
4619 
4620         pp->dram_target_info = mv_mbus_dram_info();
4621         /* Armada3700 requires setting default configuration of Mbus
4622          * windows, however without using filled mbus_dram_target_info
4623          * structure.
4624          */
4625         if (pp->dram_target_info || pp->neta_armada3700)
4626                 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4627 
4628         pp->tx_ring_size = MVNETA_MAX_TXD;
4629         pp->rx_ring_size = MVNETA_MAX_RXD;
4630 
4631         pp->dev = dev;
4632         SET_NETDEV_DEV(dev, &pdev->dev);
4633 
4634         pp->id = global_port_id++;
4635         pp->rx_offset_correction = 0; /* not relevant for SW BM */
4636 
4637         /* Obtain access to BM resources if enabled and already initialized */
4638         bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4639         if (bm_node) {
4640                 pp->bm_priv = mvneta_bm_get(bm_node);
4641                 if (pp->bm_priv) {
4642                         err = mvneta_bm_port_init(pdev, pp);
4643                         if (err < 0) {
4644                                 dev_info(&pdev->dev,
4645                                          "use SW buffer management\n");
4646                                 mvneta_bm_put(pp->bm_priv);
4647                                 pp->bm_priv = NULL;
4648                         }
4649                 }
4650                 /* Set RX packet offset correction for platforms, whose
4651                  * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
4652                  * platforms and 0B for 32-bit ones.
4653                  */
4654                 pp->rx_offset_correction = max(0,
4655                                                NET_SKB_PAD -
4656                                                MVNETA_RX_PKT_OFFSET_CORRECTION);
4657         }
4658         of_node_put(bm_node);
4659 
4660         err = mvneta_init(&pdev->dev, pp);
4661         if (err < 0)
4662                 goto err_netdev;
4663 
4664         err = mvneta_port_power_up(pp, phy_mode);
4665         if (err < 0) {
4666                 dev_err(&pdev->dev, "can't power up port\n");
4667                 goto err_netdev;
4668         }
4669 
4670         /* Armada3700 network controller does not support per-cpu
4671          * operation, so only single NAPI should be initialized.
4672          */
4673         if (pp->neta_armada3700) {
4674                 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
4675         } else {
4676                 for_each_present_cpu(cpu) {
4677                         struct mvneta_pcpu_port *port =
4678                                 per_cpu_ptr(pp->ports, cpu);
4679 
4680                         netif_napi_add(dev, &port->napi, mvneta_poll,
4681                                        NAPI_POLL_WEIGHT);
4682                         port->pp = pp;
4683                 }
4684         }
4685 
4686         dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4687                         NETIF_F_TSO | NETIF_F_RXCSUM;
4688         dev->hw_features |= dev->features;
4689         dev->vlan_features |= dev->features;
4690         dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4691         dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
4692 
4693         /* MTU range: 68 - 9676 */
4694         dev->min_mtu = ETH_MIN_MTU;
4695         /* 9676 == 9700 - 20 and rounding to 8 */
4696         dev->max_mtu = 9676;
4697 
4698         err = register_netdev(dev);
4699         if (err < 0) {
4700                 dev_err(&pdev->dev, "failed to register\n");
4701                 goto err_netdev;
4702         }
4703 
4704         netdev_info(dev, "Using %s mac address %pM\n", mac_from,
4705                     dev->dev_addr);
4706 
4707         platform_set_drvdata(pdev, pp->dev);
4708 
4709         return 0;
4710 
4711 err_netdev:
4712         if (pp->bm_priv) {
4713                 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4714                 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4715                                        1 << pp->id);
4716                 mvneta_bm_put(pp->bm_priv);
4717         }
4718         free_percpu(pp->stats);
4719 err_free_ports:
4720         free_percpu(pp->ports);
4721 err_clk:
4722         clk_disable_unprepare(pp->clk_bus);
4723         clk_disable_unprepare(pp->clk);
4724 err_free_phylink:
4725         if (pp->phylink)
4726                 phylink_destroy(pp->phylink);
4727 err_free_irq:
4728         irq_dispose_mapping(dev->irq);
4729         return err;
4730 }
4731 
4732 /* Device removal routine */
4733 static int mvneta_remove(struct platform_device *pdev)
4734 {
4735         struct net_device  *dev = platform_get_drvdata(pdev);
4736         struct mvneta_port *pp = netdev_priv(dev);
4737 
4738         unregister_netdev(dev);
4739         clk_disable_unprepare(pp->clk_bus);
4740         clk_disable_unprepare(pp->clk);
4741         free_percpu(pp->ports);
4742         free_percpu(pp->stats);
4743         irq_dispose_mapping(dev->irq);
4744         phylink_destroy(pp->phylink);
4745 
4746         if (pp->bm_priv) {
4747                 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
4748                 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
4749                                        1 << pp->id);
4750                 mvneta_bm_put(pp->bm_priv);
4751         }
4752 
4753         return 0;
4754 }
4755 
4756 #ifdef CONFIG_PM_SLEEP
4757 static int mvneta_suspend(struct device *device)
4758 {
4759         int queue;
4760         struct net_device *dev = dev_get_drvdata(device);
4761         struct mvneta_port *pp = netdev_priv(dev);
4762 
4763         if (!netif_running(dev))
4764                 goto clean_exit;
4765 
4766         if (!pp->neta_armada3700) {
4767                 spin_lock(&pp->lock);
4768                 pp->is_stopped = true;
4769                 spin_unlock(&pp->lock);
4770 
4771                 cpuhp_state_remove_instance_nocalls(online_hpstate,
4772                                                     &pp->node_online);
4773                 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4774                                                     &pp->node_dead);
4775         }
4776 
4777         rtnl_lock();
4778         mvneta_stop_dev(pp);
4779         rtnl_unlock();
4780 
4781         for (queue = 0; queue < rxq_number; queue++) {
4782                 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4783 
4784                 mvneta_rxq_drop_pkts(pp, rxq);
4785         }
4786 
4787         for (queue = 0; queue < txq_number; queue++) {
4788                 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4789 
4790                 mvneta_txq_hw_deinit(pp, txq);
4791         }
4792 
4793 clean_exit:
4794         netif_device_detach(dev);
4795         clk_disable_unprepare(pp->clk_bus);
4796         clk_disable_unprepare(pp->clk);
4797 
4798         return 0;
4799 }
4800 
4801 static int mvneta_resume(struct device *device)
4802 {
4803         struct platform_device *pdev = to_platform_device(device);
4804         struct net_device *dev = dev_get_drvdata(device);
4805         struct mvneta_port *pp = netdev_priv(dev);
4806         int err, queue;
4807 
4808         clk_prepare_enable(pp->clk);
4809         if (!IS_ERR(pp->clk_bus))
4810                 clk_prepare_enable(pp->clk_bus);
4811         if (pp->dram_target_info || pp->neta_armada3700)
4812                 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
4813         if (pp->bm_priv) {
4814                 err = mvneta_bm_port_init(pdev, pp);
4815                 if (err < 0) {
4816                         dev_info(&pdev->dev, "use SW buffer management\n");
4817                         pp->bm_priv = NULL;
4818                 }
4819         }
4820         mvneta_defaults_set(pp);
4821         err = mvneta_port_power_up(pp, pp->phy_interface);
4822         if (err < 0) {
4823                 dev_err(device, "can't power up port\n");
4824                 return err;
4825         }
4826 
4827         netif_device_attach(dev);
4828 
4829         if (!netif_running(dev))
4830                 return 0;
4831 
4832         for (queue = 0; queue < rxq_number; queue++) {
4833                 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4834 
4835                 rxq->next_desc_to_proc = 0;
4836                 mvneta_rxq_hw_init(pp, rxq);
4837         }
4838 
4839         for (queue = 0; queue < txq_number; queue++) {
4840                 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4841 
4842                 txq->next_desc_to_proc = 0;
4843                 mvneta_txq_hw_init(pp, txq);
4844         }
4845 
4846         if (!pp->neta_armada3700) {
4847                 spin_lock(&pp->lock);
4848                 pp->is_stopped = false;
4849                 spin_unlock(&pp->lock);
4850                 cpuhp_state_add_instance_nocalls(online_hpstate,
4851                                                  &pp->node_online);
4852                 cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4853                                                  &pp->node_dead);
4854         }
4855 
4856         rtnl_lock();
4857         mvneta_start_dev(pp);
4858         rtnl_unlock();
4859         mvneta_set_rx_mode(dev);
4860 
4861         return 0;
4862 }
4863 #endif
4864 
4865 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
4866 
4867 static const struct of_device_id mvneta_match[] = {
4868         { .compatible = "marvell,armada-370-neta" },
4869         { .compatible = "marvell,armada-xp-neta" },
4870         { .compatible = "marvell,armada-3700-neta" },
4871         { }
4872 };
4873 MODULE_DEVICE_TABLE(of, mvneta_match);
4874 
4875 static struct platform_driver mvneta_driver = {
4876         .probe = mvneta_probe,
4877         .remove = mvneta_remove,
4878         .driver = {
4879                 .name = MVNETA_DRIVER_NAME,
4880                 .of_match_table = mvneta_match,
4881                 .pm = &mvneta_pm_ops,
4882         },
4883 };
4884 
4885 static int __init mvneta_driver_init(void)
4886 {
4887         int ret;
4888 
4889         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
4890                                       mvneta_cpu_online,
4891                                       mvneta_cpu_down_prepare);
4892         if (ret < 0)
4893                 goto out;
4894         online_hpstate = ret;
4895         ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4896                                       NULL, mvneta_cpu_dead);
4897         if (ret)
4898                 goto err_dead;
4899 
4900         ret = platform_driver_register(&mvneta_driver);
4901         if (ret)
4902                 goto err;
4903         return 0;
4904 
4905 err:
4906         cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4907 err_dead:
4908         cpuhp_remove_multi_state(online_hpstate);
4909 out:
4910         return ret;
4911 }
4912 module_init(mvneta_driver_init);
4913 
4914 static void __exit mvneta_driver_exit(void)
4915 {
4916         platform_driver_unregister(&mvneta_driver);
4917         cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4918         cpuhp_remove_multi_state(online_hpstate);
4919 }
4920 module_exit(mvneta_driver_exit);
4921 
4922 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4923 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
4924 MODULE_LICENSE("GPL");
4925 
4926 module_param(rxq_number, int, 0444);
4927 module_param(txq_number, int, 0444);
4928 
4929 module_param(rxq_def, int, 0444);
4930 module_param(rx_copybreak, int, 0644);

/* [<][>][^][v][top][bottom][index][help] */