root/drivers/net/ethernet/mediatek/mtk_eth_soc.c


DEFINITIONS

This source file includes the following definitions.
  1. mtk_w32
  2. mtk_r32
  3. mtk_m32
  4. mtk_mdio_busy_wait
  5. _mtk_mdio_write
  6. _mtk_mdio_read
  7. mtk_mdio_write
  8. mtk_mdio_read
  9. mt7621_gmac0_rgmii_adjust
  10. mtk_gmac0_rgmii_adjust
  11. mtk_mac_config
  12. mtk_mac_link_state
  13. mtk_mac_an_restart
  14. mtk_mac_link_down
  15. mtk_mac_link_up
  16. mtk_validate
  17. mtk_mdio_init
  18. mtk_mdio_cleanup
  19. mtk_tx_irq_disable
  20. mtk_tx_irq_enable
  21. mtk_rx_irq_disable
  22. mtk_rx_irq_enable
  23. mtk_set_mac_address
  24. mtk_stats_update_mac
  25. mtk_stats_update
  26. mtk_get_stats64
  27. mtk_max_frag_size
  28. mtk_max_buf_size
  29. mtk_rx_get_desc
  30. mtk_init_fq_dma
  31. mtk_qdma_phys_to_virt
  32. mtk_desc_to_tx_buf
  33. qdma_to_pdma
  34. txd_to_idx
  35. mtk_tx_unmap
  36. setup_tx_buf
  37. mtk_tx_map
  38. mtk_cal_txd_req
  39. mtk_queue_stopped
  40. mtk_wake_queue
  41. mtk_stop_queue
  42. mtk_start_xmit
  43. mtk_get_rx_ring
  44. mtk_update_rx_cpu_idx
  45. mtk_poll_rx
  46. mtk_poll_tx_qdma
  47. mtk_poll_tx_pdma
  48. mtk_poll_tx
  49. mtk_handle_status_irq
  50. mtk_napi_tx
  51. mtk_napi_rx
  52. mtk_tx_alloc
  53. mtk_tx_clean
  54. mtk_rx_alloc
  55. mtk_rx_clean
  56. mtk_hwlro_rx_init
  57. mtk_hwlro_rx_uninit
  58. mtk_hwlro_val_ipaddr
  59. mtk_hwlro_inval_ipaddr
  60. mtk_hwlro_get_ip_cnt
  61. mtk_hwlro_add_ipaddr
  62. mtk_hwlro_del_ipaddr
  63. mtk_hwlro_netdev_disable
  64. mtk_hwlro_get_fdir_entry
  65. mtk_hwlro_get_fdir_all
  66. mtk_fix_features
  67. mtk_set_features
  68. mtk_dma_busy_wait
  69. mtk_dma_init
  70. mtk_dma_free
  71. mtk_tx_timeout
  72. mtk_handle_irq_rx
  73. mtk_handle_irq_tx
  74. mtk_handle_irq
  75. mtk_poll_controller
  76. mtk_start_dma
  77. mtk_open
  78. mtk_stop_dma
  79. mtk_stop
  80. ethsys_reset
  81. mtk_clk_disable
  82. mtk_clk_enable
  83. mtk_hw_init
  84. mtk_hw_deinit
  85. mtk_init
  86. mtk_uninit
  87. mtk_do_ioctl
  88. mtk_pending_work
  89. mtk_free_dev
  90. mtk_unreg_dev
  91. mtk_cleanup
  92. mtk_get_link_ksettings
  93. mtk_set_link_ksettings
  94. mtk_get_drvinfo
  95. mtk_get_msglevel
  96. mtk_set_msglevel
  97. mtk_nway_reset
  98. mtk_get_strings
  99. mtk_get_sset_count
  100. mtk_get_ethtool_stats
  101. mtk_get_rxnfc
  102. mtk_set_rxnfc
  103. mtk_add_mac
  104. mtk_probe
  105. mtk_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *
   4  *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
   5  *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
   6  *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
   7  */
   8 
   9 #include <linux/of_device.h>
  10 #include <linux/of_mdio.h>
  11 #include <linux/of_net.h>
  12 #include <linux/mfd/syscon.h>
  13 #include <linux/regmap.h>
  14 #include <linux/clk.h>
  15 #include <linux/pm_runtime.h>
  16 #include <linux/if_vlan.h>
  17 #include <linux/reset.h>
  18 #include <linux/tcp.h>
  19 #include <linux/interrupt.h>
  20 #include <linux/pinctrl/devinfo.h>
  21 #include <linux/phylink.h>
  22 
  23 #include "mtk_eth_soc.h"
  24 
  25 static int mtk_msg_level = -1;
  26 module_param_named(msg_level, mtk_msg_level, int, 0);
  27 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  28 
  29 #define MTK_ETHTOOL_STAT(x) { #x, \
  30                               offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  31 
  32 /* strings used by ethtool */
  33 static const struct mtk_ethtool_stats {
  34         char str[ETH_GSTRING_LEN];
  35         u32 offset;
  36 } mtk_ethtool_stats[] = {
  37         MTK_ETHTOOL_STAT(tx_bytes),
  38         MTK_ETHTOOL_STAT(tx_packets),
  39         MTK_ETHTOOL_STAT(tx_skip),
  40         MTK_ETHTOOL_STAT(tx_collisions),
  41         MTK_ETHTOOL_STAT(rx_bytes),
  42         MTK_ETHTOOL_STAT(rx_packets),
  43         MTK_ETHTOOL_STAT(rx_overflow),
  44         MTK_ETHTOOL_STAT(rx_fcs_errors),
  45         MTK_ETHTOOL_STAT(rx_short_errors),
  46         MTK_ETHTOOL_STAT(rx_long_errors),
  47         MTK_ETHTOOL_STAT(rx_checksum_errors),
  48         MTK_ETHTOOL_STAT(rx_flow_control_packets),
  49 };
  50 
  51 static const char * const mtk_clks_source_name[] = {
  52         "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
  53         "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
  54         "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
  55         "sgmii_ck", "eth2pll",
  56 };
  57 
  58 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  59 {
  60         __raw_writel(val, eth->base + reg);
  61 }
  62 
  63 u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  64 {
  65         return __raw_readl(eth->base + reg);
  66 }
  67 
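      /* Read-modify-write helper: clear the bits in @mask, set the bits in
       * @set and write the result back to register @reg.
       */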
  68 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
  69 {
  70         u32 val;
  71 
  72         val = mtk_r32(eth, reg);
  73         val &= ~mask;
  74         val |= set;
  75         mtk_w32(eth, val, reg);
  76         return reg;
  77 }
  78 
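      /* Poll MTK_PHY_IAC until the PHY_IAC_ACCESS bit clears, giving up after
       * PHY_IAC_TIMEOUT.
       */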
  79 static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  80 {
  81         unsigned long t_start = jiffies;
  82 
  83         while (1) {
  84                 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  85                         return 0;
  86                 if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  87                         break;
  88                 usleep_range(10, 20);
  89         }
  90 
  91         dev_err(eth->dev, "mdio: MDIO timeout\n");
  92         return -1;
  93 }
  94 
  95 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  96                            u32 phy_register, u32 write_data)
  97 {
  98         if (mtk_mdio_busy_wait(eth))
  99                 return -1;
 100 
 101         write_data &= 0xffff;
 102 
 103         mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
 104                 (phy_register << PHY_IAC_REG_SHIFT) |
 105                 (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
 106                 MTK_PHY_IAC);
 107 
 108         if (mtk_mdio_busy_wait(eth))
 109                 return -1;
 110 
 111         return 0;
 112 }
 113 
 114 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
 115 {
 116         u32 d;
 117 
 118         if (mtk_mdio_busy_wait(eth))
 119                 return 0xffff;
 120 
 121         mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
 122                 (phy_reg << PHY_IAC_REG_SHIFT) |
 123                 (phy_addr << PHY_IAC_ADDR_SHIFT),
 124                 MTK_PHY_IAC);
 125 
 126         if (mtk_mdio_busy_wait(eth))
 127                 return 0xffff;
 128 
 129         d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
 130 
 131         return d;
 132 }
 133 
 134 static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
 135                           int phy_reg, u16 val)
 136 {
 137         struct mtk_eth *eth = bus->priv;
 138 
 139         return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
 140 }
 141 
 142 static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
 143 {
 144         struct mtk_eth *eth = bus->priv;
 145 
 146         return _mtk_mdio_read(eth, phy_addr, phy_reg);
 147 }
 148 
 149 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
 150                                      phy_interface_t interface)
 151 {
 152         u32 val;
 153 
 154         /* Check DDR memory type.
 155          * Currently TRGMII mode with DDR2 memory is not supported.
 156          */
 157         regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
 158         if (interface == PHY_INTERFACE_MODE_TRGMII &&
 159             val & SYSCFG_DRAM_TYPE_DDR2) {
 160                 dev_err(eth->dev,
 161                         "TRGMII mode with DDR2 memory is not supported!\n");
 162                 return -EOPNOTSUPP;
 163         }
 164 
 165         val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
 166                 ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
 167 
 168         regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
 169                            ETHSYS_TRGMII_MT7621_MASK, val);
 170 
 171         return 0;
 172 }
 173 
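      /* Reprogram the GMAC0 TRGMII interface mode, clock selection and TRGPLL
       * rate for the given link speed.
       */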
 174 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
 175 {
 176         u32 val;
 177         int ret;
 178 
 179         val = (speed == SPEED_1000) ?
 180                 INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
 181         mtk_w32(eth, val, INTF_MODE);
 182 
 183         regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
 184                            ETHSYS_TRGMII_CLK_SEL362_5,
 185                            ETHSYS_TRGMII_CLK_SEL362_5);
 186 
 187         val = (speed == SPEED_1000) ? 250000000 : 500000000;
 188         ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
 189         if (ret)
 190                 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
 191 
 192         val = (speed == SPEED_1000) ?
 193                 RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
 194         mtk_w32(eth, val, TRGMII_RCK_CTRL);
 195 
 196         val = (speed == SPEED_1000) ?
 197                 TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
 198         mtk_w32(eth, val, TRGMII_TCK_CTRL);
 199 }
 200 
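      /* phylink mac_config callback: select the SoC pad/path for the requested
       * interface mode, program the per-GMAC ge_mode and SGMIISYS settings and
       * update the MAC control register (MCR).
       */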
 201 static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
 202                            const struct phylink_link_state *state)
 203 {
 204         struct mtk_mac *mac = container_of(config, struct mtk_mac,
 205                                            phylink_config);
 206         struct mtk_eth *eth = mac->hw;
 207         u32 mcr_cur, mcr_new, sid, i;
 208         int val, ge_mode, err;
 209 
  210         /* MT76x8 has no hardware path settings for the MAC */
 211         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
 212             mac->interface != state->interface) {
 213                 /* Setup soc pin functions */
 214                 switch (state->interface) {
 215                 case PHY_INTERFACE_MODE_TRGMII:
 216                         if (mac->id)
 217                                 goto err_phy;
 218                         if (!MTK_HAS_CAPS(mac->hw->soc->caps,
 219                                           MTK_GMAC1_TRGMII))
 220                                 goto err_phy;
 221                         /* fall through */
 222                 case PHY_INTERFACE_MODE_RGMII_TXID:
 223                 case PHY_INTERFACE_MODE_RGMII_RXID:
 224                 case PHY_INTERFACE_MODE_RGMII_ID:
 225                 case PHY_INTERFACE_MODE_RGMII:
 226                 case PHY_INTERFACE_MODE_MII:
 227                 case PHY_INTERFACE_MODE_REVMII:
 228                 case PHY_INTERFACE_MODE_RMII:
 229                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
 230                                 err = mtk_gmac_rgmii_path_setup(eth, mac->id);
 231                                 if (err)
 232                                         goto init_err;
 233                         }
 234                         break;
 235                 case PHY_INTERFACE_MODE_1000BASEX:
 236                 case PHY_INTERFACE_MODE_2500BASEX:
 237                 case PHY_INTERFACE_MODE_SGMII:
 238                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 239                                 err = mtk_gmac_sgmii_path_setup(eth, mac->id);
 240                                 if (err)
 241                                         goto init_err;
 242                         }
 243                         break;
 244                 case PHY_INTERFACE_MODE_GMII:
 245                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
 246                                 err = mtk_gmac_gephy_path_setup(eth, mac->id);
 247                                 if (err)
 248                                         goto init_err;
 249                         }
 250                         break;
 251                 default:
 252                         goto err_phy;
 253                 }
 254 
 255                 /* Setup clock for 1st gmac */
 256                 if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
 257                     !phy_interface_mode_is_8023z(state->interface) &&
 258                     MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
 259                         if (MTK_HAS_CAPS(mac->hw->soc->caps,
 260                                          MTK_TRGMII_MT7621_CLK)) {
 261                                 if (mt7621_gmac0_rgmii_adjust(mac->hw,
 262                                                               state->interface))
 263                                         goto err_phy;
 264                         } else {
 265                                 if (state->interface !=
 266                                     PHY_INTERFACE_MODE_TRGMII)
 267                                         mtk_gmac0_rgmii_adjust(mac->hw,
 268                                                                state->speed);
 269 
 270                                 /* mt7623_pad_clk_setup */
 271                                 for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
 272                                         mtk_w32(mac->hw,
 273                                                 TD_DM_DRVP(8) | TD_DM_DRVN(8),
 274                                                 TRGMII_TD_ODT(i));
 275 
 276                                 /* Assert/release MT7623 RXC reset */
 277                                 mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
 278                                         TRGMII_RCK_CTRL);
 279                                 mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
 280                         }
 281                 }
 282 
 283                 ge_mode = 0;
 284                 switch (state->interface) {
 285                 case PHY_INTERFACE_MODE_MII:
 286                 case PHY_INTERFACE_MODE_GMII:
 287                         ge_mode = 1;
 288                         break;
 289                 case PHY_INTERFACE_MODE_REVMII:
 290                         ge_mode = 2;
 291                         break;
 292                 case PHY_INTERFACE_MODE_RMII:
 293                         if (mac->id)
 294                                 goto err_phy;
 295                         ge_mode = 3;
 296                         break;
 297                 default:
 298                         break;
 299                 }
 300 
 301                 /* put the gmac into the right mode */
 302                 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 303                 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
 304                 val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
 305                 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 306 
 307                 mac->interface = state->interface;
 308         }
 309 
 310         /* SGMII */
 311         if (state->interface == PHY_INTERFACE_MODE_SGMII ||
 312             phy_interface_mode_is_8023z(state->interface)) {
  313                 /* The path from GMAC to SGMII will be enabled once the
  314                  * SGMIISYS setup is done.
  315                  */
 316                 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 317 
 318                 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
 319                                    SYSCFG0_SGMII_MASK,
 320                                    ~(u32)SYSCFG0_SGMII_MASK);
 321 
  322                 /* Decide how GMAC and SGMIISYS are mapped */
 323                 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
 324                        0 : mac->id;
 325 
 326                 /* Setup SGMIISYS with the determined property */
 327                 if (state->interface != PHY_INTERFACE_MODE_SGMII)
 328                         err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
 329                                                          state);
 330                 else if (phylink_autoneg_inband(mode))
 331                         err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
 332 
 333                 if (err)
 334                         goto init_err;
 335 
 336                 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
 337                                    SYSCFG0_SGMII_MASK, val);
 338         } else if (phylink_autoneg_inband(mode)) {
 339                 dev_err(eth->dev,
 340                         "In-band mode not supported in non SGMII mode!\n");
 341                 return;
 342         }
 343 
 344         /* Setup gmac */
 345         mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 346         mcr_new = mcr_cur;
 347         mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
 348                      MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
 349                      MAC_MCR_FORCE_RX_FC);
 350         mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
 351                    MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
 352 
 353         switch (state->speed) {
 354         case SPEED_2500:
 355         case SPEED_1000:
 356                 mcr_new |= MAC_MCR_SPEED_1000;
 357                 break;
 358         case SPEED_100:
 359                 mcr_new |= MAC_MCR_SPEED_100;
 360                 break;
 361         }
 362         if (state->duplex == DUPLEX_FULL) {
 363                 mcr_new |= MAC_MCR_FORCE_DPX;
 364                 if (state->pause & MLO_PAUSE_TX)
 365                         mcr_new |= MAC_MCR_FORCE_TX_FC;
 366                 if (state->pause & MLO_PAUSE_RX)
 367                         mcr_new |= MAC_MCR_FORCE_RX_FC;
 368         }
 369 
 370         /* Only update control register when needed! */
 371         if (mcr_new != mcr_cur)
 372                 mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
 373 
 374         return;
 375 
 376 err_phy:
 377         dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
 378                 mac->id, phy_modes(state->interface));
 379         return;
 380 
 381 init_err:
 382         dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
 383                 mac->id, phy_modes(state->interface), err);
 384 }
 385 
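      /* phylink mac_link_state callback: derive link, duplex, speed and pause
       * state from the MAC status register (MSR).
       */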
 386 static int mtk_mac_link_state(struct phylink_config *config,
 387                               struct phylink_link_state *state)
 388 {
 389         struct mtk_mac *mac = container_of(config, struct mtk_mac,
 390                                            phylink_config);
 391         u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
 392 
 393         state->link = (pmsr & MAC_MSR_LINK);
 394         state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
 395 
 396         switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
 397         case 0:
 398                 state->speed = SPEED_10;
 399                 break;
 400         case MAC_MSR_SPEED_100:
 401                 state->speed = SPEED_100;
 402                 break;
 403         case MAC_MSR_SPEED_1000:
 404                 state->speed = SPEED_1000;
 405                 break;
 406         default:
 407                 state->speed = SPEED_UNKNOWN;
 408                 break;
 409         }
 410 
 411         state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
 412         if (pmsr & MAC_MSR_RX_FC)
 413                 state->pause |= MLO_PAUSE_RX;
 414         if (pmsr & MAC_MSR_TX_FC)
 415                 state->pause |= MLO_PAUSE_TX;
 416 
 417         return 1;
 418 }
 419 
 420 static void mtk_mac_an_restart(struct phylink_config *config)
 421 {
 422         struct mtk_mac *mac = container_of(config, struct mtk_mac,
 423                                            phylink_config);
 424 
 425         mtk_sgmii_restart_an(mac->hw, mac->id);
 426 }
 427 
 428 static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
 429                               phy_interface_t interface)
 430 {
 431         struct mtk_mac *mac = container_of(config, struct mtk_mac,
 432                                            phylink_config);
 433         u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 434 
 435         mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
 436         mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 437 }
 438 
 439 static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
 440                             phy_interface_t interface,
 441                             struct phy_device *phy)
 442 {
 443         struct mtk_mac *mac = container_of(config, struct mtk_mac,
 444                                            phylink_config);
 445         u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 446 
 447         mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
 448         mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 449 }
 450 
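      /* phylink validate callback: restrict the supported/advertised link
       * modes according to the SoC capabilities and the requested interface
       * mode.
       */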
 451 static void mtk_validate(struct phylink_config *config,
 452                          unsigned long *supported,
 453                          struct phylink_link_state *state)
 454 {
 455         struct mtk_mac *mac = container_of(config, struct mtk_mac,
 456                                            phylink_config);
 457         __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 458 
 459         if (state->interface != PHY_INTERFACE_MODE_NA &&
 460             state->interface != PHY_INTERFACE_MODE_MII &&
 461             state->interface != PHY_INTERFACE_MODE_GMII &&
 462             !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
 463               phy_interface_mode_is_rgmii(state->interface)) &&
 464             !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
 465               !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
 466             !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
 467               (state->interface == PHY_INTERFACE_MODE_SGMII ||
 468                phy_interface_mode_is_8023z(state->interface)))) {
 469                 linkmode_zero(supported);
 470                 return;
 471         }
 472 
 473         phylink_set_port_modes(mask);
 474         phylink_set(mask, Autoneg);
 475 
 476         switch (state->interface) {
 477         case PHY_INTERFACE_MODE_TRGMII:
 478                 phylink_set(mask, 1000baseT_Full);
 479                 break;
 480         case PHY_INTERFACE_MODE_1000BASEX:
 481         case PHY_INTERFACE_MODE_2500BASEX:
 482                 phylink_set(mask, 1000baseX_Full);
 483                 phylink_set(mask, 2500baseX_Full);
 484                 break;
 485         case PHY_INTERFACE_MODE_GMII:
 486         case PHY_INTERFACE_MODE_RGMII:
 487         case PHY_INTERFACE_MODE_RGMII_ID:
 488         case PHY_INTERFACE_MODE_RGMII_RXID:
 489         case PHY_INTERFACE_MODE_RGMII_TXID:
 490                 phylink_set(mask, 1000baseT_Half);
 491                 /* fall through */
 492         case PHY_INTERFACE_MODE_SGMII:
 493                 phylink_set(mask, 1000baseT_Full);
 494                 phylink_set(mask, 1000baseX_Full);
 495                 /* fall through */
 496         case PHY_INTERFACE_MODE_MII:
 497         case PHY_INTERFACE_MODE_RMII:
 498         case PHY_INTERFACE_MODE_REVMII:
 499         case PHY_INTERFACE_MODE_NA:
 500         default:
 501                 phylink_set(mask, 10baseT_Half);
 502                 phylink_set(mask, 10baseT_Full);
 503                 phylink_set(mask, 100baseT_Half);
 504                 phylink_set(mask, 100baseT_Full);
 505                 break;
 506         }
 507 
 508         if (state->interface == PHY_INTERFACE_MODE_NA) {
 509                 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
 510                         phylink_set(mask, 1000baseT_Full);
 511                         phylink_set(mask, 1000baseX_Full);
 512                         phylink_set(mask, 2500baseX_Full);
 513                 }
 514                 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
 515                         phylink_set(mask, 1000baseT_Full);
 516                         phylink_set(mask, 1000baseT_Half);
 517                         phylink_set(mask, 1000baseX_Full);
 518                 }
 519                 if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
 520                         phylink_set(mask, 1000baseT_Full);
 521                         phylink_set(mask, 1000baseT_Half);
 522                 }
 523         }
 524 
 525         phylink_set(mask, Pause);
 526         phylink_set(mask, Asym_Pause);
 527 
 528         linkmode_and(supported, supported, mask);
 529         linkmode_and(state->advertising, state->advertising, mask);
 530 
 531         /* We can only operate at 2500BaseX or 1000BaseX. If requested
 532          * to advertise both, only report advertising at 2500BaseX.
 533          */
 534         phylink_helper_basex_speed(state);
 535 }
 536 
 537 static const struct phylink_mac_ops mtk_phylink_ops = {
 538         .validate = mtk_validate,
 539         .mac_link_state = mtk_mac_link_state,
 540         .mac_an_restart = mtk_mac_an_restart,
 541         .mac_config = mtk_mac_config,
 542         .mac_link_down = mtk_mac_link_down,
 543         .mac_link_up = mtk_mac_link_up,
 544 };
 545 
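      /* Register an MDIO bus backed by the "mdio-bus" child node, if that
       * node is present and enabled.
       */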
 546 static int mtk_mdio_init(struct mtk_eth *eth)
 547 {
 548         struct device_node *mii_np;
 549         int ret;
 550 
 551         mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
 552         if (!mii_np) {
 553                 dev_err(eth->dev, "no %s child node found", "mdio-bus");
 554                 return -ENODEV;
 555         }
 556 
 557         if (!of_device_is_available(mii_np)) {
 558                 ret = -ENODEV;
 559                 goto err_put_node;
 560         }
 561 
 562         eth->mii_bus = devm_mdiobus_alloc(eth->dev);
 563         if (!eth->mii_bus) {
 564                 ret = -ENOMEM;
 565                 goto err_put_node;
 566         }
 567 
 568         eth->mii_bus->name = "mdio";
 569         eth->mii_bus->read = mtk_mdio_read;
 570         eth->mii_bus->write = mtk_mdio_write;
 571         eth->mii_bus->priv = eth;
 572         eth->mii_bus->parent = eth->dev;
 573 
 574         snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
 575         ret = of_mdiobus_register(eth->mii_bus, mii_np);
 576 
 577 err_put_node:
 578         of_node_put(mii_np);
 579         return ret;
 580 }
 581 
 582 static void mtk_mdio_cleanup(struct mtk_eth *eth)
 583 {
 584         if (!eth->mii_bus)
 585                 return;
 586 
 587         mdiobus_unregister(eth->mii_bus);
 588 }
 589 
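      /* Interrupt mask helpers: set or clear bits in the TX and RX interrupt
       * mask registers under the tx_irq_lock/rx_irq_lock spinlocks.
       */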
 590 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
 591 {
 592         unsigned long flags;
 593         u32 val;
 594 
 595         spin_lock_irqsave(&eth->tx_irq_lock, flags);
 596         val = mtk_r32(eth, eth->tx_int_mask_reg);
 597         mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
 598         spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 599 }
 600 
 601 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
 602 {
 603         unsigned long flags;
 604         u32 val;
 605 
 606         spin_lock_irqsave(&eth->tx_irq_lock, flags);
 607         val = mtk_r32(eth, eth->tx_int_mask_reg);
 608         mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
 609         spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 610 }
 611 
 612 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
 613 {
 614         unsigned long flags;
 615         u32 val;
 616 
 617         spin_lock_irqsave(&eth->rx_irq_lock, flags);
 618         val = mtk_r32(eth, MTK_PDMA_INT_MASK);
 619         mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
 620         spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 621 }
 622 
 623 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
 624 {
 625         unsigned long flags;
 626         u32 val;
 627 
 628         spin_lock_irqsave(&eth->rx_irq_lock, flags);
 629         val = mtk_r32(eth, MTK_PDMA_INT_MASK);
 630         mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
 631         spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 632 }
 633 
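      /* ndo_set_mac_address: write the new address to the GDMA (or MT7628
       * SDM) MAC address registers under the page lock.
       */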
 634 static int mtk_set_mac_address(struct net_device *dev, void *p)
 635 {
 636         int ret = eth_mac_addr(dev, p);
 637         struct mtk_mac *mac = netdev_priv(dev);
 638         struct mtk_eth *eth = mac->hw;
 639         const char *macaddr = dev->dev_addr;
 640 
 641         if (ret)
 642                 return ret;
 643 
 644         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
 645                 return -EBUSY;
 646 
 647         spin_lock_bh(&mac->hw->page_lock);
 648         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 649                 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 650                         MT7628_SDM_MAC_ADRH);
 651                 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 652                         (macaddr[4] << 8) | macaddr[5],
 653                         MT7628_SDM_MAC_ADRL);
 654         } else {
 655                 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 656                         MTK_GDMA_MAC_ADRH(mac->id));
 657                 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 658                         (macaddr[4] << 8) | macaddr[5],
 659                         MTK_GDMA_MAC_ADRL(mac->id));
 660         }
 661         spin_unlock_bh(&mac->hw->page_lock);
 662 
 663         return 0;
 664 }
 665 
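      /* Fold the hardware MIB counters for one MAC into its mtk_hw_stats,
       * inside a u64_stats update section.
       */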
 666 void mtk_stats_update_mac(struct mtk_mac *mac)
 667 {
 668         struct mtk_hw_stats *hw_stats = mac->hw_stats;
 669         unsigned int base = MTK_GDM1_TX_GBCNT;
 670         u64 stats;
 671 
 672         base += hw_stats->reg_offset;
 673 
 674         u64_stats_update_begin(&hw_stats->syncp);
 675 
 676         hw_stats->rx_bytes += mtk_r32(mac->hw, base);
 677         stats =  mtk_r32(mac->hw, base + 0x04);
 678         if (stats)
 679                 hw_stats->rx_bytes += (stats << 32);
 680         hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
 681         hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
 682         hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
 683         hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
 684         hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
 685         hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
 686         hw_stats->rx_flow_control_packets +=
 687                                         mtk_r32(mac->hw, base + 0x24);
 688         hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
 689         hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
 690         hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
 691         stats =  mtk_r32(mac->hw, base + 0x34);
 692         if (stats)
 693                 hw_stats->tx_bytes += (stats << 32);
 694         hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
 695         u64_stats_update_end(&hw_stats->syncp);
 696 }
 697 
 698 static void mtk_stats_update(struct mtk_eth *eth)
 699 {
 700         int i;
 701 
 702         for (i = 0; i < MTK_MAC_COUNT; i++) {
 703                 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
 704                         continue;
 705                 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
 706                         mtk_stats_update_mac(eth->mac[i]);
 707                         spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
 708                 }
 709         }
 710 }
 711 
 712 static void mtk_get_stats64(struct net_device *dev,
 713                             struct rtnl_link_stats64 *storage)
 714 {
 715         struct mtk_mac *mac = netdev_priv(dev);
 716         struct mtk_hw_stats *hw_stats = mac->hw_stats;
 717         unsigned int start;
 718 
 719         if (netif_running(dev) && netif_device_present(dev)) {
 720                 if (spin_trylock_bh(&hw_stats->stats_lock)) {
 721                         mtk_stats_update_mac(mac);
 722                         spin_unlock_bh(&hw_stats->stats_lock);
 723                 }
 724         }
 725 
 726         do {
 727                 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
 728                 storage->rx_packets = hw_stats->rx_packets;
 729                 storage->tx_packets = hw_stats->tx_packets;
 730                 storage->rx_bytes = hw_stats->rx_bytes;
 731                 storage->tx_bytes = hw_stats->tx_bytes;
 732                 storage->collisions = hw_stats->tx_collisions;
 733                 storage->rx_length_errors = hw_stats->rx_short_errors +
 734                         hw_stats->rx_long_errors;
 735                 storage->rx_over_errors = hw_stats->rx_overflow;
 736                 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
 737                 storage->rx_errors = hw_stats->rx_checksum_errors;
 738                 storage->tx_aborted_errors = hw_stats->tx_skip;
 739         } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
 740 
 741         storage->tx_errors = dev->stats.tx_errors;
 742         storage->rx_dropped = dev->stats.rx_dropped;
 743         storage->tx_dropped = dev->stats.tx_dropped;
 744 }
 745 
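      /* RX buffer sizing helpers: derive the fragment allocation size from
       * the MTU, and the usable DMA buffer size from the fragment size.
       */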
 746 static inline int mtk_max_frag_size(int mtu)
 747 {
 748         /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
 749         if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
 750                 mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
 751 
 752         return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
 753                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 754 }
 755 
 756 static inline int mtk_max_buf_size(int frag_size)
 757 {
 758         int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
 759                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 760 
 761         WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
 762 
 763         return buf_size;
 764 }
 765 
 766 static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 767                                    struct mtk_rx_dma *dma_rxd)
 768 {
 769         rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
 770         rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
 771         rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
 772         rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
 773 }
 774 
  775 /* the qdma core needs scratch memory to be set up */
 776 static int mtk_init_fq_dma(struct mtk_eth *eth)
 777 {
 778         dma_addr_t phy_ring_tail;
 779         int cnt = MTK_DMA_SIZE;
 780         dma_addr_t dma_addr;
 781         int i;
 782 
 783         eth->scratch_ring = dma_alloc_coherent(eth->dev,
 784                                                cnt * sizeof(struct mtk_tx_dma),
 785                                                &eth->phy_scratch_ring,
 786                                                GFP_ATOMIC);
 787         if (unlikely(!eth->scratch_ring))
 788                 return -ENOMEM;
 789 
 790         eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
 791                                     GFP_KERNEL);
 792         if (unlikely(!eth->scratch_head))
 793                 return -ENOMEM;
 794 
 795         dma_addr = dma_map_single(eth->dev,
 796                                   eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 797                                   DMA_FROM_DEVICE);
 798         if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
 799                 return -ENOMEM;
 800 
 801         phy_ring_tail = eth->phy_scratch_ring +
 802                         (sizeof(struct mtk_tx_dma) * (cnt - 1));
 803 
 804         for (i = 0; i < cnt; i++) {
 805                 eth->scratch_ring[i].txd1 =
 806                                         (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
 807                 if (i < cnt - 1)
 808                         eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
 809                                 ((i + 1) * sizeof(struct mtk_tx_dma)));
 810                 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
 811         }
 812 
 813         mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
 814         mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
 815         mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
 816         mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
 817 
 818         return 0;
 819 }
 820 
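      /* Small helpers for translating between QDMA physical addresses, ring
       * virtual addresses, PDMA shadow descriptors, tx_buf entries and ring
       * indices.
       */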
 821 static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
 822 {
 823         void *ret = ring->dma;
 824 
 825         return ret + (desc - ring->phys);
 826 }
 827 
 828 static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 829                                                     struct mtk_tx_dma *txd)
 830 {
 831         int idx = txd - ring->dma;
 832 
 833         return &ring->buf[idx];
 834 }
 835 
 836 static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
 837                                        struct mtk_tx_dma *dma)
 838 {
 839         return ring->dma_pdma - ring->dma + dma;
 840 }
 841 
 842 static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
 843 {
 844         return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
 845 }
 846 
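      /* Undo the DMA mapping(s) of a TX buffer and free its skb, handling
       * both the QDMA (single/page) and PDMA (two buffers per slot) layouts.
       */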
 847 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 848 {
 849         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 850                 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
 851                         dma_unmap_single(eth->dev,
 852                                          dma_unmap_addr(tx_buf, dma_addr0),
 853                                          dma_unmap_len(tx_buf, dma_len0),
 854                                          DMA_TO_DEVICE);
 855                 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
 856                         dma_unmap_page(eth->dev,
 857                                        dma_unmap_addr(tx_buf, dma_addr0),
 858                                        dma_unmap_len(tx_buf, dma_len0),
 859                                        DMA_TO_DEVICE);
 860                 }
 861         } else {
 862                 if (dma_unmap_len(tx_buf, dma_len0)) {
 863                         dma_unmap_page(eth->dev,
 864                                        dma_unmap_addr(tx_buf, dma_addr0),
 865                                        dma_unmap_len(tx_buf, dma_len0),
 866                                        DMA_TO_DEVICE);
 867                 }
 868 
 869                 if (dma_unmap_len(tx_buf, dma_len1)) {
 870                         dma_unmap_page(eth->dev,
 871                                        dma_unmap_addr(tx_buf, dma_addr1),
 872                                        dma_unmap_len(tx_buf, dma_len1),
 873                                        DMA_TO_DEVICE);
 874                 }
 875         }
 876 
 877         tx_buf->flags = 0;
 878         if (tx_buf->skb &&
 879             (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
 880                 dev_kfree_skb_any(tx_buf->skb);
 881         tx_buf->skb = NULL;
 882 }
 883 
 884 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
 885                          struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
 886                          size_t size, int idx)
 887 {
 888         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 889                 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 890                 dma_unmap_len_set(tx_buf, dma_len0, size);
 891         } else {
 892                 if (idx & 1) {
 893                         txd->txd3 = mapped_addr;
 894                         txd->txd2 |= TX_DMA_PLEN1(size);
 895                         dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
 896                         dma_unmap_len_set(tx_buf, dma_len1, size);
 897                 } else {
 898                         tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 899                         txd->txd1 = mapped_addr;
 900                         txd->txd2 = TX_DMA_PLEN0(size);
 901                         dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 902                         dma_unmap_len_set(tx_buf, dma_len0, size);
 903                 }
 904         }
 905 }
 906 
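      /* Map an skb (linear head plus fragments) onto the TX ring, fill the
       * QDMA descriptors and PDMA shadow descriptors, and kick the hardware
       * once the ring updates are visible.
       */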
 907 static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 908                       int tx_num, struct mtk_tx_ring *ring, bool gso)
 909 {
 910         struct mtk_mac *mac = netdev_priv(dev);
 911         struct mtk_eth *eth = mac->hw;
 912         struct mtk_tx_dma *itxd, *txd;
 913         struct mtk_tx_dma *itxd_pdma, *txd_pdma;
 914         struct mtk_tx_buf *itx_buf, *tx_buf;
 915         dma_addr_t mapped_addr;
 916         unsigned int nr_frags;
 917         int i, n_desc = 1;
 918         u32 txd4 = 0, fport;
 919         int k = 0;
 920 
 921         itxd = ring->next_free;
 922         itxd_pdma = qdma_to_pdma(ring, itxd);
 923         if (itxd == ring->last_free)
 924                 return -ENOMEM;
 925 
 926         /* set the forward port */
 927         fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
 928         txd4 |= fport;
 929 
 930         itx_buf = mtk_desc_to_tx_buf(ring, itxd);
 931         memset(itx_buf, 0, sizeof(*itx_buf));
 932 
 933         if (gso)
 934                 txd4 |= TX_DMA_TSO;
 935 
 936         /* TX Checksum offload */
 937         if (skb->ip_summed == CHECKSUM_PARTIAL)
 938                 txd4 |= TX_DMA_CHKSUM;
 939 
 940         /* VLAN header offload */
 941         if (skb_vlan_tag_present(skb))
 942                 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 943 
 944         mapped_addr = dma_map_single(eth->dev, skb->data,
 945                                      skb_headlen(skb), DMA_TO_DEVICE);
 946         if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 947                 return -ENOMEM;
 948 
 949         WRITE_ONCE(itxd->txd1, mapped_addr);
 950         itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 951         itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
 952                           MTK_TX_FLAGS_FPORT1;
 953         setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
 954                      k++);
 955 
 956         /* TX SG offload */
 957         txd = itxd;
 958         txd_pdma = qdma_to_pdma(ring, txd);
 959         nr_frags = skb_shinfo(skb)->nr_frags;
 960 
 961         for (i = 0; i < nr_frags; i++) {
 962                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 963                 unsigned int offset = 0;
 964                 int frag_size = skb_frag_size(frag);
 965 
 966                 while (frag_size) {
 967                         bool last_frag = false;
 968                         unsigned int frag_map_size;
 969                         bool new_desc = true;
 970 
 971                         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
 972                             (i & 0x1)) {
 973                                 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 974                                 txd_pdma = qdma_to_pdma(ring, txd);
 975                                 if (txd == ring->last_free)
 976                                         goto err_dma;
 977 
 978                                 n_desc++;
 979                         } else {
 980                                 new_desc = false;
 981                         }
 982 
 983 
 984                         frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
 985                         mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 986                                                        frag_map_size,
 987                                                        DMA_TO_DEVICE);
 988                         if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 989                                 goto err_dma;
 990 
 991                         if (i == nr_frags - 1 &&
 992                             (frag_size - frag_map_size) == 0)
 993                                 last_frag = true;
 994 
 995                         WRITE_ONCE(txd->txd1, mapped_addr);
 996                         WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
 997                                                TX_DMA_PLEN0(frag_map_size) |
 998                                                last_frag * TX_DMA_LS0));
 999                         WRITE_ONCE(txd->txd4, fport);
1000 
1001                         tx_buf = mtk_desc_to_tx_buf(ring, txd);
1002                         if (new_desc)
1003                                 memset(tx_buf, 0, sizeof(*tx_buf));
1004                         tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1005                         tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1006                         tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1007                                          MTK_TX_FLAGS_FPORT1;
1008 
1009                         setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
1010                                      frag_map_size, k++);
1011 
1012                         frag_size -= frag_map_size;
1013                         offset += frag_map_size;
1014                 }
1015         }
1016 
 1017         /* store skb for cleanup */
1018         itx_buf->skb = skb;
1019 
1020         WRITE_ONCE(itxd->txd4, txd4);
1021         WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
1022                                 (!nr_frags * TX_DMA_LS0)));
1023         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1024                 if (k & 0x1)
1025                         txd_pdma->txd2 |= TX_DMA_LS0;
1026                 else
1027                         txd_pdma->txd2 |= TX_DMA_LS1;
1028         }
1029 
1030         netdev_sent_queue(dev, skb->len);
1031         skb_tx_timestamp(skb);
1032 
1033         ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1034         atomic_sub(n_desc, &ring->free_count);
1035 
1036         /* make sure that all changes to the dma ring are flushed before we
1037          * continue
1038          */
1039         wmb();
1040 
1041         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1042                 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1043                     !netdev_xmit_more())
1044                         mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1045         } else {
1046                 int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
1047                                              ring->dma_size);
1048                 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1049         }
1050 
1051         return 0;
1052 
1053 err_dma:
1054         do {
1055                 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
1056 
1057                 /* unmap dma */
1058                 mtk_tx_unmap(eth, tx_buf);
1059 
1060                 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1061                 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1062                         itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1063 
1064                 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1065                 itxd_pdma = qdma_to_pdma(ring, itxd);
1066         } while (itxd != txd);
1067 
1068         return -ENOMEM;
1069 }
1070 
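      /* Worst-case number of TX descriptors needed for an skb; for GSO each
       * fragment is split into MTK_TX_DMA_BUF_LEN sized chunks.
       */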
1071 static inline int mtk_cal_txd_req(struct sk_buff *skb)
1072 {
1073         int i, nfrags;
1074         skb_frag_t *frag;
1075 
1076         nfrags = 1;
1077         if (skb_is_gso(skb)) {
1078                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1079                         frag = &skb_shinfo(skb)->frags[i];
1080                         nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1081                                                 MTK_TX_DMA_BUF_LEN);
1082                 }
1083         } else {
1084                 nfrags += skb_shinfo(skb)->nr_frags;
1085         }
1086 
1087         return nfrags;
1088 }
1089 
1090 static int mtk_queue_stopped(struct mtk_eth *eth)
1091 {
1092         int i;
1093 
1094         for (i = 0; i < MTK_MAC_COUNT; i++) {
1095                 if (!eth->netdev[i])
1096                         continue;
1097                 if (netif_queue_stopped(eth->netdev[i]))
1098                         return 1;
1099         }
1100 
1101         return 0;
1102 }
1103 
1104 static void mtk_wake_queue(struct mtk_eth *eth)
1105 {
1106         int i;
1107 
1108         for (i = 0; i < MTK_MAC_COUNT; i++) {
1109                 if (!eth->netdev[i])
1110                         continue;
1111                 netif_wake_queue(eth->netdev[i]);
1112         }
1113 }
1114 
1115 static void mtk_stop_queue(struct mtk_eth *eth)
1116 {
1117         int i;
1118 
1119         for (i = 0; i < MTK_MAC_COUNT; i++) {
1120                 if (!eth->netdev[i])
1121                         continue;
1122                 netif_stop_queue(eth->netdev[i]);
1123         }
1124 }
1125 
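      /* ndo_start_xmit: reserve enough descriptors, prepare TSO/checksum
       * offload state and map the skb onto the shared TX ring.
       */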
1126 static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1127 {
1128         struct mtk_mac *mac = netdev_priv(dev);
1129         struct mtk_eth *eth = mac->hw;
1130         struct mtk_tx_ring *ring = &eth->tx_ring;
1131         struct net_device_stats *stats = &dev->stats;
1132         bool gso = false;
1133         int tx_num;
1134 
 1135         /* normally we can rely on the stack not calling this more than once;
 1136          * however, we have 2 queues running on the same ring, so we need to
 1137          * lock the ring access
1138          */
1139         spin_lock(&eth->page_lock);
1140 
1141         if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1142                 goto drop;
1143 
1144         tx_num = mtk_cal_txd_req(skb);
1145         if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1146                 mtk_stop_queue(eth);
1147                 netif_err(eth, tx_queued, dev,
1148                           "Tx Ring full when queue awake!\n");
1149                 spin_unlock(&eth->page_lock);
1150                 return NETDEV_TX_BUSY;
1151         }
1152 
1153         /* TSO: fill MSS info in tcp checksum field */
1154         if (skb_is_gso(skb)) {
1155                 if (skb_cow_head(skb, 0)) {
1156                         netif_warn(eth, tx_err, dev,
1157                                    "GSO expand head fail.\n");
1158                         goto drop;
1159                 }
1160 
1161                 if (skb_shinfo(skb)->gso_type &
1162                                 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1163                         gso = true;
1164                         tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1165                 }
1166         }
1167 
1168         if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1169                 goto drop;
1170 
1171         if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1172                 mtk_stop_queue(eth);
1173 
1174         spin_unlock(&eth->page_lock);
1175 
1176         return NETDEV_TX_OK;
1177 
1178 drop:
1179         spin_unlock(&eth->page_lock);
1180         stats->tx_dropped++;
1181         dev_kfree_skb_any(skb);
1182         return NETDEV_TX_OK;
1183 }
1184 
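      /* With HW LRO enabled, pick the next RX ring that has a completed
       * descriptor; otherwise always use ring 0.
       */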
1185 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1186 {
1187         int i;
1188         struct mtk_rx_ring *ring;
1189         int idx;
1190 
1191         if (!eth->hwlro)
1192                 return &eth->rx_ring[0];
1193 
1194         for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1195                 ring = &eth->rx_ring[i];
1196                 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1197                 if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
1198                         ring->calc_idx_update = true;
1199                         return ring;
1200                 }
1201         }
1202 
1203         return NULL;
1204 }
1205 
1206 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1207 {
1208         struct mtk_rx_ring *ring;
1209         int i;
1210 
1211         if (!eth->hwlro) {
1212                 ring = &eth->rx_ring[0];
1213                 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1214         } else {
1215                 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1216                         ring = &eth->rx_ring[i];
1217                         if (ring->calc_idx_update) {
1218                                 ring->calc_idx_update = false;
1219                                 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1220                         }
1221                 }
1222         }
1223 }
1224 
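      /* NAPI RX poll: reap up to @budget completed RX descriptors, refill the
       * ring with freshly mapped buffers and pass the packets to the stack
       * via napi_gro_receive().
       */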
1225 static int mtk_poll_rx(struct napi_struct *napi, int budget,
1226                        struct mtk_eth *eth)
1227 {
1228         struct mtk_rx_ring *ring;
1229         int idx;
1230         struct sk_buff *skb;
1231         u8 *data, *new_data;
1232         struct mtk_rx_dma *rxd, trxd;
1233         int done = 0;
1234 
1235         while (done < budget) {
1236                 struct net_device *netdev;
1237                 unsigned int pktlen;
1238                 dma_addr_t dma_addr;
1239                 int mac;
1240 
1241                 ring = mtk_get_rx_ring(eth);
1242                 if (unlikely(!ring))
1243                         goto rx_done;
1244 
1245                 idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1246                 rxd = &ring->dma[idx];
1247                 data = ring->data[idx];
1248 
1249                 mtk_rx_get_desc(&trxd, rxd);
1250                 if (!(trxd.rxd2 & RX_DMA_DONE))
1251                         break;
1252 
 1253                 /* find out which mac the packet comes from. values start at 1 */
1254                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1255                         mac = 0;
1256                 } else {
1257                         mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1258                                 RX_DMA_FPORT_MASK;
1259                         mac--;
1260                 }
1261 
1262                 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1263                              !eth->netdev[mac]))
1264                         goto release_desc;
1265 
1266                 netdev = eth->netdev[mac];
1267 
1268                 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1269                         goto release_desc;
1270 
1271                 /* alloc new buffer */
1272                 new_data = napi_alloc_frag(ring->frag_size);
1273                 if (unlikely(!new_data)) {
1274                         netdev->stats.rx_dropped++;
1275                         goto release_desc;
1276                 }
1277                 dma_addr = dma_map_single(eth->dev,
1278                                           new_data + NET_SKB_PAD +
1279                                           eth->ip_align,
1280                                           ring->buf_size,
1281                                           DMA_FROM_DEVICE);
1282                 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1283                         skb_free_frag(new_data);
1284                         netdev->stats.rx_dropped++;
1285                         goto release_desc;
1286                 }
1287 
1288                 /* receive data */
1289                 skb = build_skb(data, ring->frag_size);
1290                 if (unlikely(!skb)) {
1291                         skb_free_frag(new_data);
1292                         netdev->stats.rx_dropped++;
1293                         goto release_desc;
1294                 }
1295                 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1296 
1297                 dma_unmap_single(eth->dev, trxd.rxd1,
1298                                  ring->buf_size, DMA_FROM_DEVICE);
1299                 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1300                 skb->dev = netdev;
1301                 skb_put(skb, pktlen);
1302                 if (trxd.rxd4 & eth->rx_dma_l4_valid)
1303                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1304                 else
1305                         skb_checksum_none_assert(skb);
1306                 skb->protocol = eth_type_trans(skb, netdev);
1307 
1308                 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1309                     RX_DMA_VID(trxd.rxd3))
1310                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1311                                                RX_DMA_VID(trxd.rxd3));
1312                 skb_record_rx_queue(skb, 0);
1313                 napi_gro_receive(napi, skb);
1314 
1315                 ring->data[idx] = new_data;
1316                 rxd->rxd1 = (unsigned int)dma_addr;
1317 
1318 release_desc:
1319                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1320                         rxd->rxd2 = RX_DMA_LSO;
1321                 else
1322                         rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1323 
1324                 ring->calc_idx = idx;
1325 
1326                 done++;
1327         }
1328 
1329 rx_done:
1330         if (done) {
1331                 /* make sure that all changes to the dma ring are flushed before
1332                  * we continue
1333                  */
1334                 wmb();
1335                 mtk_update_rx_cpu_idx(eth);
1336         }
1337 
1338         return done;
1339 }
1340 
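/* Reclaim transmitted descriptors on the QDMA ring. Walk from the ring's CPU
 * pointer towards the hardware DMA pointer along the txd2 next-pointer chain,
 * stop at descriptors still owned by the hardware, unmap each buffer and
 * credit the completed skb to the MAC selected by the FPORT flag. The new CPU
 * pointer is written back to MTK_QTX_CRX_PTR and the unused budget is
 * returned to the caller.
 */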
1341 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1342                             unsigned int *done, unsigned int *bytes)
1343 {
1344         struct mtk_tx_ring *ring = &eth->tx_ring;
1345         struct mtk_tx_dma *desc;
1346         struct sk_buff *skb;
1347         struct mtk_tx_buf *tx_buf;
1348         u32 cpu, dma;
1349 
1350         cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1351         dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1352 
1353         desc = mtk_qdma_phys_to_virt(ring, cpu);
1354 
1355         while ((cpu != dma) && budget) {
1356                 u32 next_cpu = desc->txd2;
1357                 int mac = 0;
1358 
1359                 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1360                 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1361                         break;
1362 
1363                 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1364                 if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1365                         mac = 1;
1366 
1367                 skb = tx_buf->skb;
1368                 if (!skb)
1369                         break;
1370 
1371                 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1372                         bytes[mac] += skb->len;
1373                         done[mac]++;
1374                         budget--;
1375                 }
1376                 mtk_tx_unmap(eth, tx_buf);
1377 
1378                 ring->last_free = desc;
1379                 atomic_inc(&ring->free_count);
1380 
1381                 cpu = next_cpu;
1382         }
1383 
1384         mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1385 
1386         return budget;
1387 }
1388 
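/* PDMA flavour of the TX reclaim, used on SoCs without QDMA: the consumer
 * index lives in software (ring->cpu_idx) and is compared against the
 * hardware index read from MT7628_TX_DTX_IDX0; all completions are credited
 * to MAC 0.
 */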
1389 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1390                             unsigned int *done, unsigned int *bytes)
1391 {
1392         struct mtk_tx_ring *ring = &eth->tx_ring;
1393         struct mtk_tx_dma *desc;
1394         struct sk_buff *skb;
1395         struct mtk_tx_buf *tx_buf;
1396         u32 cpu, dma;
1397 
1398         cpu = ring->cpu_idx;
1399         dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1400 
1401         while ((cpu != dma) && budget) {
1402                 tx_buf = &ring->buf[cpu];
1403                 skb = tx_buf->skb;
1404                 if (!skb)
1405                         break;
1406 
1407                 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1408                         bytes[0] += skb->len;
1409                         done[0]++;
1410                         budget--;
1411                 }
1412 
1413                 mtk_tx_unmap(eth, tx_buf);
1414 
1415                 desc = &ring->dma[cpu];
1416                 ring->last_free = desc;
1417                 atomic_inc(&ring->free_count);
1418 
1419                 cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1420         }
1421 
1422         ring->cpu_idx = cpu;
1423 
1424         return budget;
1425 }
1426 
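/* Common TX completion path called from NAPI: run the QDMA or PDMA reclaim,
 * report the completed packets and bytes per netdev through
 * netdev_completed_queue() (BQL), and wake the queues again once the number
 * of free descriptors rises above ring->thresh.
 */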
1427 static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1428 {
1429         struct mtk_tx_ring *ring = &eth->tx_ring;
1430         unsigned int done[MTK_MAX_DEVS];
1431         unsigned int bytes[MTK_MAX_DEVS];
1432         int total = 0, i;
1433 
1434         memset(done, 0, sizeof(done));
1435         memset(bytes, 0, sizeof(bytes));
1436 
1437         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1438                 budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
1439         else
1440                 budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
1441 
1442         for (i = 0; i < MTK_MAC_COUNT; i++) {
1443                 if (!eth->netdev[i] || !done[i])
1444                         continue;
1445                 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1446                 total += done[i];
1447         }
1448 
1449         if (mtk_queue_stopped(eth) &&
1450             (atomic_read(&ring->free_count) > ring->thresh))
1451                 mtk_wake_queue(eth);
1452 
1453         return total;
1454 }
1455 
1456 static void mtk_handle_status_irq(struct mtk_eth *eth)
1457 {
1458         u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1459 
1460         if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1461                 mtk_stats_update(eth);
1462                 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1463                         MTK_INT_STATUS2);
1464         }
1465 }
1466 
1467 static int mtk_napi_tx(struct napi_struct *napi, int budget)
1468 {
1469         struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1470         u32 status, mask;
1471         int tx_done = 0;
1472 
1473         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1474                 mtk_handle_status_irq(eth);
1475         mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1476         tx_done = mtk_poll_tx(eth, budget);
1477 
1478         if (unlikely(netif_msg_intr(eth))) {
1479                 status = mtk_r32(eth, eth->tx_int_status_reg);
1480                 mask = mtk_r32(eth, eth->tx_int_mask_reg);
1481                 dev_info(eth->dev,
1482                          "done tx %d, intr 0x%08x/0x%x\n",
1483                          tx_done, status, mask);
1484         }
1485 
1486         if (tx_done == budget)
1487                 return budget;
1488 
1489         status = mtk_r32(eth, eth->tx_int_status_reg);
1490         if (status & MTK_TX_DONE_INT)
1491                 return budget;
1492 
1493         napi_complete(napi);
1494         mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1495 
1496         return tx_done;
1497 }
1498 
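/* RX NAPI poll handler. The RX-done interrupt is acknowledged before each
 * pass over the ring. If the full remaining budget was consumed, the whole
 * budget is returned so NAPI keeps us scheduled; if the status register still
 * signals pending work, another pass is made with the leftover budget.
 * Otherwise NAPI is completed and the RX-done interrupt is re-enabled.
 */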
1499 static int mtk_napi_rx(struct napi_struct *napi, int budget)
1500 {
1501         struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1502         u32 status, mask;
1503         int rx_done = 0;
1504         int remain_budget = budget;
1505 
1506         mtk_handle_status_irq(eth);
1507 
1508 poll_again:
1509         mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1510         rx_done = mtk_poll_rx(napi, remain_budget, eth);
1511 
1512         if (unlikely(netif_msg_intr(eth))) {
1513                 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1514                 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1515                 dev_info(eth->dev,
1516                          "done rx %d, intr 0x%08x/0x%x\n",
1517                          rx_done, status, mask);
1518         }
1519         if (rx_done == remain_budget)
1520                 return budget;
1521 
1522         status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1523         if (status & MTK_RX_DONE_INT) {
1524                 remain_budget -= rx_done;
1525                 goto poll_again;
1526         }
1527         napi_complete(napi);
1528         mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1529 
1530         return rx_done + budget - remain_budget;
1531 }
1532 
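/* Allocate and initialise the TX descriptor ring. The QDMA descriptors are
 * linked into a ring through their txd2 physical next pointers and handed to
 * the CPU (TX_DMA_LS0 | TX_DMA_OWNER_CPU). On PDMA-only SoCs a second, real
 * hardware ring (ring->dma_pdma) is allocated as well. The ring base and
 * index registers are then programmed for whichever DMA block the SoC uses.
 */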
1533 static int mtk_tx_alloc(struct mtk_eth *eth)
1534 {
1535         struct mtk_tx_ring *ring = &eth->tx_ring;
1536         int i, sz = sizeof(*ring->dma);
1537 
1538         ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1539                                GFP_KERNEL);
1540         if (!ring->buf)
1541                 goto no_tx_mem;
1542 
1543         ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1544                                        &ring->phys, GFP_ATOMIC);
1545         if (!ring->dma)
1546                 goto no_tx_mem;
1547 
1548         for (i = 0; i < MTK_DMA_SIZE; i++) {
1549                 int next = (i + 1) % MTK_DMA_SIZE;
1550                 u32 next_ptr = ring->phys + next * sz;
1551 
1552                 ring->dma[i].txd2 = next_ptr;
1553                 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1554         }
1555 
1556         /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1557          * only as the framework. The real HW descriptors are the PDMA
1558          * descriptors in ring->dma_pdma.
1559          */
1560         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1561                 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1562                                                     &ring->phys_pdma,
1563                                                     GFP_ATOMIC);
1564                 if (!ring->dma_pdma)
1565                         goto no_tx_mem;
1566 
1567                 for (i = 0; i < MTK_DMA_SIZE; i++) {
1568                         ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1569                         ring->dma_pdma[i].txd4 = 0;
1570                 }
1571         }
1572 
1573         ring->dma_size = MTK_DMA_SIZE;
1574         atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1575         ring->next_free = &ring->dma[0];
1576         ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1577         ring->thresh = MAX_SKB_FRAGS;
1578 
1579         /* make sure that all changes to the dma ring are flushed before we
1580          * continue
1581          */
1582         wmb();
1583 
1584         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1585                 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1586                 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1587                 mtk_w32(eth,
1588                         ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1589                         MTK_QTX_CRX_PTR);
1590                 mtk_w32(eth,
1591                         ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1592                         MTK_QTX_DRX_PTR);
1593                 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1594                         MTK_QTX_CFG(0));
1595         } else {
1596                 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1597                 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1598                 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1599                 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1600         }
1601 
1602         return 0;
1603 
1604 no_tx_mem:
1605         return -ENOMEM;
1606 }
1607 
1608 static void mtk_tx_clean(struct mtk_eth *eth)
1609 {
1610         struct mtk_tx_ring *ring = &eth->tx_ring;
1611         int i;
1612 
1613         if (ring->buf) {
1614                 for (i = 0; i < MTK_DMA_SIZE; i++)
1615                         mtk_tx_unmap(eth, &ring->buf[i]);
1616                 kfree(ring->buf);
1617                 ring->buf = NULL;
1618         }
1619 
1620         if (ring->dma) {
1621                 dma_free_coherent(eth->dev,
1622                                   MTK_DMA_SIZE * sizeof(*ring->dma),
1623                                   ring->dma,
1624                                   ring->phys);
1625                 ring->dma = NULL;
1626         }
1627 
1628         if (ring->dma_pdma) {
1629                 dma_free_coherent(eth->dev,
1630                                   MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
1631                                   ring->dma_pdma,
1632                                   ring->phys_pdma);
1633                 ring->dma_pdma = NULL;
1634         }
1635 }
1636 
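/* Allocate and initialise one RX ring. Depending on rx_flag this is the QDMA
 * ring (register block at offset 0x1000), an HW LRO ring or a normal PDMA
 * ring. Every slot gets a page fragment that is DMA-mapped and hung into
 * rxd1, after which the base address, ring size and CPU index are programmed
 * and the hardware DMA index is reset.
 */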
1637 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1638 {
1639         struct mtk_rx_ring *ring;
1640         int rx_data_len, rx_dma_size;
1641         int i;
1642         u32 offset = 0;
1643 
1644         if (rx_flag == MTK_RX_FLAGS_QDMA) {
1645                 if (ring_no)
1646                         return -EINVAL;
1647                 ring = &eth->rx_ring_qdma;
1648                 offset = 0x1000;
1649         } else {
1650                 ring = &eth->rx_ring[ring_no];
1651         }
1652 
1653         if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1654                 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1655                 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1656         } else {
1657                 rx_data_len = ETH_DATA_LEN;
1658                 rx_dma_size = MTK_DMA_SIZE;
1659         }
1660 
1661         ring->frag_size = mtk_max_frag_size(rx_data_len);
1662         ring->buf_size = mtk_max_buf_size(ring->frag_size);
1663         ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1664                              GFP_KERNEL);
1665         if (!ring->data)
1666                 return -ENOMEM;
1667 
1668         for (i = 0; i < rx_dma_size; i++) {
1669                 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1670                 if (!ring->data[i])
1671                         return -ENOMEM;
1672         }
1673 
1674         ring->dma = dma_alloc_coherent(eth->dev,
1675                                        rx_dma_size * sizeof(*ring->dma),
1676                                        &ring->phys, GFP_ATOMIC);
1677         if (!ring->dma)
1678                 return -ENOMEM;
1679 
1680         for (i = 0; i < rx_dma_size; i++) {
1681                 dma_addr_t dma_addr = dma_map_single(eth->dev,
1682                                 ring->data[i] + NET_SKB_PAD + eth->ip_align,
1683                                 ring->buf_size,
1684                                 DMA_FROM_DEVICE);
1685                 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1686                         return -ENOMEM;
1687                 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1688 
1689                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1690                         ring->dma[i].rxd2 = RX_DMA_LSO;
1691                 else
1692                         ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1693         }
1694         ring->dma_size = rx_dma_size;
1695         ring->calc_idx_update = false;
1696         ring->calc_idx = rx_dma_size - 1;
1697         ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1698         /* make sure that all changes to the dma ring are flushed before we
1699          * continue
1700          */
1701         wmb();
1702 
1703         mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1704         mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1705         mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1706         mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1707 
1708         return 0;
1709 }
1710 
1711 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1712 {
1713         int i;
1714 
1715         if (ring->data && ring->dma) {
1716                 for (i = 0; i < ring->dma_size; i++) {
1717                         if (!ring->data[i])
1718                                 continue;
1719                         if (!ring->dma[i].rxd1)
1720                                 continue;
1721                         dma_unmap_single(eth->dev,
1722                                          ring->dma[i].rxd1,
1723                                          ring->buf_size,
1724                                          DMA_FROM_DEVICE);
1725                         skb_free_frag(ring->data[i]);
1726                 }
1727                 kfree(ring->data);
1728                 ring->data = NULL;
1729         }
1730 
1731         if (ring->dma) {
1732                 dma_free_coherent(eth->dev,
1733                                   ring->dma_size * sizeof(*ring->dma),
1734                                   ring->dma,
1735                                   ring->phys);
1736                 ring->dma = NULL;
1737         }
1738 }
1739 
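/* Program the hardware LRO engine: put rings 1..n into auto-learn mode, set
 * the age/aggregation timers and maximum aggregation count, configure the
 * bandwidth threshold, auto-learn score delta and refresh timer, and finally
 * enable LRO with IPv4 checksum update.
 */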
1740 static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1741 {
1742         int i;
1743         u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1744         u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1745 
1746         /* set LRO rings to auto-learn mode */
1747         ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1748 
1749         /* mark the LRO ring as valid */
1750         ring_ctrl_dw2 |= MTK_RING_VLD;
1751 
1752         /* set AGE timer (unit: 20us) */
1753         ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1754         ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1755 
1756         /* set max AGG timer (unit: 20us) */
1757         ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1758 
1759         /* set max LRO AGG count */
1760         ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1761         ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1762 
1763         for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1764                 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1765                 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1766                 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1767         }
1768 
1769         /* IPv4 checksum update enable */
1770         lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1771 
1772         /* switch priority comparison to packet count mode */
1773         lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1774 
1775         /* bandwidth threshold setting */
1776         mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1777 
1778         /* auto-learn score delta setting */
1779         mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1780 
1781         /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1782         mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1783                 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1784 
1785         /* set HW LRO mode & the max aggregation count for rx packets */
1786         lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1787 
1788         /* the minimum remaining room of SDL0 in the RXD required for lro aggregation */
1789         lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1790 
1791         /* enable HW LRO */
1792         lro_ctrl_dw0 |= MTK_LRO_EN;
1793 
1794         mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1795         mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1796 
1797         return 0;
1798 }
1799 
1800 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1801 {
1802         int i;
1803         u32 val;
1804 
1805         /* relinquish lro rings, flush aggregated packets */
1806         mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1807 
1808         /* wait for the relinquish requests to complete */
1809         for (i = 0; i < 10; i++) {
1810                 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1811                 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1812                         msleep(20);
1813                         continue;
1814                 }
1815                 break;
1816         }
1817 
1818         /* invalidate lro rings */
1819         for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1820                 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1821 
1822         /* disable HW LRO */
1823         mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1824 }
1825 
1826 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1827 {
1828         u32 reg_val;
1829 
1830         reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1831 
1832         /* invalidate the IP setting */
1833         mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1834 
1835         mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1836 
1837         /* validate the IP setting */
1838         mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1839 }
1840 
1841 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1842 {
1843         u32 reg_val;
1844 
1845         reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1846 
1847         /* invalidate the IP setting */
1848         mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1849 
1850         mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1851 }
1852 
1853 static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1854 {
1855         int cnt = 0;
1856         int i;
1857 
1858         for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1859                 if (mac->hwlro_ip[i])
1860                         cnt++;
1861         }
1862 
1863         return cnt;
1864 }
1865 
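/* Install an ethtool ntuple rule as an HW LRO destination IP. Only TCP over
 * IPv4 with a destination address and rule location 0 or 1 is accepted; the
 * address is stored per MAC and written into the matching LRO ring. Reached
 * through mtk_set_rxnfc()/ETHTOOL_SRXCLSRLINS, e.g. with an invocation such
 * as "ethtool -N <dev> flow-type tcp4 dst-ip <addr> loc 0" (illustrative
 * only; the exact syntax depends on the ethtool version).
 */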
1866 static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1867                                 struct ethtool_rxnfc *cmd)
1868 {
1869         struct ethtool_rx_flow_spec *fsp =
1870                 (struct ethtool_rx_flow_spec *)&cmd->fs;
1871         struct mtk_mac *mac = netdev_priv(dev);
1872         struct mtk_eth *eth = mac->hw;
1873         int hwlro_idx;
1874 
1875         if ((fsp->flow_type != TCP_V4_FLOW) ||
1876             (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1877             (fsp->location > 1))
1878                 return -EINVAL;
1879 
1880         mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1881         hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1882 
1883         mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1884 
1885         mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1886 
1887         return 0;
1888 }
1889 
1890 static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1891                                 struct ethtool_rxnfc *cmd)
1892 {
1893         struct ethtool_rx_flow_spec *fsp =
1894                 (struct ethtool_rx_flow_spec *)&cmd->fs;
1895         struct mtk_mac *mac = netdev_priv(dev);
1896         struct mtk_eth *eth = mac->hw;
1897         int hwlro_idx;
1898 
1899         if (fsp->location > 1)
1900                 return -EINVAL;
1901 
1902         mac->hwlro_ip[fsp->location] = 0;
1903         hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1904 
1905         mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1906 
1907         mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1908 
1909         return 0;
1910 }
1911 
1912 static void mtk_hwlro_netdev_disable(struct net_device *dev)
1913 {
1914         struct mtk_mac *mac = netdev_priv(dev);
1915         struct mtk_eth *eth = mac->hw;
1916         int i, hwlro_idx;
1917 
1918         for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1919                 mac->hwlro_ip[i] = 0;
1920                 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1921 
1922                 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1923         }
1924 
1925         mac->hwlro_ip_cnt = 0;
1926 }
1927 
1928 static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1929                                     struct ethtool_rxnfc *cmd)
1930 {
1931         struct mtk_mac *mac = netdev_priv(dev);
1932         struct ethtool_rx_flow_spec *fsp =
1933                 (struct ethtool_rx_flow_spec *)&cmd->fs;
1934 
1935         /* only the tcp dst ipv4 address is meaningful, all other fields are ignored */
1936         fsp->flow_type = TCP_V4_FLOW;
1937         fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1938         fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1939 
1940         fsp->h_u.tcp_ip4_spec.ip4src = 0;
1941         fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1942         fsp->h_u.tcp_ip4_spec.psrc = 0;
1943         fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1944         fsp->h_u.tcp_ip4_spec.pdst = 0;
1945         fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1946         fsp->h_u.tcp_ip4_spec.tos = 0;
1947         fsp->m_u.tcp_ip4_spec.tos = 0xff;
1948 
1949         return 0;
1950 }
1951 
1952 static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1953                                   struct ethtool_rxnfc *cmd,
1954                                   u32 *rule_locs)
1955 {
1956         struct mtk_mac *mac = netdev_priv(dev);
1957         int cnt = 0;
1958         int i;
1959 
1960         for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1961                 if (mac->hwlro_ip[i]) {
1962                         rule_locs[cnt] = i;
1963                         cnt++;
1964                 }
1965         }
1966 
1967         cmd->rule_cnt = cnt;
1968 
1969         return 0;
1970 }
1971 
1972 static netdev_features_t mtk_fix_features(struct net_device *dev,
1973                                           netdev_features_t features)
1974 {
1975         if (!(features & NETIF_F_LRO)) {
1976                 struct mtk_mac *mac = netdev_priv(dev);
1977                 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1978 
1979                 if (ip_cnt) {
1980                         netdev_info(dev, "RX flow is programmed, LRO must stay enabled\n");
1981 
1982                         features |= NETIF_F_LRO;
1983                 }
1984         }
1985 
1986         return features;
1987 }
1988 
1989 static int mtk_set_features(struct net_device *dev, netdev_features_t features)
1990 {
1991         int err = 0;
1992 
1993         if (!((dev->features ^ features) & NETIF_F_LRO))
1994                 return 0;
1995 
1996         if (!(features & NETIF_F_LRO))
1997                 mtk_hwlro_netdev_disable(dev);
1998 
1999         return err;
2000 }
2001 
2002 /* wait for DMA to finish whatever it is doing before we start using it again */
2003 static int mtk_dma_busy_wait(struct mtk_eth *eth)
2004 {
2005         unsigned long t_start = jiffies;
2006 
2007         while (1) {
2008                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2009                         if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2010                               (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2011                                 return 0;
2012                 } else {
2013                         if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2014                               (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2015                                 return 0;
2016                 }
2017 
2018                 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2019                         break;
2020         }
2021 
2022         dev_err(eth->dev, "DMA init timeout\n");
2023         return -1;
2024 }
2025 
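/* Bring up all DMA rings for the interface: wait until both DMA engines are
 * idle, set up the QDMA scratch/free queue, allocate the TX ring, the QDMA
 * and normal RX rings and, if HW LRO is available, the extra LRO rings, then
 * configure the QDMA flow-control / random-early-drop thresholds.
 */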
2026 static int mtk_dma_init(struct mtk_eth *eth)
2027 {
2028         int err;
2029         u32 i;
2030 
2031         if (mtk_dma_busy_wait(eth))
2032                 return -EBUSY;
2033 
2034         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2035                 /* QDMA needs scratch memory for internal reordering of the
2036                  * descriptors
2037                  */
2038                 err = mtk_init_fq_dma(eth);
2039                 if (err)
2040                         return err;
2041         }
2042 
2043         err = mtk_tx_alloc(eth);
2044         if (err)
2045                 return err;
2046 
2047         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2048                 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2049                 if (err)
2050                         return err;
2051         }
2052 
2053         err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2054         if (err)
2055                 return err;
2056 
2057         if (eth->hwlro) {
2058                 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2059                         err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2060                         if (err)
2061                                 return err;
2062                 }
2063                 err = mtk_hwlro_rx_init(eth);
2064                 if (err)
2065                         return err;
2066         }
2067 
2068         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2069                 /* Enable random early drop and set drop threshold
2070                  * automatically
2071                  */
2072                 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2073                         FC_THRES_MIN, MTK_QDMA_FC_THRES);
2074                 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2075         }
2076 
2077         return 0;
2078 }
2079 
2080 static void mtk_dma_free(struct mtk_eth *eth)
2081 {
2082         int i;
2083 
2084         for (i = 0; i < MTK_MAC_COUNT; i++)
2085                 if (eth->netdev[i])
2086                         netdev_reset_queue(eth->netdev[i]);
2087         if (eth->scratch_ring) {
2088                 dma_free_coherent(eth->dev,
2089                                   MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2090                                   eth->scratch_ring,
2091                                   eth->phy_scratch_ring);
2092                 eth->scratch_ring = NULL;
2093                 eth->phy_scratch_ring = 0;
2094         }
2095         mtk_tx_clean(eth);
2096         mtk_rx_clean(eth, &eth->rx_ring[0]);
2097         mtk_rx_clean(eth, &eth->rx_ring_qdma);
2098 
2099         if (eth->hwlro) {
2100                 mtk_hwlro_rx_uninit(eth);
2101                 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2102                         mtk_rx_clean(eth, &eth->rx_ring[i]);
2103         }
2104 
2105         kfree(eth->scratch_head);
2106 }
2107 
2108 static void mtk_tx_timeout(struct net_device *dev)
2109 {
2110         struct mtk_mac *mac = netdev_priv(dev);
2111         struct mtk_eth *eth = mac->hw;
2112 
2113         eth->netdev[mac->id]->stats.tx_errors++;
2114         netif_err(eth, tx_err, dev,
2115                   "transmit timed out\n");
2116         schedule_work(&eth->pending_work);
2117 }
2118 
2119 static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2120 {
2121         struct mtk_eth *eth = _eth;
2122 
2123         if (likely(napi_schedule_prep(&eth->rx_napi))) {
2124                 __napi_schedule(&eth->rx_napi);
2125                 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2126         }
2127 
2128         return IRQ_HANDLED;
2129 }
2130 
2131 static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2132 {
2133         struct mtk_eth *eth = _eth;
2134 
2135         if (likely(napi_schedule_prep(&eth->tx_napi))) {
2136                 __napi_schedule(&eth->tx_napi);
2137                 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2138         }
2139 
2140         return IRQ_HANDLED;
2141 }
2142 
2143 static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2144 {
2145         struct mtk_eth *eth = _eth;
2146 
2147         if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
2148                 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
2149                         mtk_handle_irq_rx(irq, _eth);
2150         }
2151         if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2152                 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2153                         mtk_handle_irq_tx(irq, _eth);
2154         }
2155 
2156         return IRQ_HANDLED;
2157 }
2158 
2159 #ifdef CONFIG_NET_POLL_CONTROLLER
2160 static void mtk_poll_controller(struct net_device *dev)
2161 {
2162         struct mtk_mac *mac = netdev_priv(dev);
2163         struct mtk_eth *eth = mac->hw;
2164 
2165         mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2166         mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2167         mtk_handle_irq_rx(eth->irq[2], dev);
2168         mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2169         mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2170 }
2171 #endif
2172 
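/* Initialise the rings and turn the DMA engines on. On QDMA-capable SoCs both
 * the QDMA (TX/RX) and the PDMA (RX) global configuration registers are
 * enabled; PDMA-only SoCs get a single combined configuration.
 */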
2173 static int mtk_start_dma(struct mtk_eth *eth)
2174 {
2175         u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2176         int err;
2177 
2178         err = mtk_dma_init(eth);
2179         if (err) {
2180                 mtk_dma_free(eth);
2181                 return err;
2182         }
2183 
2184         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2185                 mtk_w32(eth,
2186                         MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
2187                         MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
2188                         MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2189                         MTK_RX_BT_32DWORDS,
2190                         MTK_QDMA_GLO_CFG);
2191 
2192                 mtk_w32(eth,
2193                         MTK_RX_DMA_EN | rx_2b_offset |
2194                         MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2195                         MTK_PDMA_GLO_CFG);
2196         } else {
2197                 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2198                         MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2199                         MTK_PDMA_GLO_CFG);
2200         }
2201 
2202         return 0;
2203 }
2204 
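/* ndo_open: attach the PHY through phylink and bring up the shared DMA rings.
 * Both netdevs sit on the same rings, so the DMA/NAPI/IRQ setup is done only
 * for the first opener and reference-counted afterwards (eth->dma_refcnt);
 * mtk_stop() tears it down again on the last close.
 */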
2205 static int mtk_open(struct net_device *dev)
2206 {
2207         struct mtk_mac *mac = netdev_priv(dev);
2208         struct mtk_eth *eth = mac->hw;
2209         int err;
2210 
2211         err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2212         if (err) {
2213                 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2214                            err);
2215                 return err;
2216         }
2217 
2218         /* we run 2 netdevs on the same dma ring so we only bring it up once */
2219         if (!refcount_read(&eth->dma_refcnt)) {
2220                 int err = mtk_start_dma(eth);
2221 
2222                 if (err)
2223                         return err;
2224 
2225                 napi_enable(&eth->tx_napi);
2226                 napi_enable(&eth->rx_napi);
2227                 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2228                 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2229                 refcount_set(&eth->dma_refcnt, 1);
2230         } else {
2231                 refcount_inc(&eth->dma_refcnt);
2232         }
2233 
2234         phylink_start(mac->phylink);
2235         netif_start_queue(dev);
2236         return 0;
2237 }
2238 
2239 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2240 {
2241         u32 val;
2242         int i;
2243 
2244         /* stop the dma engine */
2245         spin_lock_bh(&eth->page_lock);
2246         val = mtk_r32(eth, glo_cfg);
2247         mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2248                 glo_cfg);
2249         spin_unlock_bh(&eth->page_lock);
2250 
2251         /* wait for dma stop */
2252         for (i = 0; i < 10; i++) {
2253                 val = mtk_r32(eth, glo_cfg);
2254                 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2255                         msleep(20);
2256                         continue;
2257                 }
2258                 break;
2259         }
2260 }
2261 
2262 static int mtk_stop(struct net_device *dev)
2263 {
2264         struct mtk_mac *mac = netdev_priv(dev);
2265         struct mtk_eth *eth = mac->hw;
2266 
2267         phylink_stop(mac->phylink);
2268 
2269         netif_tx_disable(dev);
2270 
2271         phylink_disconnect_phy(mac->phylink);
2272 
2273         /* only shutdown DMA if this is the last user */
2274         if (!refcount_dec_and_test(&eth->dma_refcnt))
2275                 return 0;
2276 
2277         mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2278         mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2279         napi_disable(&eth->tx_napi);
2280         napi_disable(&eth->rx_napi);
2281 
2282         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2283                 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2284         mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2285 
2286         mtk_dma_free(eth);
2287 
2288         return 0;
2289 }
2290 
2291 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2292 {
2293         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2294                            reset_bits,
2295                            reset_bits);
2296 
2297         usleep_range(1000, 1100);
2298         regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2299                            reset_bits,
2300                            ~reset_bits);
2301         mdelay(10);
2302 }
2303 
2304 static void mtk_clk_disable(struct mtk_eth *eth)
2305 {
2306         int clk;
2307 
2308         for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2309                 clk_disable_unprepare(eth->clks[clk]);
2310 }
2311 
2312 static int mtk_clk_enable(struct mtk_eth *eth)
2313 {
2314         int clk, ret;
2315 
2316         for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
2317                 ret = clk_prepare_enable(eth->clks[clk]);
2318                 if (ret)
2319                         goto err_disable_clks;
2320         }
2321 
2322         return 0;
2323 
2324 err_disable_clks:
2325         while (--clk >= 0)
2326                 clk_disable_unprepare(eth->clks[clk]);
2327 
2328         return ret;
2329 }
2330 
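/* One-time hardware bring-up, guarded by the MTK_HW_INIT state bit: enable
 * runtime PM and the clocks, reset the frame engine (device_reset on MT7628,
 * ethsys FE/PPE reset otherwise), optionally tune the GE2 pad driving via
 * pctl, force all GMACs link-down until phylink configures them, and set up
 * special-tag parsing, RX VLAN offload, interrupt delay/grouping and the
 * per-GDMA checksum/forward configuration.
 */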
2331 static int mtk_hw_init(struct mtk_eth *eth)
2332 {
2333         int i, val, ret;
2334 
2335         if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2336                 return 0;
2337 
2338         pm_runtime_enable(eth->dev);
2339         pm_runtime_get_sync(eth->dev);
2340 
2341         ret = mtk_clk_enable(eth);
2342         if (ret)
2343                 goto err_disable_pm;
2344 
2345         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2346                 ret = device_reset(eth->dev);
2347                 if (ret) {
2348                         dev_err(eth->dev, "MAC reset failed!\n");
2349                         goto err_disable_pm;
2350                 }
2351 
2352                 /* enable interrupt delay for RX */
2353                 mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2354 
2355                 /* disable delay and normal interrupts */
2356                 mtk_tx_irq_disable(eth, ~0);
2357                 mtk_rx_irq_disable(eth, ~0);
2358 
2359                 return 0;
2360         }
2361 
2362         /* Non-MT7628 handling... */
2363         ethsys_reset(eth, RSTCTRL_FE);
2364         ethsys_reset(eth, RSTCTRL_PPE);
2365 
2366         if (eth->pctl) {
2367                 /* Set GE2 driving and slew rate */
2368                 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2369 
2370                 /* set GE2 TDSEL */
2371                 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2372 
2373                 /* set GE2 TUNE */
2374                 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2375         }
2376 
2377         /* Set link-down as the default for each GMAC. Each GMAC's MCR is
2378          * set up with the appropriate value once mtk_mac_config() is
2379          * invoked.
2380          */
2381         for (i = 0; i < MTK_MAC_COUNT; i++)
2382                 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2383 
2384         /* Tell the CDM to parse the MTK special tag from CPU-originated
2385          * frames; this also works for untagged packets.
2386          */
2387         val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2388         mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2389 
2390         /* Enable RX VLAN offloading */
2391         mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2392 
2393         /* enable interrupt delay for RX */
2394         mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2395 
2396         /* disable delay and normal interrupts */
2397         mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
2398         mtk_tx_irq_disable(eth, ~0);
2399         mtk_rx_irq_disable(eth, ~0);
2400         mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2401         mtk_w32(eth, 0, MTK_RST_GL);
2402 
2403         /* FE int grouping */
2404         mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
2405         mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
2406         mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
2407         mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
2408         mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2409 
2410         for (i = 0; i < MTK_MAC_COUNT; i++) {
2411                 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2412 
2413                 /* set up the forward port to send frames to the PDMA */
2414                 val &= ~0xffff;
2415 
2416                 /* Enable RX checksum */
2417                 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2418 
2419                 /* setup the mac dma */
2420                 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2421         }
2422 
2423         return 0;
2424 
2425 err_disable_pm:
2426         pm_runtime_put_sync(eth->dev);
2427         pm_runtime_disable(eth->dev);
2428 
2429         return ret;
2430 }
2431 
2432 static int mtk_hw_deinit(struct mtk_eth *eth)
2433 {
2434         if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2435                 return 0;
2436 
2437         mtk_clk_disable(eth);
2438 
2439         pm_runtime_put_sync(eth->dev);
2440         pm_runtime_disable(eth->dev);
2441 
2442         return 0;
2443 }
2444 
2445 static int __init mtk_init(struct net_device *dev)
2446 {
2447         struct mtk_mac *mac = netdev_priv(dev);
2448         struct mtk_eth *eth = mac->hw;
2449         const char *mac_addr;
2450 
2451         mac_addr = of_get_mac_address(mac->of_node);
2452         if (!IS_ERR(mac_addr))
2453                 ether_addr_copy(dev->dev_addr, mac_addr);
2454 
2455         /* If the MAC address is invalid, use a random MAC address */
2456         if (!is_valid_ether_addr(dev->dev_addr)) {
2457                 eth_hw_addr_random(dev);
2458                 dev_err(eth->dev, "generated random MAC address %pM\n",
2459                         dev->dev_addr);
2460         }
2461 
2462         return 0;
2463 }
2464 
2465 static void mtk_uninit(struct net_device *dev)
2466 {
2467         struct mtk_mac *mac = netdev_priv(dev);
2468         struct mtk_eth *eth = mac->hw;
2469 
2470         phylink_disconnect_phy(mac->phylink);
2471         mtk_tx_irq_disable(eth, ~0);
2472         mtk_rx_irq_disable(eth, ~0);
2473 }
2474 
2475 static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2476 {
2477         struct mtk_mac *mac = netdev_priv(dev);
2478 
2479         switch (cmd) {
2480         case SIOCGMIIPHY:
2481         case SIOCGMIIREG:
2482         case SIOCSMIIREG:
2483                 return phylink_mii_ioctl(mac->phylink, ifr, cmd);
2484         default:
2485                 break;
2486         }
2487 
2488         return -EOPNOTSUPP;
2489 }
2490 
2491 static void mtk_pending_work(struct work_struct *work)
2492 {
2493         struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2494         int err, i;
2495         unsigned long restart = 0;
2496 
2497         rtnl_lock();
2498 
2499         dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2500 
2501         while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2502                 cpu_relax();
2503 
2504         dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2505         /* stop all devices to make sure that dma is properly shut down */
2506         for (i = 0; i < MTK_MAC_COUNT; i++) {
2507                 if (!eth->netdev[i])
2508                         continue;
2509                 mtk_stop(eth->netdev[i]);
2510                 __set_bit(i, &restart);
2511         }
2512         dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2513 
2514         /* restart underlying hardware such as power, clock, pin mux
2515          * and the connected phy
2516          */
2517         mtk_hw_deinit(eth);
2518 
2519         if (eth->dev->pins)
2520                 pinctrl_select_state(eth->dev->pins->p,
2521                                      eth->dev->pins->default_state);
2522         mtk_hw_init(eth);
2523 
2524         /* restart DMA and enable IRQs */
2525         for (i = 0; i < MTK_MAC_COUNT; i++) {
2526                 if (!test_bit(i, &restart))
2527                         continue;
2528                 err = mtk_open(eth->netdev[i]);
2529                 if (err) {
2530                         netif_alert(eth, ifup, eth->netdev[i],
2531                               "Driver up/down cycle failed, closing device.\n");
2532                         dev_close(eth->netdev[i]);
2533                 }
2534         }
2535 
2536         dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2537 
2538         clear_bit_unlock(MTK_RESETTING, &eth->state);
2539 
2540         rtnl_unlock();
2541 }
2542 
2543 static int mtk_free_dev(struct mtk_eth *eth)
2544 {
2545         int i;
2546 
2547         for (i = 0; i < MTK_MAC_COUNT; i++) {
2548                 if (!eth->netdev[i])
2549                         continue;
2550                 free_netdev(eth->netdev[i]);
2551         }
2552 
2553         return 0;
2554 }
2555 
2556 static int mtk_unreg_dev(struct mtk_eth *eth)
2557 {
2558         int i;
2559 
2560         for (i = 0; i < MTK_MAC_COUNT; i++) {
2561                 if (!eth->netdev[i])
2562                         continue;
2563                 unregister_netdev(eth->netdev[i]);
2564         }
2565 
2566         return 0;
2567 }
2568 
2569 static int mtk_cleanup(struct mtk_eth *eth)
2570 {
2571         mtk_unreg_dev(eth);
2572         mtk_free_dev(eth);
2573         cancel_work_sync(&eth->pending_work);
2574 
2575         return 0;
2576 }
2577 
2578 static int mtk_get_link_ksettings(struct net_device *ndev,
2579                                   struct ethtool_link_ksettings *cmd)
2580 {
2581         struct mtk_mac *mac = netdev_priv(ndev);
2582 
2583         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2584                 return -EBUSY;
2585 
2586         return phylink_ethtool_ksettings_get(mac->phylink, cmd);
2587 }
2588 
2589 static int mtk_set_link_ksettings(struct net_device *ndev,
2590                                   const struct ethtool_link_ksettings *cmd)
2591 {
2592         struct mtk_mac *mac = netdev_priv(ndev);
2593 
2594         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2595                 return -EBUSY;
2596 
2597         return phylink_ethtool_ksettings_set(mac->phylink, cmd);
2598 }
2599 
2600 static void mtk_get_drvinfo(struct net_device *dev,
2601                             struct ethtool_drvinfo *info)
2602 {
2603         struct mtk_mac *mac = netdev_priv(dev);
2604 
2605         strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2606         strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2607         info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2608 }
2609 
2610 static u32 mtk_get_msglevel(struct net_device *dev)
2611 {
2612         struct mtk_mac *mac = netdev_priv(dev);
2613 
2614         return mac->hw->msg_enable;
2615 }
2616 
2617 static void mtk_set_msglevel(struct net_device *dev, u32 value)
2618 {
2619         struct mtk_mac *mac = netdev_priv(dev);
2620 
2621         mac->hw->msg_enable = value;
2622 }
2623 
2624 static int mtk_nway_reset(struct net_device *dev)
2625 {
2626         struct mtk_mac *mac = netdev_priv(dev);
2627 
2628         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2629                 return -EBUSY;
2630 
2631         if (!mac->phylink)
2632                 return -ENOTSUPP;
2633 
2634         return phylink_ethtool_nway_reset(mac->phylink);
2635 }
2636 
2637 static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2638 {
2639         int i;
2640 
2641         switch (stringset) {
2642         case ETH_SS_STATS:
2643                 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2644                         memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2645                         data += ETH_GSTRING_LEN;
2646                 }
2647                 break;
2648         }
2649 }
2650 
2651 static int mtk_get_sset_count(struct net_device *dev, int sset)
2652 {
2653         switch (sset) {
2654         case ETH_SS_STATS:
2655                 return ARRAY_SIZE(mtk_ethtool_stats);
2656         default:
2657                 return -EOPNOTSUPP;
2658         }
2659 }
2660 
2661 static void mtk_get_ethtool_stats(struct net_device *dev,
2662                                   struct ethtool_stats *stats, u64 *data)
2663 {
2664         struct mtk_mac *mac = netdev_priv(dev);
2665         struct mtk_hw_stats *hwstats = mac->hw_stats;
2666         u64 *data_src, *data_dst;
2667         unsigned int start;
2668         int i;
2669 
2670         if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2671                 return;
2672 
2673         if (netif_running(dev) && netif_device_present(dev)) {
2674                 if (spin_trylock_bh(&hwstats->stats_lock)) {
2675                         mtk_stats_update_mac(mac);
2676                         spin_unlock_bh(&hwstats->stats_lock);
2677                 }
2678         }
2679 
2680         data_src = (u64 *)hwstats;
2681 
2682         do {
2683                 data_dst = data;
2684                 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2685 
2686                 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2687                         *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2688         } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
2689 }
2690 
2691 static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2692                          u32 *rule_locs)
2693 {
2694         int ret = -EOPNOTSUPP;
2695 
2696         switch (cmd->cmd) {
2697         case ETHTOOL_GRXRINGS:
2698                 if (dev->hw_features & NETIF_F_LRO) {
2699                         cmd->data = MTK_MAX_RX_RING_NUM;
2700                         ret = 0;
2701                 }
2702                 break;
2703         case ETHTOOL_GRXCLSRLCNT:
2704                 if (dev->hw_features & NETIF_F_LRO) {
2705                         struct mtk_mac *mac = netdev_priv(dev);
2706 
2707                         cmd->rule_cnt = mac->hwlro_ip_cnt;
2708                         ret = 0;
2709                 }
2710                 break;
2711         case ETHTOOL_GRXCLSRULE:
2712                 if (dev->hw_features & NETIF_F_LRO)
2713                         ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2714                 break;
2715         case ETHTOOL_GRXCLSRLALL:
2716                 if (dev->hw_features & NETIF_F_LRO)
2717                         ret = mtk_hwlro_get_fdir_all(dev, cmd,
2718                                                      rule_locs);
2719                 break;
2720         default:
2721                 break;
2722         }
2723 
2724         return ret;
2725 }
2726 
2727 static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2728 {
2729         int ret = -EOPNOTSUPP;
2730 
2731         switch (cmd->cmd) {
2732         case ETHTOOL_SRXCLSRLINS:
2733                 if (dev->hw_features & NETIF_F_LRO)
2734                         ret = mtk_hwlro_add_ipaddr(dev, cmd);
2735                 break;
2736         case ETHTOOL_SRXCLSRLDEL:
2737                 if (dev->hw_features & NETIF_F_LRO)
2738                         ret = mtk_hwlro_del_ipaddr(dev, cmd);
2739                 break;
2740         default:
2741                 break;
2742         }
2743 
2744         return ret;
2745 }
2746 
2747 static const struct ethtool_ops mtk_ethtool_ops = {
2748         .get_link_ksettings     = mtk_get_link_ksettings,
2749         .set_link_ksettings     = mtk_set_link_ksettings,
2750         .get_drvinfo            = mtk_get_drvinfo,
2751         .get_msglevel           = mtk_get_msglevel,
2752         .set_msglevel           = mtk_set_msglevel,
2753         .nway_reset             = mtk_nway_reset,
2754         .get_link               = ethtool_op_get_link,
2755         .get_strings            = mtk_get_strings,
2756         .get_sset_count         = mtk_get_sset_count,
2757         .get_ethtool_stats      = mtk_get_ethtool_stats,
2758         .get_rxnfc              = mtk_get_rxnfc,
2759         .set_rxnfc              = mtk_set_rxnfc,
2760 };
2761 
2762 static const struct net_device_ops mtk_netdev_ops = {
2763         .ndo_init               = mtk_init,
2764         .ndo_uninit             = mtk_uninit,
2765         .ndo_open               = mtk_open,
2766         .ndo_stop               = mtk_stop,
2767         .ndo_start_xmit         = mtk_start_xmit,
2768         .ndo_set_mac_address    = mtk_set_mac_address,
2769         .ndo_validate_addr      = eth_validate_addr,
2770         .ndo_do_ioctl           = mtk_do_ioctl,
2771         .ndo_tx_timeout         = mtk_tx_timeout,
2772         .ndo_get_stats64        = mtk_get_stats64,
2773         .ndo_fix_features       = mtk_fix_features,
2774         .ndo_set_features       = mtk_set_features,
2775 #ifdef CONFIG_NET_POLL_CONTROLLER
2776         .ndo_poll_controller    = mtk_poll_controller,
2777 #endif
2778 };
2779 
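/* Create one MAC/netdev from a device-tree child node: the "reg" property
 * selects the GMAC id, a net_device with a struct mtk_mac private area is
 * allocated, hardware counters are set up, the phy-mode is read and a phylink
 * instance is created, after which netdev ops, features and the shared IRQ
 * are filled in. Registration of the netdev itself happens later during
 * probe.
 */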
2780 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2781 {
2782         const __be32 *_id = of_get_property(np, "reg", NULL);
2783         struct phylink *phylink;
2784         int phy_mode, id, err;
2785         struct mtk_mac *mac;
2786 
2787         if (!_id) {
2788                 dev_err(eth->dev, "missing mac id\n");
2789                 return -EINVAL;
2790         }
2791 
2792         id = be32_to_cpup(_id);
2793         if (id >= MTK_MAC_COUNT) {
2794                 dev_err(eth->dev, "%d is not a valid mac id\n", id);
2795                 return -EINVAL;
2796         }
2797 
2798         if (eth->netdev[id]) {
2799                 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2800                 return -EINVAL;
2801         }
2802 
2803         eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2804         if (!eth->netdev[id]) {
2805                 dev_err(eth->dev, "alloc_etherdev failed\n");
2806                 return -ENOMEM;
2807         }
2808         mac = netdev_priv(eth->netdev[id]);
2809         eth->mac[id] = mac;
2810         mac->id = id;
2811         mac->hw = eth;
2812         mac->of_node = np;
2813 
2814         memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2815         mac->hwlro_ip_cnt = 0;
2816 
2817         mac->hw_stats = devm_kzalloc(eth->dev,
2818                                      sizeof(*mac->hw_stats),
2819                                      GFP_KERNEL);
2820         if (!mac->hw_stats) {
2821                 dev_err(eth->dev, "failed to allocate counter memory\n");
2822                 err = -ENOMEM;
2823                 goto free_netdev;
2824         }
2825         spin_lock_init(&mac->hw_stats->stats_lock);
2826         u64_stats_init(&mac->hw_stats->syncp);
2827         mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2828 
2829         /* phylink create */
2830         phy_mode = of_get_phy_mode(np);
2831         if (phy_mode < 0) {
2832                 dev_err(eth->dev, "incorrect phy-mode\n");
2833                 err = -EINVAL;
2834                 goto free_netdev;
2835         }
2836 
2837         /* mac config is not set */
2838         mac->interface = PHY_INTERFACE_MODE_NA;
2839         mac->mode = MLO_AN_PHY;
2840         mac->speed = SPEED_UNKNOWN;
2841 
2842         mac->phylink_config.dev = &eth->netdev[id]->dev;
2843         mac->phylink_config.type = PHYLINK_NETDEV;
2844 
2845         phylink = phylink_create(&mac->phylink_config,
2846                                  of_fwnode_handle(mac->of_node),
2847                                  phy_mode, &mtk_phylink_ops);
2848         if (IS_ERR(phylink)) {
2849                 err = PTR_ERR(phylink);
2850                 goto free_netdev;
2851         }
2852 
2853         mac->phylink = phylink;
2854 
2855         SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2856         eth->netdev[id]->watchdog_timeo = 5 * HZ;
2857         eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2858         eth->netdev[id]->base_addr = (unsigned long)eth->base;
2859 
2860         eth->netdev[id]->hw_features = eth->soc->hw_features;
2861         if (eth->hwlro)
2862                 eth->netdev[id]->hw_features |= NETIF_F_LRO;
2863 
2864         eth->netdev[id]->vlan_features = eth->soc->hw_features &
2865                 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2866         eth->netdev[id]->features |= eth->soc->hw_features;
2867         eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2868 
2869         eth->netdev[id]->irq = eth->irq[0];
2870         eth->netdev[id]->dev.of_node = np;
2871 
2872         return 0;
2873 
2874 free_netdev:
2875         free_netdev(eth->netdev[id]);
2876         return err;
2877 }
2878 
2879 static int mtk_probe(struct platform_device *pdev)
2880 {
2881         struct device_node *mac_np;
2882         struct mtk_eth *eth;
2883         int err, i;
2884 
2885         eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2886         if (!eth)
2887                 return -ENOMEM;
2888 
2889         eth->soc = of_device_get_match_data(&pdev->dev);
2890 
2891         eth->dev = &pdev->dev;
2892         eth->base = devm_platform_ioremap_resource(pdev, 0);
2893         if (IS_ERR(eth->base))
2894                 return PTR_ERR(eth->base);
2895 
2896         if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2897                 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
2898                 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
2899         } else {
2900                 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
2901                 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
2902         }
2903 
2904         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2905                 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
2906                 eth->ip_align = NET_IP_ALIGN;
2907         } else {
2908                 eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
2909         }
2910 
2911         spin_lock_init(&eth->page_lock);
2912         spin_lock_init(&eth->tx_irq_lock);
2913         spin_lock_init(&eth->rx_irq_lock);
2914 
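        /* Shared infrastructure blocks are reached through syscon regmaps
         * referenced by phandle from the DT node; MT7628/88 has no separate
         * ethsys block, so that lookup is skipped there.
         */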
2915         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2916                 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2917                                                               "mediatek,ethsys");
2918                 if (IS_ERR(eth->ethsys)) {
2919                         dev_err(&pdev->dev, "no ethsys regmap found\n");
2920                         return PTR_ERR(eth->ethsys);
2921                 }
2922         }
2923 
2924         if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
2925                 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2926                                                              "mediatek,infracfg");
2927                 if (IS_ERR(eth->infra)) {
2928                         dev_err(&pdev->dev, "no infracfg regmap found\n");
2929                         return PTR_ERR(eth->infra);
2930                 }
2931         }
2932 
2933         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
2934                 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
2935                                           GFP_KERNEL);
2936                 if (!eth->sgmii)
2937                         return -ENOMEM;
2938 
2939                 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
2940                                      eth->soc->ana_rgc3);
2941 
2942                 if (err)
2943                         return err;
2944         }
2945 
2946         if (eth->soc->required_pctl) {
2947                 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2948                                                             "mediatek,pctl");
2949                 if (IS_ERR(eth->pctl)) {
2950                         dev_err(&pdev->dev, "no pctl regmap found\n");
2951                         return PTR_ERR(eth->pctl);
2952                 }
2953         }
2954 
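        /* Up to three interrupt lines are used: irq[0] is the one reported
         * via the netdevs, while TX and RX completion normally have their
         * own lines.  SoCs with MTK_SHARED_INT fold everything onto irq[0].
         */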
2955         for (i = 0; i < 3; i++) {
2956                 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
2957                         eth->irq[i] = eth->irq[0];
2958                 else
2959                         eth->irq[i] = platform_get_irq(pdev, i);
2960                 if (eth->irq[i] < 0) {
2961                         dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
2962                         return -ENXIO;
2963                 }
2964         }
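        /* Grab every clock the driver knows about; a missing clock is only
         * fatal when the SoC's required_clks bitmap marks it as needed, and
         * -EPROBE_DEFER is always propagated so probing can be retried.
         */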
2965         for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
2966                 eth->clks[i] = devm_clk_get(eth->dev,
2967                                             mtk_clks_source_name[i]);
2968                 if (IS_ERR(eth->clks[i])) {
2969                         if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
2970                                 return -EPROBE_DEFER;
2971                         if (eth->soc->required_clks & BIT(i)) {
2972                                 dev_err(&pdev->dev, "clock %s not found\n",
2973                                         mtk_clks_source_name[i]);
2974                                 return -EINVAL;
2975                         }
2976                         eth->clks[i] = NULL;
2977                 }
2978         }
2979 
2980         eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2981         INIT_WORK(&eth->pending_work, mtk_pending_work);
2982 
2983         err = mtk_hw_init(eth);
2984         if (err)
2985                 return err;
2986 
2987         eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
2988 
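        /* One MAC (and one netdev) is created for every enabled
         * "mediatek,eth-mac" child of the frame engine node.  A rough,
         * illustrative sketch of the expected DT shape (property values are
         * examples only; the DT binding document is authoritative):
         *
         *      eth: ethernet@... {
         *              compatible = "mediatek,mt7622-eth";
         *              mediatek,ethsys = <&ethsys>;
         *
         *              gmac0: mac@0 {
         *                      compatible = "mediatek,eth-mac";
         *                      reg = <0>;
         *                      phy-mode = "rgmii";
         *              };
         *      };
         */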
2989         for_each_child_of_node(pdev->dev.of_node, mac_np) {
2990                 if (!of_device_is_compatible(mac_np,
2991                                              "mediatek,eth-mac"))
2992                         continue;
2993 
2994                 if (!of_device_is_available(mac_np))
2995                         continue;
2996 
2997                 err = mtk_add_mac(eth, mac_np);
2998                 if (err) {
2999                         of_node_put(mac_np);
3000                         goto err_deinit_hw;
3001                 }
3002         }
3003 
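        /* On shared-interrupt SoCs a single combined handler serves both TX
         * and RX; otherwise TX and RX completion get their own handlers on
         * dedicated lines.
         */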
3004         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3005                 err = devm_request_irq(eth->dev, eth->irq[0],
3006                                        mtk_handle_irq, 0,
3007                                        dev_name(eth->dev), eth);
3008         } else {
3009                 err = devm_request_irq(eth->dev, eth->irq[1],
3010                                        mtk_handle_irq_tx, 0,
3011                                        dev_name(eth->dev), eth);
3012                 if (err)
3013                         goto err_free_dev;
3014 
3015                 err = devm_request_irq(eth->dev, eth->irq[2],
3016                                        mtk_handle_irq_rx, 0,
3017                                        dev_name(eth->dev), eth);
3018         }
3019         if (err)
3020                 goto err_free_dev;
3021 
3022         /* MDIO bus: no MT7628/88 support yet */
3023         if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3024                 err = mtk_mdio_init(eth);
3025                 if (err)
3026                         goto err_free_dev;
3027         }
3028 
3029         for (i = 0; i < MTK_MAX_DEVS; i++) {
3030                 if (!eth->netdev[i])
3031                         continue;
3032 
3033                 err = register_netdev(eth->netdev[i]);
3034                 if (err) {
3035                         dev_err(eth->dev, "error bringing up device\n");
3036                         goto err_deinit_mdio;
3037                 } else
3038                         netif_info(eth, probe, eth->netdev[i],
3039                                    "mediatek frame engine at 0x%08lx, irq %d\n",
3040                                    eth->netdev[i]->base_addr, eth->irq[0]);
3041         }
3042 
3043         /* The two MACs run on the same DMA rings, so a dummy net_device
3044          * is needed for NAPI to poll on.
3045          */
3046         init_dummy_netdev(&eth->dummy_dev);
3047         netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3048                        MTK_NAPI_WEIGHT);
3049         netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
3050                        MTK_NAPI_WEIGHT);
3051 
3052         platform_set_drvdata(pdev, eth);
3053 
3054         return 0;
3055 
3056 err_deinit_mdio:
3057         mtk_mdio_cleanup(eth);
3058 err_free_dev:
3059         mtk_free_dev(eth);
3060 err_deinit_hw:
3061         mtk_hw_deinit(eth);
3062 
3063         return err;
3064 }
3065 
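/*
 * Teardown mirrors probe in reverse: stop every netdev (which shuts the DMA
 * engines down), detach the PHYs from phylink, undo the hardware init, drop
 * the NAPI contexts, and finally clean up the netdevs and the MDIO bus.
 */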
3066 static int mtk_remove(struct platform_device *pdev)
3067 {
3068         struct mtk_eth *eth = platform_get_drvdata(pdev);
3069         struct mtk_mac *mac;
3070         int i;
3071 
3072         /* Stop all devices to make sure that DMA is properly shut down */
3073         for (i = 0; i < MTK_MAC_COUNT; i++) {
3074                 if (!eth->netdev[i])
3075                         continue;
3076                 mtk_stop(eth->netdev[i]);
3077                 mac = netdev_priv(eth->netdev[i]);
3078                 phylink_disconnect_phy(mac->phylink);
3079         }
3080 
3081         mtk_hw_deinit(eth);
3082 
3083         netif_napi_del(&eth->tx_napi);
3084         netif_napi_del(&eth->rx_napi);
3085         mtk_cleanup(eth);
3086         mtk_mdio_cleanup(eth);
3087 
3088         return 0;
3089 }
3090 
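/*
 * Per-SoC match data: the capability bitmap drives feature selection at
 * runtime (QDMA vs. PDMA, HW LRO, SGMII, shared IRQ, ...), required_clks
 * lists the clocks that must be present, required_pctl says whether a
 * "mediatek,pctl" regmap is mandatory, and ana_rgc3 is the register offset
 * passed to mtk_sgmii_init() on SGMII-capable parts.
 */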
3091 static const struct mtk_soc_data mt2701_data = {
3092         .caps = MT7623_CAPS | MTK_HWLRO,
3093         .hw_features = MTK_HW_FEATURES,
3094         .required_clks = MT7623_CLKS_BITMAP,
3095         .required_pctl = true,
3096 };
3097 
3098 static const struct mtk_soc_data mt7621_data = {
3099         .caps = MT7621_CAPS,
3100         .hw_features = MTK_HW_FEATURES,
3101         .required_clks = MT7621_CLKS_BITMAP,
3102         .required_pctl = false,
3103 };
3104 
3105 static const struct mtk_soc_data mt7622_data = {
3106         .ana_rgc3 = 0x2028,
3107         .caps = MT7622_CAPS | MTK_HWLRO,
3108         .hw_features = MTK_HW_FEATURES,
3109         .required_clks = MT7622_CLKS_BITMAP,
3110         .required_pctl = false,
3111 };
3112 
3113 static const struct mtk_soc_data mt7623_data = {
3114         .caps = MT7623_CAPS | MTK_HWLRO,
3115         .hw_features = MTK_HW_FEATURES,
3116         .required_clks = MT7623_CLKS_BITMAP,
3117         .required_pctl = true,
3118 };
3119 
3120 static const struct mtk_soc_data mt7629_data = {
3121         .ana_rgc3 = 0x128,
3122         .caps = MT7629_CAPS | MTK_HWLRO,
3123         .hw_features = MTK_HW_FEATURES,
3124         .required_clks = MT7629_CLKS_BITMAP,
3125         .required_pctl = false,
3126 };
3127 
3128 static const struct mtk_soc_data rt5350_data = {
3129         .caps = MT7628_CAPS,
3130         .hw_features = MTK_HW_FEATURES_MT7628,
3131         .required_clks = MT7628_CLKS_BITMAP,
3132         .required_pctl = false,
3133 };
3134 
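/*
 * The DT compatible string selects one of the mtk_soc_data blocks above;
 * mtk_probe() retrieves it with of_device_get_match_data().
 */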
3135 const struct of_device_id of_mtk_match[] = {
3136         { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3137         { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3138         { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3139         { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3140         { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3141         { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3142         {},
3143 };
3144 MODULE_DEVICE_TABLE(of, of_mtk_match);
3145 
3146 static struct platform_driver mtk_driver = {
3147         .probe = mtk_probe,
3148         .remove = mtk_remove,
3149         .driver = {
3150                 .name = "mtk_soc_eth",
3151                 .of_match_table = of_mtk_match,
3152         },
3153 };
3154 
3155 module_platform_driver(mtk_driver);
3156 
3157 MODULE_LICENSE("GPL");
3158 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3159 MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
