/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{ \
	u32 reg = __raw_readl(priv->base + offset + off); \
	return reg; \
} \
static inline void name##_writel(struct bcm_sysport_priv *priv, \
				 u32 val, u32 off) \
{ \
	__raw_writel(val, priv->base + offset + off); \
} \

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* L2-interrupt masking/unmasking helpers; they automatically save the applied
 * mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which) \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
	priv->irq##which##_mask &= ~(mask); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
	priv->irq##which##_mask |= (mask); \
} \

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (a few hundred
 * nanoseconds), so keep the check for 64-bit addresses explicit here to save
 * one register write per packet on 32-bit platforms.
 */
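/* A descriptor carries its buffer DMA address split across two words: the
 * low 32 bits live in DESC_ADDR_LO, while the upper bits share the
 * DESC_ADDR_HI_STATUS_LEN word with the length/status field (see how
 * bcm_sysport_xmit() builds len_status for the TX side).
 */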
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_sset(priv->phydev, cmd);
}

static int bcm_sysport_get_settings(struct net_device *dev,
				    struct ethtool_cmd *cmd)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_gset(priv->phydev, cmd);
}

static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g. using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= TSB_EN;
	else
		reg &= ~TSB_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
	info->n_stats = BCM_SYSPORT_STATS_LEN;
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCM_SYSPORT_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcm_sysport_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
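		/* netdev and software-only counters are maintained by the
		 * driver itself, there is nothing to read back from hardware
		 */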
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[i] = *(u32 *)p;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	dma_addr_t mapping;
	int ret;

	cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!cb->skb) {
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(kdev, cb->skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	ret = dma_mapping_error(kdev, mapping);
	if (ret) {
		priv->mib.rx_dma_failed++;
		bcm_sysport_free_cb(cb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return ret;
	}

	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);

	priv->rx_bd_assign_index++;
	priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
	priv->rx_bd_assign_ptr = priv->rx_bds +
		(priv->rx_bd_assign_index * DESC_SIZE);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	return 0;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	int ret = 0;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[priv->rx_bd_assign_index];
		if (cb->skb)
			continue;

		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			break;
	}

	return ret;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;
	int ret;

	/* Determine how much we should process since last call */
	p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = cb->skb;

		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto refill;
		}

		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			bcm_sysport_free_cb(cb);
			goto refill;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
refill:
		ret = bcm_sysport_rx_refill(priv, cb);
		if (ret)
			priv->mib.alloc_rx_buff_failed++;
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;

	if (cb->skb) {
		ndev->stats.tx_bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ndev->stats.tx_packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	u32 hw_ind;

	txq = netdev_get_tx_queue(ndev, ring->index);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	if (netif_tx_queue_stopped(txq) && pkts_compl)
		netif_tx_wake_queue(txq);

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	unsigned int released;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		intrl2_1_mask_clear(ring->priv, BIT(ring->index));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
	rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS), otherwise they will be discarded
	 * when they enter the switch port logic. When Broadcom tags are
	 * enabled, we need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done
	 * after the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
		  ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		      DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	dev->trans_start = jiffies;
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			 CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			 CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}

	phy_print_status(priv->phydev);
}

static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
		    1 << RING_HYST_THRESH_SHIFT,
		    TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_reclaim(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

/* RDMA helper */
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = rdma_readl(priv, RDMA_CONTROL);
	if (enable)
		reg |= RDMA_EN;
	else
		reg &= ~RDMA_EN;
	rdma_writel(priv, reg, RDMA_CONTROL);

	/* Poll for RDMA disabling completion */
	do {
		reg = rdma_readl(priv, RDMA_STATUS);
		if (!!(reg & RDMA_DISABLED) == !enable)
			return 0;
		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");

	return -ETIMEDOUT;
}

/* TDMA helper */
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
				  unsigned int enable)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = tdma_readl(priv, TDMA_CONTROL);
	if (enable)
		reg |= TDMA_EN;
	else
		reg &= ~TDMA_EN;
	tdma_writel(priv, reg, TDMA_CONTROL);

	/* Poll for TDMA disabling completion */
	do {
		reg = tdma_readl(priv, TDMA_STATUS);
		if (!!(reg & TDMA_DISABLED) == !enable)
			return 0;

		usleep_range(1000, 2000);
	} while (timeout-- > 0);

	netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");

	return -ETIMEDOUT;
}

static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
	u32 reg;
	int ret;

	/* Initialize SW view of the RX ring */
	priv->num_rx_bds = NUM_RX_DESC;
	priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
	priv->rx_bd_assign_ptr = priv->rx_bds;
	priv->rx_bd_assign_index = 0;
	priv->rx_c_index = 0;
	priv->rx_read_ptr = 0;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	ret = bcm_sysport_alloc_rx_bufs(priv);
	if (ret) {
		netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
		return ret;
	}

	/* Initialize HW, ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		rdma_enable_set(priv, 0);

	rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
	rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
	rdma_writel(priv, 0, RDMA_PROD_INDEX);
	rdma_writel(priv, 0, RDMA_CONS_INDEX);
	rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
		    RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
	/* Operate the queue in ring mode */
	rdma_writel(priv, 0, RDMA_START_ADDR_HI);
	rdma_writel(priv, 0, RDMA_START_ADDR_LO);
	rdma_writel(priv, 0, RDMA_END_ADDR_HI);
	rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);

	rdma_writel(priv, 1, RDMA_MBDONE_INTR);

	netif_dbg(priv, hw, priv->netdev,
		  "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
		  priv->num_rx_bds, priv->rx_bds);

	return 0;
}

static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	unsigned int i;
	u32 reg;

	/* Caller should ensure RDMA is disabled */
	reg = rdma_readl(priv, RDMA_STATUS);
	if (!(reg & RDMA_DISABLED))
		netdev_warn(priv->netdev, "RDMA not stopped!\n");

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		if (dma_unmap_addr(cb, dma_addr))
			dma_unmap_single(&priv->pdev->dev,
					 dma_unmap_addr(cb, dma_addr),
					 RX_BUF_LENGTH, DMA_FROM_DEVICE);
		bcm_sysport_free_cb(cb);
	}

	kfree(priv->rx_cbs);
	priv->rx_cbs = NULL;

	netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
}

static void bcm_sysport_set_rx_mode(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (dev->flags & IFF_PROMISC)
		reg |= CMD_PROMISC;
	else
		reg &= ~CMD_PROMISC;
	umac_writel(priv, reg, UMAC_CMD);

	/* No support for ALLMULTI */
	if (dev->flags & IFF_ALLMULTI)
		return;
}

static inline void umac_enable_set(struct bcm_sysport_priv *priv,
				   u32 mask, unsigned int enable)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-sized packet
	 * to be processed (1 msec).
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}

static inline void umac_reset(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = umac_readl(priv, UMAC_CMD);
	reg |= CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
	udelay(10);
	reg = umac_readl(priv, UMAC_CMD);
	reg &= ~CMD_SW_RESET;
	umac_writel(priv, reg, UMAC_CMD);
}

static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
			     unsigned char *addr)
{
	umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
		    (addr[2] << 8) | addr[3], UMAC_MAC0);
	umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

static void topctrl_flush(struct bcm_sysport_priv *priv)
{
	topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
	mdelay(1);
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);
}

static int bcm_sysport_change_mac(struct net_device *dev, void *p)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* interface is disabled, changes to MAC will be reflected on next
	 * open call
	 */
	if (!netif_running(dev))
		return 0;

	umac_set_hw_addr(priv, dev->dev_addr);

	return 0;
}

static void bcm_sysport_netif_start(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Enable NAPI */
	napi_enable(&priv->napi);

	/* Enable RX interrupt and TX ring full interrupt */
	intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);

	phy_start(priv->phydev);

	/* Enable TX interrupts for the 32 TXQs */
	intrl2_1_mask_clear(priv, 0xffffffff);

	/* Last call before we start the real business */
	netif_tx_start_all_queues(dev);
}

static void rbuf_init(struct bcm_sysport_priv *priv)
{
	u32 reg;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	/* Reset UniMAC */
	umac_reset(priv);

	/* Flush TX and RX FIFOs at TOPCTRL level */
	topctrl_flush(priv);

	/* Disable the UniMAC RX/TX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

	/* Enable RBUF 2bytes alignment and Receive Status Block */
	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	/* Read CRC forward */
	priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);

	priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
				      0, priv->phy_interface);
	if (!priv->phydev) {
		netdev_err(dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	/* Reset housekeeping link status */
	priv->old_duplex = -1;
	priv->old_link = -1;
	priv->old_pause = -1;

	/* mask all interrupts and request them */
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);

	ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request RX interrupt\n");
		goto out_phy_disconnect;
	}

	ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
	if (ret) {
		netdev_err(dev, "failed to request TX interrupt\n");
		goto out_free_irq0;
	}

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_ring;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	/* Turn on RDMA */
	ret = rdma_enable_set(priv, 1);
	if (ret)
		goto out_free_rx_ring;

	/* Turn on TDMA */
	ret = tdma_enable_set(priv, 1);
	if (ret)
		goto out_clear_rx_int;

	/* Turn on UniMAC TX/RX */
	umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

	bcm_sysport_netif_start(dev);

	return 0;

out_clear_rx_int:
	intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	free_irq(priv->irq1, dev);
out_free_irq0:
	free_irq(priv->irq0, dev);
out_phy_disconnect:
	phy_disconnect(priv->phydev);
	return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* stop all software from updating hardware */
	netif_tx_stop_all_queues(dev);
	napi_disable(&priv->napi);
	phy_stop(priv->phydev);

	/* mask all interrupts */
	intrl2_0_mask_set(priv, 0xffffffff);
	intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
	intrl2_1_mask_set(priv, 0xffffffff);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
}

static int bcm_sysport_stop(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret;

	bcm_sysport_netif_stop(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling TDMA\n");
		return ret;
	}

	/* Wait for a maximum packet size to be drained */
	usleep_range(2000, 3000);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "timeout disabling RDMA\n");
		return ret;
	}

	/* Disable UniMAC TX */
	umac_enable_set(priv, CMD_TX_EN, 0);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	free_irq(priv->irq0, dev);
	free_irq(priv->irq1, dev);

	/* Disconnect from PHY */
	phy_disconnect(priv->phydev);

	return 0;
}

static struct ethtool_ops bcm_sysport_ethtool_ops = {
	.get_settings = bcm_sysport_get_settings,
	.set_settings = bcm_sysport_set_settings,
	.get_drvinfo = bcm_sysport_get_drvinfo,
	.get_msglevel = bcm_sysport_get_msglvl,
	.set_msglevel = bcm_sysport_set_msglvl,
	.get_link = ethtool_op_get_link,
	.get_strings = bcm_sysport_get_strings,
	.get_ethtool_stats = bcm_sysport_get_stats,
	.get_sset_count = bcm_sysport_get_sset_count,
	.get_wol = bcm_sysport_get_wol,
	.set_wol = bcm_sysport_set_wol,
};

static const struct net_device_ops bcm_sysport_netdev_ops = {
	.ndo_start_xmit = bcm_sysport_xmit,
	.ndo_tx_timeout = bcm_sysport_tx_timeout,
	.ndo_open = bcm_sysport_open,
	.ndo_stop = bcm_sysport_stop,
	.ndo_set_features = bcm_sysport_set_features,
	.ndo_set_rx_mode = bcm_sysport_set_rx_mode,
	.ndo_set_mac_address = bcm_sysport_change_mac,
};

#define REV_FMT	"v%2x.%02x"

static int bcm_sysport_probe(struct platform_device *pdev)
{
	struct bcm_sysport_priv *priv;
	struct device_node *dn;
	struct net_device *dev;
	const void *macaddr;
	struct resource *r;
	u32 txq, rxq;
	int ret;

	dn = pdev->dev.of_node;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* Read the Transmit/Receive Queue properties */
	if (of_property_read_u32(dn, "systemport,num-txq", &txq))
		txq = TDMA_NUM_RINGS;
	if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
		rxq = 1;

	dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
	if (!dev)
		return -ENOMEM;

	/* Initialize private members */
	priv = netdev_priv(dev);

	priv->irq0 = platform_get_irq(pdev, 0);
	priv->irq1 = platform_get_irq(pdev, 1);
	priv->wol_irq = platform_get_irq(pdev, 2);
	if (priv->irq0 <= 0 || priv->irq1 <= 0) {
		dev_err(&pdev->dev, "invalid interrupts\n");
		ret = -EINVAL;
		goto err;
	}

	priv->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto err;
	}

	priv->netdev = dev;
	priv->pdev = pdev;

	priv->phy_interface = of_get_phy_mode(dn);
	/* Default to GMII interface mode */
	if (priv->phy_interface < 0)
		priv->phy_interface = PHY_INTERFACE_MODE_GMII;

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
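	/* Illustrative only (not a binding reference): such a setup would
	 * typically describe the fixed link directly under the MAC node,
	 * e.g. using the standard fixed-link sub-node:
	 *
	 *	ethernet@f04a0000 {
	 *		compatible = "brcm,systemport";
	 *		...
	 *		fixed-link {
	 *			speed = <1000>;
	 *			full-duplex;
	 *		};
	 *	};
	 */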
	if (of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret) {
			dev_err(&pdev->dev, "failed to register fixed PHY\n");
			goto err;
		}

		priv->phy_dn = dn;
	}

	/* Initialize netdevice members */
	macaddr = of_get_mac_address(dn);
	if (!macaddr || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		random_ether_addr(dev->dev_addr);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	dev->ethtool_ops = &bcm_sysport_ethtool_ops;
	dev->netdev_ops = &bcm_sysport_netdev_ops;
	netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);

	/* HW supported features, none enabled by default */
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = 1;
	ret = devm_request_irq(&pdev->dev, priv->wol_irq,
			       bcm_sysport_wol_isr, 0, dev->name, priv);
	if (!ret)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom once and for all */
	BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
	dev->needed_headroom += sizeof(struct bcm_tsb);

	/* libphy will adjust the link state accordingly */
	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register net_device\n");
		goto err;
	}

	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
	dev_info(&pdev->dev,
		 "Broadcom SYSTEMPORT" REV_FMT
		 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
		 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
		 priv->base, priv->irq0, priv->irq1, txq, rxq);

	return 0;
err:
	free_netdev(dev);
	return ret;
}

static int bcm_sysport_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);

	/* Not much to do, ndo_close has been called
	 * and we use managed allocations
	 */
	unregister_netdev(dev);
	free_netdev(dev);
	dev_set_drvdata(&pdev->dev, NULL);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	unsigned int timeout = 1000;
	u32 reg;

	/* Password has already been programmed */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg |= MPD_EN;
	reg &= ~PSW_EN;
	if (priv->wolopts & WAKE_MAGICSECURE)
		reg |= PSW_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	/* Make sure RBUF entered WoL mode as a result */
	do {
		reg = rbuf_readl(priv, RBUF_STATUS);
		if (reg & RBUF_WOL_MODE)
			break;

		udelay(10);
	} while (timeout-- > 0);

	/* Do not leave the UniMAC RBUF matching only MPD packets */
	if (!timeout) {
		reg = umac_readl(priv, UMAC_MPD_CTRL);
		reg &= ~MPD_EN;
		umac_writel(priv, reg, UMAC_MPD_CTRL);
		netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
		return -ETIMEDOUT;
	}

	/* UniMAC receive needs to be turned on */
	umac_enable_set(priv, CMD_RX_EN, 1);

	/* Enable the interrupt wake-up source */
	intrl2_0_mask_clear(priv, INTRL2_0_MPD);

	netif_dbg(priv, wol, ndev, "entered WOL mode\n");

	return 0;
}

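/* System suspend: quiesce the interface (stop queues, NAPI and the PHY),
 * drain and disable both DMA engines, tear down the rings, and only arm
 * the MagicPacket logic above when Wake-on-LAN was requested.
 */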
static int bcm_sysport_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	int ret = 0;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	bcm_sysport_netif_stop(dev);

	phy_suspend(priv->phydev);

	netif_device_detach(dev);

	/* Disable UniMAC RX */
	umac_enable_set(priv, CMD_RX_EN, 0);

	ret = rdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "RDMA timeout!\n");
		return ret;
	}

	/* Disable RXCHK if enabled */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg &= ~RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	/* Flush RX pipe */
	if (!priv->wolopts)
		topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);

	ret = tdma_enable_set(priv, 0);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		return ret;
	}

	/* Wait for a packet boundary */
	usleep_range(2000, 3000);

	umac_enable_set(priv, CMD_TX_EN, 0);

	topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);

	/* Free RX/TX rings SW structures */
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	bcm_sysport_fini_rx_ring(priv);

	/* Get prepared for Wake-on-LAN */
	if (device_may_wakeup(d) && priv->wolopts)
		ret = bcm_sysport_suspend_to_wol(priv);

	return ret;
}

static int bcm_sysport_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;
	int ret;

	if (!netif_running(dev))
		return 0;

	umac_reset(priv);

	/* We may have been suspended and never received a WOL event that
	 * would turn off MPD detection, take care of that now
	 */
	bcm_sysport_resume_from_wol(priv);

	/* Initialize both hardware and software ring */
	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = bcm_sysport_init_tx_ring(priv, i);
		if (ret) {
			netdev_err(dev, "failed to initialize TX ring %d\n",
				   i);
			goto out_free_tx_rings;
		}
	}

	/* Initialize linked-list */
	tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

	/* Initialize RX ring */
	ret = bcm_sysport_init_rx_ring(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize RX ring\n");
		goto out_free_rx_ring;
	}

	netif_device_attach(dev);

	/* RX pipe enable */
	topctrl_writel(priv, 0, RX_FLUSH_CNTL);

	ret = rdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "failed to enable RDMA\n");
		goto out_free_rx_ring;
	}

	/* Enable RXCHK if it was enabled before suspend */
	if (priv->rx_chk_en) {
		reg = rxchk_readl(priv, RXCHK_CONTROL);
		reg |= RXCHK_EN;
		rxchk_writel(priv, reg, RXCHK_CONTROL);
	}

	rbuf_init(priv);

	/* Set maximum frame length */
	umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* Set MAC address */
	umac_set_hw_addr(priv, dev->dev_addr);

	umac_enable_set(priv, CMD_RX_EN, 1);

	/* TX pipe enable */
	topctrl_writel(priv, 0, TX_FLUSH_CNTL);

	umac_enable_set(priv, CMD_TX_EN, 1);

	ret = tdma_enable_set(priv, 1);
	if (ret) {
		netdev_err(dev, "TDMA timeout!\n");
		goto out_free_rx_ring;
	}

	phy_resume(priv->phydev);

	bcm_sysport_netif_start(dev);

	return 0;

out_free_rx_ring:
	bcm_sysport_fini_rx_ring(priv);
out_free_tx_rings:
	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_fini_tx_ring(priv, i);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
			 bcm_sysport_suspend, bcm_sysport_resume);

static const struct of_device_id bcm_sysport_of_match[] = {
	{ .compatible = "brcm,systemport-v1.00" },
	{ .compatible = "brcm,systemport" },
	{ /* sentinel */ }
};

static struct platform_driver bcm_sysport_driver = {
	.probe = bcm_sysport_probe,
	.remove = bcm_sysport_remove,
	.driver = {
		.name = "brcm-systemport",
		.of_match_table = bcm_sysport_of_match,
		.pm = &bcm_sysport_pm_ops,
	},
};
module_platform_driver(bcm_sysport_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
MODULE_ALIAS("platform:brcm-systemport");
MODULE_LICENSE("GPL");