root/drivers/net/ethernet/arc/emac_main.c

DEFINITIONS

This source file includes the following definitions.
  1. arc_emac_tx_avail
  2. arc_emac_adjust_link
  3. arc_emac_get_drvinfo
  4. arc_emac_tx_clean
  5. arc_emac_rx
  6. arc_emac_rx_miss_handle
  7. arc_emac_rx_stall_check
  8. arc_emac_poll
  9. arc_emac_intr
  10. arc_emac_poll_controller
  11. arc_emac_open
  12. arc_emac_set_rx_mode
  13. arc_free_tx_queue
  14. arc_free_rx_queue
  15. arc_emac_stop
  16. arc_emac_stats
  17. arc_emac_tx
  18. arc_emac_set_address_internal
  19. arc_emac_set_address
  20. arc_emac_ioctl
  21. arc_emac_restart
  22. arc_emac_probe
  23. arc_emac_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2004-2013 Synopsys, Inc. (www.synopsys.com)
   4  *
   5  * Driver for the ARC EMAC 10100 (hardware revision 5)
   6  *
   7  * Contributors:
   8  *              Amit Bhor
   9  *              Sameer Dhavale
  10  *              Vineet Gupta
  11  */
  12 
  13 #include <linux/crc32.h>
  14 #include <linux/etherdevice.h>
  15 #include <linux/interrupt.h>
  16 #include <linux/io.h>
  17 #include <linux/module.h>
  18 #include <linux/of_address.h>
  19 #include <linux/of_irq.h>
  20 #include <linux/of_mdio.h>
  21 #include <linux/of_net.h>
  22 #include <linux/of_platform.h>
  23 
  24 #include "emac.h"
  25 
  26 static void arc_emac_restart(struct net_device *ndev);
  27 
  28 /**
  29  * arc_emac_tx_avail - Return the number of available slots in the tx ring.
  30  * @priv: Pointer to ARC EMAC private data structure.
  31  *
  32  * returns: the number of slots available for transmission in the Tx ring.
  33  */
  34 static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
  35 {
  36         return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
  37 }
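
/*
 * Illustrative example, assuming TX_BD_NUM == 128 (the actual value is
 * defined in emac.h): with txbd_dirty == 5 and txbd_curr == 10 the
 * expression above gives (5 + 128 - 10 - 1) % 128 == 122 free slots.
 * The "- 1" keeps one descriptor permanently unused so that a completely
 * full ring (avail == 0) can be told apart from an empty one
 * (avail == TX_BD_NUM - 1).
 */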
  38 
  39 /**
  40  * arc_emac_adjust_link - Adjust the PHY link duplex.
  41  * @ndev:       Pointer to the net_device structure.
  42  *
  43  * This function is called to change the duplex setting after auto negotiation
  44  * is done by the PHY.
  45  */
  46 static void arc_emac_adjust_link(struct net_device *ndev)
  47 {
  48         struct arc_emac_priv *priv = netdev_priv(ndev);
  49         struct phy_device *phy_dev = ndev->phydev;
  50         unsigned int reg, state_changed = 0;
  51 
  52         if (priv->link != phy_dev->link) {
  53                 priv->link = phy_dev->link;
  54                 state_changed = 1;
  55         }
  56 
  57         if (priv->speed != phy_dev->speed) {
  58                 priv->speed = phy_dev->speed;
  59                 state_changed = 1;
  60                 if (priv->set_mac_speed)
  61                         priv->set_mac_speed(priv, priv->speed);
  62         }
  63 
  64         if (priv->duplex != phy_dev->duplex) {
  65                 reg = arc_reg_get(priv, R_CTRL);
  66 
  67                 if (phy_dev->duplex == DUPLEX_FULL)
  68                         reg |= ENFL_MASK;
  69                 else
  70                         reg &= ~ENFL_MASK;
  71 
  72                 arc_reg_set(priv, R_CTRL, reg);
  73                 priv->duplex = phy_dev->duplex;
  74                 state_changed = 1;
  75         }
  76 
  77         if (state_changed)
  78                 phy_print_status(phy_dev);
  79 }
  80 
  81 /**
  82  * arc_emac_get_drvinfo - Get EMAC driver information.
  83  * @ndev:       Pointer to net_device structure.
  84  * @info:       Pointer to ethtool_drvinfo structure.
  85  *
  86  * This implements the ethtool command for getting driver information.
  87  * Issue "ethtool -i ethX" at the Linux prompt to execute this function.
  88  */
  89 static void arc_emac_get_drvinfo(struct net_device *ndev,
  90                                  struct ethtool_drvinfo *info)
  91 {
  92         struct arc_emac_priv *priv = netdev_priv(ndev);
  93 
  94         strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
  95         strlcpy(info->version, priv->drv_version, sizeof(info->version));
  96 }
  97 
  98 static const struct ethtool_ops arc_emac_ethtool_ops = {
  99         .get_drvinfo    = arc_emac_get_drvinfo,
 100         .get_link       = ethtool_op_get_link,
 101         .get_link_ksettings = phy_ethtool_get_link_ksettings,
 102         .set_link_ksettings = phy_ethtool_set_link_ksettings,
 103 };
 104 
 105 #define FIRST_OR_LAST_MASK      (FIRST_MASK | LAST_MASK)
 106 
 107 /**
 108  * arc_emac_tx_clean - clears Tx BDs that have been processed by the EMAC.
 109  * @ndev:       Pointer to the network device.
 110  */
 111 static void arc_emac_tx_clean(struct net_device *ndev)
 112 {
 113         struct arc_emac_priv *priv = netdev_priv(ndev);
 114         struct net_device_stats *stats = &ndev->stats;
 115         unsigned int i;
 116 
 117         for (i = 0; i < TX_BD_NUM; i++) {
 118                 unsigned int *txbd_dirty = &priv->txbd_dirty;
 119                 struct arc_emac_bd *txbd = &priv->txbd[*txbd_dirty];
 120                 struct buffer_state *tx_buff = &priv->tx_buff[*txbd_dirty];
 121                 struct sk_buff *skb = tx_buff->skb;
 122                 unsigned int info = le32_to_cpu(txbd->info);
 123 
 124                 if ((info & FOR_EMAC) || !txbd->data || !skb)
 125                         break;
 126 
 127                 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
 128                         stats->tx_errors++;
 129                         stats->tx_dropped++;
 130 
 131                         if (info & DEFR)
 132                                 stats->tx_carrier_errors++;
 133 
 134                         if (info & LTCL)
 135                                 stats->collisions++;
 136 
 137                         if (info & UFLO)
 138                                 stats->tx_fifo_errors++;
 139                 } else if (likely(info & FIRST_OR_LAST_MASK)) {
 140                         stats->tx_packets++;
 141                         stats->tx_bytes += skb->len;
 142                 }
 143 
 144                 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
 145                                  dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
 146 
 147                 /* return the sk_buff to system */
 148                 dev_consume_skb_irq(skb);
 149 
 150                 txbd->data = 0;
 151                 txbd->info = 0;
 152                 tx_buff->skb = NULL;
 153 
 154                 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
 155         }
 156 
 157         /* Ensure that txbd_dirty is visible to tx() before checking
 158          * for queue stopped.
 159          */
 160         smp_mb();
 161 
 162         if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
 163                 netif_wake_queue(ndev);
 164 }
 165 
 166 /**
 167  * arc_emac_rx - processing of Rx packets.
 168  * @ndev:       Pointer to the network device.
 169  * @budget:     How many BDs to process on 1 call.
 170  *
 171  * returns:     Number of processed BDs
 172  *
 173  * Iterate through Rx BDs and deliver received packets to the upper layer.
 174  */
 175 static int arc_emac_rx(struct net_device *ndev, int budget)
 176 {
 177         struct arc_emac_priv *priv = netdev_priv(ndev);
 178         unsigned int work_done;
 179 
 180         for (work_done = 0; work_done < budget; work_done++) {
 181                 unsigned int *last_rx_bd = &priv->last_rx_bd;
 182                 struct net_device_stats *stats = &ndev->stats;
 183                 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
 184                 struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
 185                 unsigned int pktlen, info = le32_to_cpu(rxbd->info);
 186                 struct sk_buff *skb;
 187                 dma_addr_t addr;
 188 
 189                 if (unlikely((info & OWN_MASK) == FOR_EMAC))
 190                         break;
 191 
 192                 /* Make a note that we saw a packet at this BD.
 193                  * So next time, the driver starts from this BD + 1.
 194                  */
 195                 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
 196 
 197                 if (unlikely((info & FIRST_OR_LAST_MASK) !=
 198                              FIRST_OR_LAST_MASK)) {
 199                         /* We pre-allocate buffers of MTU size so incoming
 200                          * packets won't be split/chained.
 201                          */
 202                         if (net_ratelimit())
 203                                 netdev_err(ndev, "incomplete packet received\n");
 204 
 205                         /* Return ownership to EMAC */
 206                         rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 207                         stats->rx_errors++;
 208                         stats->rx_length_errors++;
 209                         continue;
 210                 }
 211 
 212                 /* Prepare the BD for the next cycle: call netif_receive_skb()
 213                  * only if a new skb was allocated and mapped, to avoid holes
 214                  * in the Rx FIFO.
 215                  */
 216                 skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
 217                 if (unlikely(!skb)) {
 218                         if (net_ratelimit())
 219                                 netdev_err(ndev, "cannot allocate skb\n");
 220                         /* Return ownership to EMAC */
 221                         rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 222                         stats->rx_errors++;
 223                         stats->rx_dropped++;
 224                         continue;
 225                 }
 226 
 227                 addr = dma_map_single(&ndev->dev, (void *)skb->data,
 228                                       EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
 229                 if (dma_mapping_error(&ndev->dev, addr)) {
 230                         if (net_ratelimit())
 231                                 netdev_err(ndev, "cannot map dma buffer\n");
 232                         dev_kfree_skb(skb);
 233                         /* Return ownership to EMAC */
 234                         rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 235                         stats->rx_errors++;
 236                         stats->rx_dropped++;
 237                         continue;
 238                 }
 239 
 240                 /* unmap previously mapped skb */
 241                 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
 242                                  dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
 243 
 244                 pktlen = info & LEN_MASK;
 245                 stats->rx_packets++;
 246                 stats->rx_bytes += pktlen;
 247                 skb_put(rx_buff->skb, pktlen);
 248                 rx_buff->skb->dev = ndev;
 249                 rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
 250 
 251                 netif_receive_skb(rx_buff->skb);
 252 
 253                 rx_buff->skb = skb;
 254                 dma_unmap_addr_set(rx_buff, addr, addr);
 255                 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 256 
 257                 rxbd->data = cpu_to_le32(addr);
 258 
 259                 /* Make sure pointer to data buffer is set */
 260                 wmb();
 261 
 262                 /* Return ownership to EMAC */
 263                 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 264         }
 265 
 266         return work_done;
 267 }
 268 
 269 /**
 270  * arc_emac_rx_miss_handle - handle R_MISS register
 271  * @ndev:       Pointer to the net_device structure.
 272  */
 273 static void arc_emac_rx_miss_handle(struct net_device *ndev)
 274 {
 275         struct arc_emac_priv *priv = netdev_priv(ndev);
 276         struct net_device_stats *stats = &ndev->stats;
 277         unsigned int miss;
 278 
 279         miss = arc_reg_get(priv, R_MISS);
 280         if (miss) {
 281                 stats->rx_errors += miss;
 282                 stats->rx_missed_errors += miss;
 283                 priv->rx_missed_errors += miss;
 284         }
 285 }
 286 
 287 /**
 288  * arc_emac_rx_stall_check - check RX stall
 289  * @ndev:       Pointer to the net_device structure.
 290  * @budget:     How many BDs were requested to be processed in one call.
 291  * @work_done:  How many BDs were processed.
 292  *
 293  * Under certain conditions the EMAC stops reception of incoming packets and
 294  * continuously increments the R_MISS register instead of saving data into
 295  * the provided buffer. This function detects that condition and restarts
 296  * the EMAC.
 297  */
 298 static void arc_emac_rx_stall_check(struct net_device *ndev,
 299                                     int budget, unsigned int work_done)
 300 {
 301         struct arc_emac_priv *priv = netdev_priv(ndev);
 302         struct arc_emac_bd *rxbd;
 303 
 304         if (work_done)
 305                 priv->rx_missed_errors = 0;
 306 
 307         if (priv->rx_missed_errors && budget) {
 308                 rxbd = &priv->rxbd[priv->last_rx_bd];
 309                 if (le32_to_cpu(rxbd->info) & FOR_EMAC) {
 310                         arc_emac_restart(ndev);
 311                         priv->rx_missed_errors = 0;
 312                 }
 313         }
 314 }
 315 
 316 /**
 317  * arc_emac_poll - NAPI poll handler.
 318  * @napi:       Pointer to napi_struct structure.
 319  * @budget:     How many BDs to process on 1 call.
 320  *
 321  * returns:     Number of processed BDs
 322  */
 323 static int arc_emac_poll(struct napi_struct *napi, int budget)
 324 {
 325         struct net_device *ndev = napi->dev;
 326         struct arc_emac_priv *priv = netdev_priv(ndev);
 327         unsigned int work_done;
 328 
 329         arc_emac_tx_clean(ndev);
 330         arc_emac_rx_miss_handle(ndev);
 331 
 332         work_done = arc_emac_rx(ndev, budget);
 333         if (work_done < budget) {
 334                 napi_complete_done(napi, work_done);
 335                 arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 336         }
 337 
 338         arc_emac_rx_stall_check(ndev, budget, work_done);
 339 
 340         return work_done;
 341 }
 342 
 343 /**
 344  * arc_emac_intr - Global interrupt handler for EMAC.
 345  * @irq:                irq number.
 346  * @dev_instance:       device instance.
 347  *
 348  * returns: IRQ_HANDLED for all cases.
 349  *
 350  * The ARC EMAC has only one interrupt line, and the bits raised in the
 351  * STATUS register tell us why the interrupt fired.
 352  */
 353 static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 354 {
 355         struct net_device *ndev = dev_instance;
 356         struct arc_emac_priv *priv = netdev_priv(ndev);
 357         struct net_device_stats *stats = &ndev->stats;
 358         unsigned int status;
 359 
 360         status = arc_reg_get(priv, R_STATUS);
 361         status &= ~MDIO_MASK;
 362 
 363         /* Reset all flags except "MDIO complete" */
 364         arc_reg_set(priv, R_STATUS, status);
 365 
 366         if (status & (RXINT_MASK | TXINT_MASK)) {
 367                 if (likely(napi_schedule_prep(&priv->napi))) {
 368                         arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 369                         __napi_schedule(&priv->napi);
 370                 }
 371         }
 372 
 373         if (status & ERR_MASK) {
 374                 /* MSER/RXCR/RXFR/RXFL interrupt fires on corresponding
 375                  * 8-bit error counter overrun.
 376                  */
 377 
 378                 if (status & MSER_MASK) {
 379                         stats->rx_missed_errors += 0x100;
 380                         stats->rx_errors += 0x100;
 381                         priv->rx_missed_errors += 0x100;
 382                         napi_schedule(&priv->napi);
 383                 }
 384 
 385                 if (status & RXCR_MASK) {
 386                         stats->rx_crc_errors += 0x100;
 387                         stats->rx_errors += 0x100;
 388                 }
 389 
 390                 if (status & RXFR_MASK) {
 391                         stats->rx_frame_errors += 0x100;
 392                         stats->rx_errors += 0x100;
 393                 }
 394 
 395                 if (status & RXFL_MASK) {
 396                         stats->rx_over_errors += 0x100;
 397                         stats->rx_errors += 0x100;
 398                 }
 399         }
 400 
 401         return IRQ_HANDLED;
 402 }
 403 
 404 #ifdef CONFIG_NET_POLL_CONTROLLER
 405 static void arc_emac_poll_controller(struct net_device *dev)
 406 {
 407         disable_irq(dev->irq);
 408         arc_emac_intr(dev->irq, dev);
 409         enable_irq(dev->irq);
 410 }
 411 #endif
 412 
 413 /**
 414  * arc_emac_open - Open the network device.
 415  * @ndev:       Pointer to the network device.
 416  *
 417  * returns: 0 on success, or a non-zero error value on failure.
 418  *
 419  * This function sets up the Rx descriptor ring, programs the BD ring base
 420  * registers, enables EMAC interrupts and the EMAC itself, and starts the
 421  * Tx queue. It also starts the PHY device.
 422  */
 423 static int arc_emac_open(struct net_device *ndev)
 424 {
 425         struct arc_emac_priv *priv = netdev_priv(ndev);
 426         struct phy_device *phy_dev = ndev->phydev;
 427         int i;
 428 
 429         phy_dev->autoneg = AUTONEG_ENABLE;
 430         phy_dev->speed = 0;
 431         phy_dev->duplex = 0;
 432         linkmode_and(phy_dev->advertising, phy_dev->advertising,
 433                      phy_dev->supported);
 434 
 435         priv->last_rx_bd = 0;
 436 
 437         /* Allocate and set buffers for Rx BD's */
 438         for (i = 0; i < RX_BD_NUM; i++) {
 439                 dma_addr_t addr;
 440                 unsigned int *last_rx_bd = &priv->last_rx_bd;
 441                 struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
 442                 struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
 443 
 444                 rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
 445                                                          EMAC_BUFFER_SIZE);
 446                 if (unlikely(!rx_buff->skb))
 447                         return -ENOMEM;
 448 
 449                 addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
 450                                       EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
 451                 if (dma_mapping_error(&ndev->dev, addr)) {
 452                         netdev_err(ndev, "cannot dma map\n");
 453                         dev_kfree_skb(rx_buff->skb);
 454                         return -ENOMEM;
 455                 }
 456                 dma_unmap_addr_set(rx_buff, addr, addr);
 457                 dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
 458 
 459                 rxbd->data = cpu_to_le32(addr);
 460 
 461                 /* Make sure pointer to data buffer is set */
 462                 wmb();
 463 
 464                 /* Return ownership to EMAC */
 465                 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 466 
 467                 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
 468         }
 469 
 470         priv->txbd_curr = 0;
 471         priv->txbd_dirty = 0;
 472 
 473         /* Clean Tx BD's */
 474         memset(priv->txbd, 0, TX_RING_SZ);
 475 
 476         /* Initialize logical address filter */
 477         arc_reg_set(priv, R_LAFL, 0);
 478         arc_reg_set(priv, R_LAFH, 0);
 479 
 480         /* Set BD ring pointers for device side */
 481         arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
 482         arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 483 
 484         /* Enable interrupts */
 485         arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 486 
 487         /* Set CONTROL */
 488         arc_reg_set(priv, R_CTRL,
 489                     (RX_BD_NUM << 24) | /* RX BD table length */
 490                     (TX_BD_NUM << 16) | /* TX BD table length */
 491                     TXRN_MASK | RXRN_MASK);
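        /*
         * Worked example of the CONTROL word written above, assuming for
         * illustration that RX_BD_NUM == TX_BD_NUM == 128 (actual values are
         * in emac.h): (128 << 24) | (128 << 16) == 0x80800000, i.e. the Rx
         * ring length lands in bits 31:24 and the Tx ring length in bits
         * 23:16, with TXRN_MASK | RXRN_MASK OR-ed in as well.
         */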
 492 
 493         napi_enable(&priv->napi);
 494 
 495         /* Enable EMAC */
 496         arc_reg_or(priv, R_CTRL, EN_MASK);
 497 
 498         phy_start(ndev->phydev);
 499 
 500         netif_start_queue(ndev);
 501 
 502         return 0;
 503 }
 504 
 505 /**
 506  * arc_emac_set_rx_mode - Change the receive filtering mode.
 507  * @ndev:       Pointer to the network device.
 508  *
 509  * This function enables/disables promiscuous or all-multicast mode
 510  * and updates the multicast filtering list of the network device.
 511  */
 512 static void arc_emac_set_rx_mode(struct net_device *ndev)
 513 {
 514         struct arc_emac_priv *priv = netdev_priv(ndev);
 515 
 516         if (ndev->flags & IFF_PROMISC) {
 517                 arc_reg_or(priv, R_CTRL, PROM_MASK);
 518         } else {
 519                 arc_reg_clr(priv, R_CTRL, PROM_MASK);
 520 
 521                 if (ndev->flags & IFF_ALLMULTI) {
 522                         arc_reg_set(priv, R_LAFL, ~0);
 523                         arc_reg_set(priv, R_LAFH, ~0);
 524                 } else if (ndev->flags & IFF_MULTICAST) {
 525                         struct netdev_hw_addr *ha;
 526                         unsigned int filter[2] = { 0, 0 };
 527                         int bit;
 528 
 529                         netdev_for_each_mc_addr(ha, ndev) {
 530                                 bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
 531                                 filter[bit >> 5] |= 1 << (bit & 31);
 532                         }
 533 
 534                         arc_reg_set(priv, R_LAFL, filter[0]);
 535                         arc_reg_set(priv, R_LAFH, filter[1]);
 536                 } else {
 537                         arc_reg_set(priv, R_LAFL, 0);
 538                         arc_reg_set(priv, R_LAFH, 0);
 539                 }
 540         }
 541 }
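
/*
 * Sketch of the multicast hash used above: the top six bits of the
 * little-endian CRC-32 of each address select one of the 64 bits spread
 * across the R_LAFL/R_LAFH pair.  If, for example, the CRC yields bit
 * index 37, then 37 >> 5 == 1 and 37 & 31 == 5, so bit 5 of filter[1] is
 * set and later written to R_LAFH.
 */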
 542 
 543 /**
 544  * arc_free_tx_queue - free skbs from the Tx queue
 545  * @ndev:       Pointer to the network device.
 546  *
 547  * This function must be called while the EMAC is disabled.
 548  */
 549 static void arc_free_tx_queue(struct net_device *ndev)
 550 {
 551         struct arc_emac_priv *priv = netdev_priv(ndev);
 552         unsigned int i;
 553 
 554         for (i = 0; i < TX_BD_NUM; i++) {
 555                 struct arc_emac_bd *txbd = &priv->txbd[i];
 556                 struct buffer_state *tx_buff = &priv->tx_buff[i];
 557 
 558                 if (tx_buff->skb) {
 559                         dma_unmap_single(&ndev->dev,
 560                                          dma_unmap_addr(tx_buff, addr),
 561                                          dma_unmap_len(tx_buff, len),
 562                                          DMA_TO_DEVICE);
 563 
 564                         /* return the sk_buff to system */
 565                         dev_kfree_skb_irq(tx_buff->skb);
 566                 }
 567 
 568                 txbd->info = 0;
 569                 txbd->data = 0;
 570                 tx_buff->skb = NULL;
 571         }
 572 }
 573 
 574 /**
 575  * arc_free_rx_queue - free skbs from the Rx queue
 576  * @ndev:       Pointer to the network device.
 577  *
 578  * This function must be called while the EMAC is disabled.
 579  */
 580 static void arc_free_rx_queue(struct net_device *ndev)
 581 {
 582         struct arc_emac_priv *priv = netdev_priv(ndev);
 583         unsigned int i;
 584 
 585         for (i = 0; i < RX_BD_NUM; i++) {
 586                 struct arc_emac_bd *rxbd = &priv->rxbd[i];
 587                 struct buffer_state *rx_buff = &priv->rx_buff[i];
 588 
 589                 if (rx_buff->skb) {
 590                         dma_unmap_single(&ndev->dev,
 591                                          dma_unmap_addr(rx_buff, addr),
 592                                          dma_unmap_len(rx_buff, len),
 593                                          DMA_FROM_DEVICE);
 594 
 595                         /* return the sk_buff to system */
 596                         dev_kfree_skb_irq(rx_buff->skb);
 597                 }
 598 
 599                 rxbd->info = 0;
 600                 rxbd->data = 0;
 601                 rx_buff->skb = NULL;
 602         }
 603 }
 604 
 605 /**
 606  * arc_emac_stop - Close the network device.
 607  * @ndev:       Pointer to the network device.
 608  *
 609  * This function stops the Tx queue, disables EMAC interrupts, disables the
 610  * EMAC itself and frees the Rx and Tx buffers.
 611  * It also stops the PHY device associated with the EMAC device.
 612  */
 613 static int arc_emac_stop(struct net_device *ndev)
 614 {
 615         struct arc_emac_priv *priv = netdev_priv(ndev);
 616 
 617         napi_disable(&priv->napi);
 618         netif_stop_queue(ndev);
 619 
 620         phy_stop(ndev->phydev);
 621 
 622         /* Disable interrupts */
 623         arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 624 
 625         /* Disable EMAC */
 626         arc_reg_clr(priv, R_CTRL, EN_MASK);
 627 
 628         /* Return the sk_buff to system */
 629         arc_free_tx_queue(ndev);
 630         arc_free_rx_queue(ndev);
 631 
 632         return 0;
 633 }
 634 
 635 /**
 636  * arc_emac_stats - Get system network statistics.
 637  * @ndev:       Pointer to net_device structure.
 638  *
 639  * Returns the address of the device statistics structure.
 640  * Statistics are updated in interrupt handler.
 641  */
 642 static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
 643 {
 644         struct arc_emac_priv *priv = netdev_priv(ndev);
 645         struct net_device_stats *stats = &ndev->stats;
 646         unsigned long miss, rxerr;
 647         u8 rxcrc, rxfram, rxoflow;
 648 
 649         rxerr = arc_reg_get(priv, R_RXERR);
 650         miss = arc_reg_get(priv, R_MISS);
 651 
 652         rxcrc = rxerr;
 653         rxfram = rxerr >> 8;
 654         rxoflow = rxerr >> 16;
 655 
 656         stats->rx_errors += miss;
 657         stats->rx_errors += rxcrc + rxfram + rxoflow;
 658 
 659         stats->rx_over_errors += rxoflow;
 660         stats->rx_frame_errors += rxfram;
 661         stats->rx_crc_errors += rxcrc;
 662         stats->rx_missed_errors += miss;
 663 
 664         return stats;
 665 }
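
/*
 * Example of the R_RXERR unpacking above: a raw register value of 0x00030201
 * yields rxcrc == 0x01, rxfram == 0x02 and rxoflow == 0x03, i.e. byte 0
 * counts CRC errors, byte 1 framing errors and byte 2 FIFO overflows, as
 * masked out by this driver.
 */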
 666 
 667 /**
 668  * arc_emac_tx - Starts the data transmission.
 669  * @skb:        sk_buff pointer that contains data to be transmitted.
 670  * @ndev:       Pointer to net_device structure.
 671  *
 672  * returns: NETDEV_TX_OK, on success
 673  *              NETDEV_TX_BUSY, if no Tx descriptors are free.
 674  *
 675  * This function is invoked from upper layers to initiate transmission.
 676  */
 677 static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 678 {
 679         struct arc_emac_priv *priv = netdev_priv(ndev);
 680         unsigned int len, *txbd_curr = &priv->txbd_curr;
 681         struct net_device_stats *stats = &ndev->stats;
 682         __le32 *info = &priv->txbd[*txbd_curr].info;
 683         dma_addr_t addr;
 684 
 685         if (skb_padto(skb, ETH_ZLEN))
 686                 return NETDEV_TX_OK;
 687 
 688         len = max_t(unsigned int, ETH_ZLEN, skb->len);
 689 
 690         if (unlikely(!arc_emac_tx_avail(priv))) {
 691                 netif_stop_queue(ndev);
 692                 netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 693                 return NETDEV_TX_BUSY;
 694         }
 695 
 696         addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
 697                               DMA_TO_DEVICE);
 698 
 699         if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
 700                 stats->tx_dropped++;
 701                 stats->tx_errors++;
 702                 dev_kfree_skb_any(skb);
 703                 return NETDEV_TX_OK;
 704         }
 705         dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
 706         dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
 707 
 708         priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
 709 
 710         /* Make sure pointer to data buffer is set */
 711         wmb();
 712 
 713         skb_tx_timestamp(skb);
 714 
 715         *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
 716 
 717         /* Make sure info word is set */
 718         wmb();
 719 
 720         priv->tx_buff[*txbd_curr].skb = skb;
 721 
 722         /* Increment index to point to the next BD */
 723         *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 724 
 725         /* Ensure that tx_clean() sees the new txbd_curr before
 726          * checking the queue status. This prevents an unneeded wake
 727          * of the queue in tx_clean().
 728          */
 729         smp_mb();
 730 
 731         if (!arc_emac_tx_avail(priv)) {
 732                 netif_stop_queue(ndev);
 733                 /* Refresh tx_dirty */
 734                 smp_mb();
 735                 if (arc_emac_tx_avail(priv))
 736                         netif_start_queue(ndev);
 737         }
 738 
 739         arc_reg_set(priv, R_STATUS, TXPL_MASK);
 740 
 741         return NETDEV_TX_OK;
 742 }
 743 
 744 static void arc_emac_set_address_internal(struct net_device *ndev)
 745 {
 746         struct arc_emac_priv *priv = netdev_priv(ndev);
 747         unsigned int addr_low, addr_hi;
 748 
 749         addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]);
 750         addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]);
 751 
 752         arc_reg_set(priv, R_ADDRL, addr_low);
 753         arc_reg_set(priv, R_ADDRH, addr_hi);
 754 }
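
/*
 * Worked example of the packing above: for a MAC address of
 * 00:11:22:33:44:55, dev_addr[0..3] read as a little-endian 32-bit word give
 * addr_low == 0x33221100, and dev_addr[4..5] give addr_hi == 0x5544.
 */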
 755 
 756 /**
 757  * arc_emac_set_address - Set the MAC address for this device.
 758  * @ndev:       Pointer to net_device structure.
 759  * @p:          6-byte address to be written as the MAC address.
 760  *
 761  * This function copies the HW address from the sockaddr structure to the
 762  * net_device structure and updates the address in HW.
 763  *
 764  * returns:     -EBUSY if the net device is busy or 0 if the address is set
 765  *              successfully.
 766  */
 767 static int arc_emac_set_address(struct net_device *ndev, void *p)
 768 {
 769         struct sockaddr *addr = p;
 770 
 771         if (netif_running(ndev))
 772                 return -EBUSY;
 773 
 774         if (!is_valid_ether_addr(addr->sa_data))
 775                 return -EADDRNOTAVAIL;
 776 
 777         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 778 
 779         arc_emac_set_address_internal(ndev);
 780 
 781         return 0;
 782 }
 783 
 784 static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 785 {
 786         if (!netif_running(dev))
 787                 return -EINVAL;
 788 
 789         if (!dev->phydev)
 790                 return -ENODEV;
 791 
 792         return phy_mii_ioctl(dev->phydev, rq, cmd);
 793 }
 794 
 795 
 796 /**
 797  * arc_emac_restart - Restart EMAC
 798  * @ndev:       Pointer to net_device structure.
 799  *
 800  * This function does a hardware reset of the EMAC in order to restore
 801  * reception of network packets.
 802  */
 803 static void arc_emac_restart(struct net_device *ndev)
 804 {
 805         struct arc_emac_priv *priv = netdev_priv(ndev);
 806         struct net_device_stats *stats = &ndev->stats;
 807         int i;
 808 
 809         if (net_ratelimit())
 810                 netdev_warn(ndev, "restarting stalled EMAC\n");
 811 
 812         netif_stop_queue(ndev);
 813 
 814         /* Disable interrupts */
 815         arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 816 
 817         /* Disable EMAC */
 818         arc_reg_clr(priv, R_CTRL, EN_MASK);
 819 
 820         /* Return the sk_buff to system */
 821         arc_free_tx_queue(ndev);
 822 
 823         /* Clean Tx BD's */
 824         priv->txbd_curr = 0;
 825         priv->txbd_dirty = 0;
 826         memset(priv->txbd, 0, TX_RING_SZ);
 827 
 828         for (i = 0; i < RX_BD_NUM; i++) {
 829                 struct arc_emac_bd *rxbd = &priv->rxbd[i];
 830                 unsigned int info = le32_to_cpu(rxbd->info);
 831 
 832                 if (!(info & FOR_EMAC)) {
 833                         stats->rx_errors++;
 834                         stats->rx_dropped++;
 835                 }
 836                 /* Return ownership to EMAC */
 837                 rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
 838         }
 839         priv->last_rx_bd = 0;
 840 
 841         /* Make sure info is visible to EMAC before enable */
 842         wmb();
 843 
 844         /* Enable interrupts */
 845         arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 846 
 847         /* Enable EMAC */
 848         arc_reg_or(priv, R_CTRL, EN_MASK);
 849 
 850         netif_start_queue(ndev);
 851 }
 852 
 853 static const struct net_device_ops arc_emac_netdev_ops = {
 854         .ndo_open               = arc_emac_open,
 855         .ndo_stop               = arc_emac_stop,
 856         .ndo_start_xmit         = arc_emac_tx,
 857         .ndo_set_mac_address    = arc_emac_set_address,
 858         .ndo_get_stats          = arc_emac_stats,
 859         .ndo_set_rx_mode        = arc_emac_set_rx_mode,
 860         .ndo_do_ioctl           = arc_emac_ioctl,
 861 #ifdef CONFIG_NET_POLL_CONTROLLER
 862         .ndo_poll_controller    = arc_emac_poll_controller,
 863 #endif
 864 };
 865 
 866 int arc_emac_probe(struct net_device *ndev, int interface)
 867 {
 868         struct device *dev = ndev->dev.parent;
 869         struct resource res_regs;
 870         struct device_node *phy_node;
 871         struct phy_device *phydev = NULL;
 872         struct arc_emac_priv *priv;
 873         const char *mac_addr;
 874         unsigned int id, clock_frequency, irq;
 875         int err;
 876 
 877         /* Get PHY from device tree */
 878         phy_node = of_parse_phandle(dev->of_node, "phy", 0);
 879         if (!phy_node) {
 880                 dev_err(dev, "failed to retrieve phy description from device tree\n");
 881                 return -ENODEV;
 882         }
 883 
 884         /* Get EMAC registers base address from device tree */
 885         err = of_address_to_resource(dev->of_node, 0, &res_regs);
 886         if (err) {
 887                 dev_err(dev, "failed to retrieve registers base from device tree\n");
 888                 err = -ENODEV;
 889                 goto out_put_node;
 890         }
 891 
 892         /* Get IRQ from device tree */
 893         irq = irq_of_parse_and_map(dev->of_node, 0);
 894         if (!irq) {
 895                 dev_err(dev, "failed to retrieve <irq> value from device tree\n");
 896                 err = -ENODEV;
 897                 goto out_put_node;
 898         }
 899 
 900         ndev->netdev_ops = &arc_emac_netdev_ops;
 901         ndev->ethtool_ops = &arc_emac_ethtool_ops;
 902         ndev->watchdog_timeo = TX_TIMEOUT;
 903 
 904         priv = netdev_priv(ndev);
 905         priv->dev = dev;
 906 
 907         priv->regs = devm_ioremap_resource(dev, &res_regs);
 908         if (IS_ERR(priv->regs)) {
 909                 err = PTR_ERR(priv->regs);
 910                 goto out_put_node;
 911         }
 912 
 913         dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
 914 
 915         if (priv->clk) {
 916                 err = clk_prepare_enable(priv->clk);
 917                 if (err) {
 918                         dev_err(dev, "failed to enable clock\n");
 919                         goto out_put_node;
 920                 }
 921 
 922                 clock_frequency = clk_get_rate(priv->clk);
 923         } else {
 924                 /* Get CPU clock frequency from device tree */
 925                 if (of_property_read_u32(dev->of_node, "clock-frequency",
 926                                          &clock_frequency)) {
 927                         dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
 928                         err = -EINVAL;
 929                         goto out_put_node;
 930                 }
 931         }
 932 
 933         id = arc_reg_get(priv, R_ID);
 934 
 935         /* Check for EMAC revision 5 or 7, magic number */
 936         if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
 937                 dev_err(dev, "ARC EMAC not detected, id=0x%x\n", id);
 938                 err = -ENODEV;
 939                 goto out_clken;
 940         }
 941         dev_info(dev, "ARC EMAC detected with id: 0x%x\n", id);
 942 
 943         /* Set poll rate so that it polls every 1 ms */
 944         arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
 945 
 946         ndev->irq = irq;
 947         dev_info(dev, "IRQ is %d\n", ndev->irq);
 948 
 949         /* Register interrupt handler for device */
 950         err = devm_request_irq(dev, ndev->irq, arc_emac_intr, 0,
 951                                ndev->name, ndev);
 952         if (err) {
 953                 dev_err(dev, "could not allocate IRQ\n");
 954                 goto out_clken;
 955         }
 956 
 957         /* Get MAC address from device tree */
 958         mac_addr = of_get_mac_address(dev->of_node);
 959 
 960         if (!IS_ERR(mac_addr))
 961                 ether_addr_copy(ndev->dev_addr, mac_addr);
 962         else
 963                 eth_hw_addr_random(ndev);
 964 
 965         arc_emac_set_address_internal(ndev);
 966         dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
 967 
 968         /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */
 969         priv->rxbd = dmam_alloc_coherent(dev, RX_RING_SZ + TX_RING_SZ,
 970                                          &priv->rxbd_dma, GFP_KERNEL);
 971 
 972         if (!priv->rxbd) {
 973                 dev_err(dev, "failed to allocate data buffers\n");
 974                 err = -ENOMEM;
 975                 goto out_clken;
 976         }
 977 
 978         priv->txbd = priv->rxbd + RX_BD_NUM;
 979 
 980         priv->txbd_dma = priv->rxbd_dma + RX_RING_SZ;
 981         dev_dbg(dev, "EMAC Device addr: Rx Ring [0x%x], Tx Ring[%x]\n",
 982                 (unsigned int)priv->rxbd_dma, (unsigned int)priv->txbd_dma);
 983 
 984         err = arc_mdio_probe(priv);
 985         if (err) {
 986                 dev_err(dev, "failed to probe MII bus\n");
 987                 goto out_clken;
 988         }
 989 
 990         phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
 991                                 interface);
 992         if (!phydev) {
 993                 dev_err(dev, "of_phy_connect() failed\n");
 994                 err = -ENODEV;
 995                 goto out_mdio;
 996         }
 997 
 998         dev_info(dev, "connected to %s phy with id 0x%x\n",
 999                  phydev->drv->name, phydev->phy_id);
1000 
1001         netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
1002 
1003         err = register_netdev(ndev);
1004         if (err) {
1005                 dev_err(dev, "failed to register network device\n");
1006                 goto out_netif_api;
1007         }
1008 
1009         of_node_put(phy_node);
1010         return 0;
1011 
1012 out_netif_api:
1013         netif_napi_del(&priv->napi);
1014         phy_disconnect(phydev);
1015 out_mdio:
1016         arc_mdio_remove(priv);
1017 out_clken:
1018         if (priv->clk)
1019                 clk_disable_unprepare(priv->clk);
1020 out_put_node:
1021         of_node_put(phy_node);
1022 
1023         return err;
1024 }
1025 EXPORT_SYMBOL_GPL(arc_emac_probe);
1026 
1027 int arc_emac_remove(struct net_device *ndev)
1028 {
1029         struct arc_emac_priv *priv = netdev_priv(ndev);
1030 
1031         phy_disconnect(ndev->phydev);
1032         arc_mdio_remove(priv);
1033         unregister_netdev(ndev);
1034         netif_napi_del(&priv->napi);
1035 
1036         if (!IS_ERR(priv->clk))
1037                 clk_disable_unprepare(priv->clk);
1038 
1039         return 0;
1040 }
1041 EXPORT_SYMBOL_GPL(arc_emac_remove);
1042 
1043 MODULE_AUTHOR("Alexey Brodkin <abrodkin@synopsys.com>");
1044 MODULE_DESCRIPTION("ARC EMAC driver");
1045 MODULE_LICENSE("GPL");
