root/drivers/net/ethernet/xilinx/ll_temac_main.c


DEFINITIONS

This source file includes the following definitions:
  1. _temac_ior_be
  2. _temac_iow_be
  3. _temac_ior_le
  4. _temac_iow_le
  5. hard_acs_rdy
  6. hard_acs_rdy_or_timeout
  7. temac_indirect_busywait
  8. temac_indirect_in32
  9. temac_indirect_in32_locked
  10. temac_indirect_out32
  11. temac_indirect_out32_locked
  12. temac_dma_in32_be
  13. temac_dma_in32_le
  14. temac_dma_out32_be
  15. temac_dma_out32_le
  16. temac_dma_dcr_in
  17. temac_dma_dcr_out
  18. temac_dcr_setup
  19. temac_dcr_setup
  20. temac_dma_bd_release
  21. temac_dma_bd_init
  22. temac_do_set_mac_address
  23. temac_init_mac_address
  24. temac_set_mac_address
  25. temac_set_multicast_list
  26. temac_setoptions
  27. temac_device_reset
  28. temac_adjust_link
  29. ptr_to_txbd
  30. ptr_from_txbd
  31. ptr_to_txbd
  32. ptr_from_txbd
  33. temac_start_xmit_done
  34. temac_check_tx_bd_space
  35. temac_start_xmit
  36. ll_temac_recv_buffers_available
  37. ll_temac_recv
  38. ll_temac_restart_work_func
  39. ll_temac_tx_irq
  40. ll_temac_rx_irq
  41. temac_open
  42. temac_stop
  43. temac_poll_controller
  44. temac_ioctl
  45. temac_show_llink_regs
  46. temac_probe
  47. temac_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Driver for Xilinx TEMAC Ethernet device
   4  *
   5  * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   6  * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   7  * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   8  *
   9  * This is a driver for the Xilinx ll_temac ipcore which is often used
  10  * in the Virtex and Spartan series of chips.
  11  *
  12  * Notes:
  13  * - The ll_temac hardware uses indirect access for many of the TEMAC
  14  *   registers, including the MDIO bus.  However, indirect access to MDIO
  15  *   registers takes considerably more clock cycles than to TEMAC registers.
  16  *   MDIO accesses are long, so threads doing them should probably sleep
  17  *   rather than busywait.  However, since only one indirect access can be
  18  *   in progress at any given time, that means that *all* indirect accesses
  19  *   could end up sleeping (to wait for an MDIO access to complete).
  20  *   Fortunately none of the indirect accesses are on the 'hot' path for tx
  21  *   or rx, so this should be okay.
  22  *
  23  * TODO:
  24  * - Factor out locallink DMA code into separate driver
  25  * - Fix support for hardware checksumming.
  26  * - Testing.  Lots and lots of testing.
  27  *
  28  */
  29 
  30 #include <linux/delay.h>
  31 #include <linux/etherdevice.h>
  32 #include <linux/mii.h>
  33 #include <linux/module.h>
  34 #include <linux/mutex.h>
  35 #include <linux/netdevice.h>
  36 #include <linux/if_ether.h>
  37 #include <linux/of.h>
  38 #include <linux/of_device.h>
  39 #include <linux/of_irq.h>
  40 #include <linux/of_mdio.h>
  41 #include <linux/of_net.h>
  42 #include <linux/of_platform.h>
  43 #include <linux/of_address.h>
  44 #include <linux/skbuff.h>
  45 #include <linux/spinlock.h>
  46 #include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
  47 #include <linux/udp.h>      /* needed for sizeof(udphdr) */
  48 #include <linux/phy.h>
  49 #include <linux/in.h>
  50 #include <linux/io.h>
  51 #include <linux/ip.h>
  52 #include <linux/slab.h>
  53 #include <linux/interrupt.h>
  54 #include <linux/workqueue.h>
  55 #include <linux/dma-mapping.h>
  56 #include <linux/processor.h>
  57 #include <linux/platform_data/xilinx-ll-temac.h>
  58 
  59 #include "ll_temac.h"
  60 
  61 #define TX_BD_NUM   64
  62 #define RX_BD_NUM   128
  63 
  64 /* ---------------------------------------------------------------------
  65  * Low level register access functions
  66  */
  67 
  68 static u32 _temac_ior_be(struct temac_local *lp, int offset)
  69 {
  70         return ioread32be(lp->regs + offset);
  71 }
  72 
  73 static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
  74 {
  75         return iowrite32be(value, lp->regs + offset);
  76 }
  77 
  78 static u32 _temac_ior_le(struct temac_local *lp, int offset)
  79 {
  80         return ioread32(lp->regs + offset);
  81 }
  82 
  83 static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
  84 {
  85         return iowrite32(value, lp->regs + offset);
  86 }
  87 
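     /* The HARD_ACS_RDY bit in the RDY0 register indicates that the
      * previously initiated indirect register access has completed.
      */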
  88 static bool hard_acs_rdy(struct temac_local *lp)
  89 {
  90         return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
  91 }
  92 
  93 static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
  94 {
  95         ktime_t cur = ktime_get();
  96 
  97         return hard_acs_rdy(lp) || ktime_after(cur, timeout);
  98 }
  99 
 100 /* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 101  * that was used before, and should cover MDIO bus speed down to 3200
 102  * Hz.
 103  */
 104 #define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
 105 
 106 /**
 107  * temac_indirect_busywait - Wait for current indirect register access
 108  * to complete.
 109  */
 110 int temac_indirect_busywait(struct temac_local *lp)
 111 {
 112         ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);
 113 
 114         spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
 115         if (WARN_ON(!hard_acs_rdy(lp)))
 116                 return -ETIMEDOUT;
 117         else
 118                 return 0;
 119 }
 120 
 121 /**
 122  * temac_indirect_in32 - Indirect register read access.  This function
 123  * must be called without lp->indirect_lock being held.
 124  */
 125 u32 temac_indirect_in32(struct temac_local *lp, int reg)
 126 {
 127         unsigned long flags;
 128         int val;
 129 
 130         spin_lock_irqsave(lp->indirect_lock, flags);
 131         val = temac_indirect_in32_locked(lp, reg);
 132         spin_unlock_irqrestore(lp->indirect_lock, flags);
 133         return val;
 134 }
 135 
 136 /**
 137  * temac_indirect_in32_locked - Indirect register read access.  This
 138  * function must be called with lp->indirect_lock being held.  Use
 139  * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 140  * repeated lock/unlock and to ensure uninterrupted access to indirect
 141  * registers.
 142  */
 143 u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
 144 {
 145         /* This initial wait should normally not spin, as we always
 146          * try to wait for indirect access to complete before
 147          * releasing the indirect_lock.
 148          */
 149         if (WARN_ON(temac_indirect_busywait(lp)))
 150                 return -ETIMEDOUT;
 151         /* Initiate read from indirect register */
 152         temac_iow(lp, XTE_CTL0_OFFSET, reg);
 153         /* Wait for indirect register access to complete.  We really
 154          * should not see timeouts, and could even end up causing
  155          * problems for the following indirect access, so let's make a bit
 156          * of WARN noise.
 157          */
 158         if (WARN_ON(temac_indirect_busywait(lp)))
 159                 return -ETIMEDOUT;
 160         /* Value is ready now */
 161         return temac_ior(lp, XTE_LSW0_OFFSET);
 162 }
 163 
 164 /**
 165  * temac_indirect_out32 - Indirect register write access.  This function
 166  * must be called without lp->indirect_lock being held.
 167  */
 168 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
 169 {
 170         unsigned long flags;
 171 
 172         spin_lock_irqsave(lp->indirect_lock, flags);
 173         temac_indirect_out32_locked(lp, reg, value);
 174         spin_unlock_irqrestore(lp->indirect_lock, flags);
 175 }
 176 
 177 /**
 178  * temac_indirect_out32_locked - Indirect register write access.  This
 179  * function must be called with lp->indirect_lock being held.  Use
 180  * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 181  * repeated lock/unlock and to ensure uninterrupted access to indirect
 182  * registers.
 183  */
 184 void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
 185 {
 186         /* As in temac_indirect_in32_locked(), we should normally not
 187          * spin here.  And if it happens, we actually end up silently
 188          * ignoring the write request.  Ouch.
 189          */
 190         if (WARN_ON(temac_indirect_busywait(lp)))
 191                 return;
 192         /* Initiate write to indirect register */
 193         temac_iow(lp, XTE_LSW0_OFFSET, value);
 194         temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
 195         /* As in temac_indirect_in32_locked(), we should not see timeouts
 196          * here.  And if it happens, we continue before the write has
 197          * completed.  Not good.
 198          */
 199         WARN_ON(temac_indirect_busywait(lp));
 200 }
 201 
 202 /**
  203  * temac_dma_in32_* - Memory mapped DMA read.  These functions expect a
 204  * register input that is based on DCR word addresses which are then
 205  * converted to memory mapped byte addresses.  To be assigned to
 206  * lp->dma_in32.
 207  */
 208 static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
 209 {
 210         return ioread32be(lp->sdma_regs + (reg << 2));
 211 }
 212 
 213 static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
 214 {
 215         return ioread32(lp->sdma_regs + (reg << 2));
 216 }
 217 
 218 /**
  219  * temac_dma_out32_* - Memory mapped DMA write.  These functions expect
 220  * a register input that is based on DCR word addresses which are then
 221  * converted to memory mapped byte addresses.  To be assigned to
 222  * lp->dma_out32.
 223  */
 224 static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
 225 {
 226         iowrite32be(value, lp->sdma_regs + (reg << 2));
 227 }
 228 
 229 static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
 230 {
 231         iowrite32(value, lp->sdma_regs + (reg << 2));
 232 }
 233 
 234 /* DMA register access functions can be DCR based or memory mapped.
 235  * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 236  * memory mapped.
 237  */
 238 #ifdef CONFIG_PPC_DCR
 239 
 240 /**
  241  * temac_dma_dcr_in - DCR based DMA read
 242  */
 243 static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
 244 {
 245         return dcr_read(lp->sdma_dcrs, reg);
 246 }
 247 
 248 /**
  249  * temac_dma_dcr_out - DCR based DMA write
 250  */
 251 static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
 252 {
 253         dcr_write(lp->sdma_dcrs, reg, value);
 254 }
 255 
 256 /**
  257  * temac_dcr_setup - If the DMA is DCR based, then set up the address and
  258  * I/O functions
 259  */
 260 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
 261                                 struct device_node *np)
 262 {
 263         unsigned int dcrs;
 264 
  265         /* set up the DCR address mapping if it's in the device tree */
 266 
 267         dcrs = dcr_resource_start(np, 0);
 268         if (dcrs != 0) {
 269                 lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
 270                 lp->dma_in = temac_dma_dcr_in;
 271                 lp->dma_out = temac_dma_dcr_out;
 272                 dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
 273                 return 0;
 274         }
 275         /* no DCR in the device tree, indicate a failure */
 276         return -1;
 277 }
 278 
 279 #else
 280 
 281 /*
 282  * temac_dcr_setup - This is a stub for when DCR is not supported,
 283  * such as with MicroBlaze and x86
 284  */
 285 static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
 286                                 struct device_node *np)
 287 {
 288         return -1;
 289 }
 290 
 291 #endif
 292 
 293 /**
 294  * temac_dma_bd_release - Release buffer descriptor rings
 295  */
 296 static void temac_dma_bd_release(struct net_device *ndev)
 297 {
 298         struct temac_local *lp = netdev_priv(ndev);
 299         int i;
 300 
 301         /* Reset Local Link (DMA) */
 302         lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
 303 
 304         for (i = 0; i < RX_BD_NUM; i++) {
 305                 if (!lp->rx_skb[i])
 306                         break;
 307                 else {
 308                         dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
 309                                         XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
 310                         dev_kfree_skb(lp->rx_skb[i]);
 311                 }
 312         }
 313         if (lp->rx_bd_v)
 314                 dma_free_coherent(ndev->dev.parent,
 315                                 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 316                                 lp->rx_bd_v, lp->rx_bd_p);
 317         if (lp->tx_bd_v)
 318                 dma_free_coherent(ndev->dev.parent,
 319                                 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 320                                 lp->tx_bd_v, lp->tx_bd_p);
 321 }
 322 
 323 /**
 324  * temac_dma_bd_init - Setup buffer descriptor rings
 325  */
 326 static int temac_dma_bd_init(struct net_device *ndev)
 327 {
 328         struct temac_local *lp = netdev_priv(ndev);
 329         struct sk_buff *skb;
 330         dma_addr_t skb_dma_addr;
 331         int i;
 332 
 333         lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
 334                                   GFP_KERNEL);
  335         if (!lp->rx_skb)
  336                 return -ENOMEM;
 337 
 338         /* allocate the tx and rx ring buffer descriptors. */
 339         /* returns a virtual address and a physical address. */
 340         lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 341                                          sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 342                                          &lp->tx_bd_p, GFP_KERNEL);
 343         if (!lp->tx_bd_v)
 344                 goto out;
 345 
 346         lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 347                                          sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 348                                          &lp->rx_bd_p, GFP_KERNEL);
 349         if (!lp->rx_bd_v)
 350                 goto out;
 351 
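     	/* Link the descriptors into circular rings: each "next" field
     	 * holds the DMA address of the following descriptor, and the
     	 * last descriptor wraps back to the first.
     	 */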
 352         for (i = 0; i < TX_BD_NUM; i++) {
 353                 lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
 354                                 + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
 355         }
 356 
 357         for (i = 0; i < RX_BD_NUM; i++) {
 358                 lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
 359                                 + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
 360 
 361                 skb = netdev_alloc_skb_ip_align(ndev,
 362                                                 XTE_MAX_JUMBO_FRAME_SIZE);
 363                 if (!skb)
 364                         goto out;
 365 
 366                 lp->rx_skb[i] = skb;
 367                 /* returns physical address of skb->data */
 368                 skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 369                                               XTE_MAX_JUMBO_FRAME_SIZE,
 370                                               DMA_FROM_DEVICE);
 371                 if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
 372                         goto out;
 373                 lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
 374                 lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
 375                 lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
 376         }
 377 
 378         /* Configure DMA channel (irq setup) */
 379         lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
 380                     0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
 381                     CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
 382                     CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
 383         lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
 384                     CHNL_CTRL_IRQ_IOE |
 385                     CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
 386                     CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
 387 
 388         /* Init descriptor indexes */
 389         lp->tx_bd_ci = 0;
 390         lp->tx_bd_next = 0;
 391         lp->tx_bd_tail = 0;
 392         lp->rx_bd_ci = 0;
 393         lp->rx_bd_tail = RX_BD_NUM - 1;
 394 
 395         /* Enable RX DMA transfers */
 396         wmb();
 397         lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
 398         lp->dma_out(lp, RX_TAILDESC_PTR,
 399                        lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
 400 
 401         /* Prepare for TX DMA transfer */
 402         lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 403 
 404         return 0;
 405 
 406 out:
 407         temac_dma_bd_release(ndev);
 408         return -ENOMEM;
 409 }
 410 
 411 /* ---------------------------------------------------------------------
 412  * net_device_ops
 413  */
 414 
 415 static void temac_do_set_mac_address(struct net_device *ndev)
 416 {
 417         struct temac_local *lp = netdev_priv(ndev);
 418         unsigned long flags;
 419 
  420         /* Set up the unicast MAC address filter with the device's MAC address */
 421         spin_lock_irqsave(lp->indirect_lock, flags);
 422         temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
 423                                     (ndev->dev_addr[0]) |
 424                                     (ndev->dev_addr[1] << 8) |
 425                                     (ndev->dev_addr[2] << 16) |
 426                                     (ndev->dev_addr[3] << 24));
  427         /* There are reserved bits in EUAW1, so don't affect them;
  428          * set only the MAC bits [47:32] in EUAW1 */
 429         temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
 430                                     (ndev->dev_addr[4] & 0x000000ff) |
 431                                     (ndev->dev_addr[5] << 8));
 432         spin_unlock_irqrestore(lp->indirect_lock, flags);
 433 }
 434 
 435 static int temac_init_mac_address(struct net_device *ndev, const void *address)
 436 {
 437         ether_addr_copy(ndev->dev_addr, address);
 438         if (!is_valid_ether_addr(ndev->dev_addr))
 439                 eth_hw_addr_random(ndev);
 440         temac_do_set_mac_address(ndev);
 441         return 0;
 442 }
 443 
 444 static int temac_set_mac_address(struct net_device *ndev, void *p)
 445 {
 446         struct sockaddr *addr = p;
 447 
 448         if (!is_valid_ether_addr(addr->sa_data))
 449                 return -EADDRNOTAVAIL;
 450         memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
 451         temac_do_set_mac_address(ndev);
 452         return 0;
 453 }
 454 
 455 static void temac_set_multicast_list(struct net_device *ndev)
 456 {
 457         struct temac_local *lp = netdev_priv(ndev);
 458         u32 multi_addr_msw, multi_addr_lsw;
 459         int i = 0;
 460         unsigned long flags;
 461         bool promisc_mode_disabled = false;
 462 
 463         if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
 464             (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
 465                 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
 466                 dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 467                 return;
 468         }
 469 
 470         spin_lock_irqsave(lp->indirect_lock, flags);
 471 
 472         if (!netdev_mc_empty(ndev)) {
 473                 struct netdev_hw_addr *ha;
 474 
 475                 netdev_for_each_mc_addr(ha, ndev) {
 476                         if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
 477                                 break;
 478                         multi_addr_msw = ((ha->addr[3] << 24) |
 479                                           (ha->addr[2] << 16) |
 480                                           (ha->addr[1] << 8) |
 481                                           (ha->addr[0]));
 482                         temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
 483                                                     multi_addr_msw);
 484                         multi_addr_lsw = ((ha->addr[5] << 8) |
 485                                           (ha->addr[4]) | (i << 16));
 486                         temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
 487                                                     multi_addr_lsw);
 488                         i++;
 489                 }
 490         }
 491 
 492         /* Clear all or remaining/unused address table entries */
 493         while (i < MULTICAST_CAM_TABLE_NUM) {
 494                 temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
 495                 temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
 496                 i++;
 497         }
 498 
 499         /* Enable address filter block if currently disabled */
 500         if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
 501             & XTE_AFM_EPPRM_MASK) {
 502                 temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
 503                 promisc_mode_disabled = true;
 504         }
 505 
 506         spin_unlock_irqrestore(lp->indirect_lock, flags);
 507 
 508         if (promisc_mode_disabled)
 509                 dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
 510 }
 511 
 512 static struct temac_option {
 513         int flg;
 514         u32 opt;
 515         u32 reg;
 516         u32 m_or;
 517         u32 m_and;
 518 } temac_options[] = {
 519         /* Turn on jumbo packet support for both Rx and Tx */
 520         {
 521                 .opt = XTE_OPTION_JUMBO,
 522                 .reg = XTE_TXC_OFFSET,
 523                 .m_or = XTE_TXC_TXJMBO_MASK,
 524         },
 525         {
 526                 .opt = XTE_OPTION_JUMBO,
 527                 .reg = XTE_RXC1_OFFSET,
  528                 .m_or = XTE_RXC1_RXJMBO_MASK,
 529         },
 530         /* Turn on VLAN packet support for both Rx and Tx */
 531         {
 532                 .opt = XTE_OPTION_VLAN,
 533                 .reg = XTE_TXC_OFFSET,
  534                 .m_or = XTE_TXC_TXVLAN_MASK,
 535         },
 536         {
 537                 .opt = XTE_OPTION_VLAN,
 538                 .reg = XTE_RXC1_OFFSET,
  539                 .m_or = XTE_RXC1_RXVLAN_MASK,
 540         },
 541         /* Turn on FCS stripping on receive packets */
 542         {
 543                 .opt = XTE_OPTION_FCS_STRIP,
 544                 .reg = XTE_RXC1_OFFSET,
  545                 .m_or = XTE_RXC1_RXFCS_MASK,
 546         },
 547         /* Turn on FCS insertion on transmit packets */
 548         {
 549                 .opt = XTE_OPTION_FCS_INSERT,
 550                 .reg = XTE_TXC_OFFSET,
  551                 .m_or = XTE_TXC_TXFCS_MASK,
 552         },
 553         /* Turn on length/type field checking on receive packets */
 554         {
 555                 .opt = XTE_OPTION_LENTYPE_ERR,
 556                 .reg = XTE_RXC1_OFFSET,
  557                 .m_or = XTE_RXC1_RXLT_MASK,
 558         },
 559         /* Turn on flow control */
 560         {
 561                 .opt = XTE_OPTION_FLOW_CONTROL,
 562                 .reg = XTE_FCC_OFFSET,
  563                 .m_or = XTE_FCC_RXFLO_MASK,
 564         },
 565         /* Turn on flow control */
 566         {
 567                 .opt = XTE_OPTION_FLOW_CONTROL,
 568                 .reg = XTE_FCC_OFFSET,
  569                 .m_or = XTE_FCC_TXFLO_MASK,
 570         },
  571         /* Turn on promiscuous frame filtering (all frames are received) */
 572         {
 573                 .opt = XTE_OPTION_PROMISC,
 574                 .reg = XTE_AFM_OFFSET,
  575                 .m_or = XTE_AFM_EPPRM_MASK,
 576         },
 577         /* Enable transmitter if not already enabled */
 578         {
 579                 .opt = XTE_OPTION_TXEN,
 580                 .reg = XTE_TXC_OFFSET,
  581                 .m_or = XTE_TXC_TXEN_MASK,
 582         },
  583         /* Enable receiver if not already enabled */
 584         {
 585                 .opt = XTE_OPTION_RXEN,
 586                 .reg = XTE_RXC1_OFFSET,
  587                 .m_or = XTE_RXC1_RXEN_MASK,
 588         },
 589         {}
 590 };
 591 
 592 /**
 593  * temac_setoptions
 594  */
 595 static u32 temac_setoptions(struct net_device *ndev, u32 options)
 596 {
 597         struct temac_local *lp = netdev_priv(ndev);
 598         struct temac_option *tp = &temac_options[0];
 599         int reg;
 600         unsigned long flags;
 601 
 602         spin_lock_irqsave(lp->indirect_lock, flags);
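             /* Walk the option table; for each option requested in
              * 'options', read-modify-write the corresponding register
              * with that option's mask set.
              */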
 603         while (tp->opt) {
 604                 reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
 605                 if (options & tp->opt) {
 606                         reg |= tp->m_or;
 607                         temac_indirect_out32_locked(lp, tp->reg, reg);
 608                 }
 609                 tp++;
 610         }
 611         spin_unlock_irqrestore(lp->indirect_lock, flags);
 612         lp->options |= options;
 613 
 614         return 0;
 615 }
 616 
 617 /* Initialize temac */
 618 static void temac_device_reset(struct net_device *ndev)
 619 {
 620         struct temac_local *lp = netdev_priv(ndev);
 621         u32 timeout;
 622         u32 val;
 623         unsigned long flags;
 624 
 625         /* Perform a software reset */
 626 
 627         /* 0x300 host enable bit ? */
 628         /* reset PHY through control register ?:1 */
 629 
 630         dev_dbg(&ndev->dev, "%s()\n", __func__);
 631 
 632         /* Reset the receiver and wait for it to finish reset */
 633         temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
 634         timeout = 1000;
 635         while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
 636                 udelay(1);
 637                 if (--timeout == 0) {
 638                         dev_err(&ndev->dev,
 639                                 "temac_device_reset RX reset timeout!!\n");
 640                         break;
 641                 }
 642         }
 643 
 644         /* Reset the transmitter and wait for it to finish reset */
 645         temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
 646         timeout = 1000;
 647         while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
 648                 udelay(1);
 649                 if (--timeout == 0) {
 650                         dev_err(&ndev->dev,
 651                                 "temac_device_reset TX reset timeout!!\n");
 652                         break;
 653                 }
 654         }
 655 
 656         /* Disable the receiver */
 657         spin_lock_irqsave(lp->indirect_lock, flags);
 658         val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
 659         temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
 660                                     val & ~XTE_RXC1_RXEN_MASK);
 661         spin_unlock_irqrestore(lp->indirect_lock, flags);
 662 
 663         /* Reset Local Link (DMA) */
 664         lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
 665         timeout = 1000;
 666         while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
 667                 udelay(1);
 668                 if (--timeout == 0) {
 669                         dev_err(&ndev->dev,
 670                                 "temac_device_reset DMA reset timeout!!\n");
 671                         break;
 672                 }
 673         }
 674         lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
 675 
 676         if (temac_dma_bd_init(ndev)) {
 677                 dev_err(&ndev->dev,
 678                                 "temac_device_reset descriptor allocation failed\n");
 679         }
 680 
 681         spin_lock_irqsave(lp->indirect_lock, flags);
 682         temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
 683         temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
 684         temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
 685         temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
 686         spin_unlock_irqrestore(lp->indirect_lock, flags);
 687 
 688         /* Sync default options with HW
 689          * but leave receiver and transmitter disabled.  */
 690         temac_setoptions(ndev,
 691                          lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
 692 
 693         temac_do_set_mac_address(ndev);
 694 
 695         /* Set address filter table */
 696         temac_set_multicast_list(ndev);
 697         if (temac_setoptions(ndev, lp->options))
 698                 dev_err(&ndev->dev, "Error setting TEMAC options\n");
 699 
 700         /* Init Driver variable */
 701         netif_trans_update(ndev); /* prevent tx timeout */
 702 }
 703 
 704 static void temac_adjust_link(struct net_device *ndev)
 705 {
 706         struct temac_local *lp = netdev_priv(ndev);
 707         struct phy_device *phy = ndev->phydev;
 708         u32 mii_speed;
 709         int link_state;
 710         unsigned long flags;
 711 
 712         /* hash together the state values to decide if something has changed */
 713         link_state = phy->speed | (phy->duplex << 1) | phy->link;
 714 
 715         if (lp->last_link != link_state) {
 716                 spin_lock_irqsave(lp->indirect_lock, flags);
 717                 mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
 718                 mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
 719 
 720                 switch (phy->speed) {
 721                 case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
 722                 case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
 723                 case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
 724                 }
 725 
 726                 /* Write new speed setting out to TEMAC */
 727                 temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
 728                 spin_unlock_irqrestore(lp->indirect_lock, flags);
 729 
 730                 lp->last_link = link_state;
 731                 phy_print_status(phy);
 732         }
 733 }
 734 
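     /* The skb pointer for an in-flight frame is stashed in otherwise
      * unused application words of its TX buffer descriptor, so that
      * temac_start_xmit_done() can retrieve and free the skb on
      * completion.  On 64-bit kernels the pointer is split across
      * app3/app4; on 32-bit it fits in app4 alone.
      */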
 735 #ifdef CONFIG_64BIT
 736 
 737 static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
 738 {
 739         bd->app3 = (u32)(((u64)p) >> 32);
 740         bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
 741 }
 742 
 743 static void *ptr_from_txbd(struct cdmac_bd *bd)
 744 {
 745         return (void *)(((u64)(bd->app3) << 32) | bd->app4);
 746 }
 747 
 748 #else
 749 
 750 static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
 751 {
 752         bd->app4 = (u32)p;
 753 }
 754 
 755 static void *ptr_from_txbd(struct cdmac_bd *bd)
 756 {
 757         return (void *)(bd->app4);
 758 }
 759 
 760 #endif
 761 
 762 static void temac_start_xmit_done(struct net_device *ndev)
 763 {
 764         struct temac_local *lp = netdev_priv(ndev);
 765         struct cdmac_bd *cur_p;
 766         unsigned int stat = 0;
 767         struct sk_buff *skb;
 768 
 769         cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 770         stat = be32_to_cpu(cur_p->app0);
 771 
 772         while (stat & STS_CTRL_APP0_CMPLT) {
 773                 dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
 774                                  be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
 775                 skb = (struct sk_buff *)ptr_from_txbd(cur_p);
 776                 if (skb)
 777                         dev_consume_skb_irq(skb);
 778                 cur_p->app0 = 0;
 779                 cur_p->app1 = 0;
 780                 cur_p->app2 = 0;
 781                 cur_p->app3 = 0;
 782                 cur_p->app4 = 0;
 783 
 784                 ndev->stats.tx_packets++;
 785                 ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
 786 
 787                 lp->tx_bd_ci++;
 788                 if (lp->tx_bd_ci >= TX_BD_NUM)
 789                         lp->tx_bd_ci = 0;
 790 
 791                 cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
 792                 stat = be32_to_cpu(cur_p->app0);
 793         }
 794 
 795         /* Matches barrier in temac_start_xmit */
 796         smp_mb();
 797 
 798         netif_wake_queue(ndev);
 799 }
 800 
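     /* Check whether num_frag + 1 buffer descriptors, starting at
      * tx_bd_tail, are free.  A descriptor is free once its app0
      * status word has been cleared by temac_start_xmit_done().
      * Returns NETDEV_TX_BUSY if any of them is still in use.
      */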
 801 static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
 802 {
 803         struct cdmac_bd *cur_p;
 804         int tail;
 805 
 806         tail = lp->tx_bd_tail;
 807         cur_p = &lp->tx_bd_v[tail];
 808 
 809         do {
 810                 if (cur_p->app0)
 811                         return NETDEV_TX_BUSY;
 812 
 813                 tail++;
 814                 if (tail >= TX_BD_NUM)
 815                         tail = 0;
 816 
 817                 cur_p = &lp->tx_bd_v[tail];
 818                 num_frag--;
 819         } while (num_frag >= 0);
 820 
 821         return 0;
 822 }
 823 
 824 static netdev_tx_t
 825 temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 826 {
 827         struct temac_local *lp = netdev_priv(ndev);
 828         struct cdmac_bd *cur_p;
 829         dma_addr_t start_p, tail_p, skb_dma_addr;
 830         int ii;
 831         unsigned long num_frag;
 832         skb_frag_t *frag;
 833 
 834         num_frag = skb_shinfo(skb)->nr_frags;
 835         frag = &skb_shinfo(skb)->frags[0];
 836         start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 837         cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 838 
 839         if (temac_check_tx_bd_space(lp, num_frag + 1)) {
 840                 if (netif_queue_stopped(ndev))
 841                         return NETDEV_TX_BUSY;
 842 
 843                 netif_stop_queue(ndev);
 844 
 845                 /* Matches barrier in temac_start_xmit_done */
 846                 smp_mb();
 847 
 848                 /* Space might have just been freed - check again */
  849                 if (temac_check_tx_bd_space(lp, num_frag + 1))
 850                         return NETDEV_TX_BUSY;
 851 
 852                 netif_wake_queue(ndev);
 853         }
 854 
 855         cur_p->app0 = 0;
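             /* For CHECKSUM_PARTIAL frames, program the checksum offload
              * words: app1 carries the checksum start offset in its upper
              * 16 bits and the offset where the result is inserted in its
              * lower 16 bits.
              */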
 856         if (skb->ip_summed == CHECKSUM_PARTIAL) {
 857                 unsigned int csum_start_off = skb_checksum_start_offset(skb);
 858                 unsigned int csum_index_off = csum_start_off + skb->csum_offset;
 859 
 860                 cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
 861                 cur_p->app1 = cpu_to_be32((csum_start_off << 16)
 862                                           | csum_index_off);
 863                 cur_p->app2 = 0;  /* initial checksum seed */
 864         }
 865 
 866         cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
 867         skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 868                                       skb_headlen(skb), DMA_TO_DEVICE);
 869         cur_p->len = cpu_to_be32(skb_headlen(skb));
 870         if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
 871                 dev_kfree_skb_any(skb);
 872                 ndev->stats.tx_dropped++;
 873                 return NETDEV_TX_OK;
 874         }
 875         cur_p->phys = cpu_to_be32(skb_dma_addr);
 876         ptr_to_txbd((void *)skb, cur_p);
 877 
 878         for (ii = 0; ii < num_frag; ii++) {
 879                 if (++lp->tx_bd_tail >= TX_BD_NUM)
 880                         lp->tx_bd_tail = 0;
 881 
 882                 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 883                 skb_dma_addr = dma_map_single(ndev->dev.parent,
 884                                               skb_frag_address(frag),
 885                                               skb_frag_size(frag),
 886                                               DMA_TO_DEVICE);
 887                 if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
 888                         if (--lp->tx_bd_tail < 0)
 889                                 lp->tx_bd_tail = TX_BD_NUM - 1;
 890                         cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 891                         while (--ii >= 0) {
 892                                 --frag;
 893                                 dma_unmap_single(ndev->dev.parent,
 894                                                  be32_to_cpu(cur_p->phys),
 895                                                  skb_frag_size(frag),
 896                                                  DMA_TO_DEVICE);
 897                                 if (--lp->tx_bd_tail < 0)
 898                                         lp->tx_bd_tail = TX_BD_NUM - 1;
 899                                 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 900                         }
 901                         dma_unmap_single(ndev->dev.parent,
 902                                          be32_to_cpu(cur_p->phys),
 903                                          skb_headlen(skb), DMA_TO_DEVICE);
 904                         dev_kfree_skb_any(skb);
 905                         ndev->stats.tx_dropped++;
 906                         return NETDEV_TX_OK;
 907                 }
 908                 cur_p->phys = cpu_to_be32(skb_dma_addr);
 909                 cur_p->len = cpu_to_be32(skb_frag_size(frag));
 910                 cur_p->app0 = 0;
 911                 frag++;
 912         }
 913         cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
 914 
 915         tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 916         lp->tx_bd_tail++;
 917         if (lp->tx_bd_tail >= TX_BD_NUM)
 918                 lp->tx_bd_tail = 0;
 919 
 920         skb_tx_timestamp(skb);
 921 
 922         /* Kick off the transfer */
 923         wmb();
 924         lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 925 
 926         return NETDEV_TX_OK;
 927 }
 928 
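     /* Count of RX buffer descriptors, from rx_bd_ci through rx_bd_tail
      * inclusive, that have a receive buffer attached and are thus
      * available to the DMA engine.  Returns 0 when the current
      * descriptor has no skb, i.e. reception has run completely dry.
      */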
 929 static int ll_temac_recv_buffers_available(struct temac_local *lp)
 930 {
 931         int available;
 932 
 933         if (!lp->rx_skb[lp->rx_bd_ci])
 934                 return 0;
 935         available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
 936         if (available <= 0)
 937                 available += RX_BD_NUM;
 938         return available;
 939 }
 940 
 941 static void ll_temac_recv(struct net_device *ndev)
 942 {
 943         struct temac_local *lp = netdev_priv(ndev);
 944         unsigned long flags;
 945         int rx_bd;
 946         bool update_tail = false;
 947 
 948         spin_lock_irqsave(&lp->rx_lock, flags);
 949 
  950         /* Process all received buffers, passing them on to the
  951          * network stack.  After this, the buffer descriptors will be
  952          * in an un-allocated state, where no skb is allocated for
  953          * them, and they are therefore not available for TEMAC/DMA.
  954          */
 955         do {
 956                 struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
 957                 struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
 958                 unsigned int bdstat = be32_to_cpu(bd->app0);
 959                 int length;
 960 
  961                 /* While this should not normally happen, we can end up
 962                  * here when GFP_ATOMIC allocations fail, and we
 963                  * therefore have un-allocated buffers.
 964                  */
 965                 if (!skb)
 966                         break;
 967 
 968                 /* Loop over all completed buffer descriptors */
 969                 if (!(bdstat & STS_CTRL_APP0_CMPLT))
 970                         break;
 971 
 972                 dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
 973                                  XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
 974                 /* The buffer is not valid for DMA anymore */
 975                 bd->phys = 0;
 976                 bd->len = 0;
 977 
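                     /* The received frame length is reported in the low
                      * 14 bits of app4 (hence the 0x3FFF mask).
                      */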
 978                 length = be32_to_cpu(bd->app4) & 0x3FFF;
 979                 skb_put(skb, length);
 980                 skb->protocol = eth_type_trans(skb, ndev);
 981                 skb_checksum_none_assert(skb);
 982 
 983                 /* if we're doing rx csum offload, set it up */
 984                 if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
 985                     (skb->protocol == htons(ETH_P_IP)) &&
 986                     (skb->len > 64)) {
 987 
 988                         /* Convert from device endianness (be32) to cpu
  989                  * endianness, and if necessary swap the bytes
 990                          * (back) for proper IP checksum byte order
 991                          * (be16).
 992                          */
 993                         skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
 994                         skb->ip_summed = CHECKSUM_COMPLETE;
 995                 }
 996 
 997                 if (!skb_defer_rx_timestamp(skb))
 998                         netif_rx(skb);
 999                 /* The skb buffer is now owned by network stack above */
1000                 lp->rx_skb[lp->rx_bd_ci] = NULL;
1001 
1002                 ndev->stats.rx_packets++;
1003                 ndev->stats.rx_bytes += length;
1004 
1005                 rx_bd = lp->rx_bd_ci;
1006                 if (++lp->rx_bd_ci >= RX_BD_NUM)
1007                         lp->rx_bd_ci = 0;
1008         } while (rx_bd != lp->rx_bd_tail);
1009 
1010         /* DMA operations will halt when the last buffer descriptor is
1011          * processed (ie. the one pointed to by RX_TAILDESC_PTR).
1012          * When that happens, no more interrupt events will be
1013          * generated.  No IRQ_COAL or IRQ_DLY, and not even an
1014          * IRQ_ERR.  To avoid stalling, we schedule a delayed work
1015          * when there is a potential risk of that happening.  The work
1016          * will call this function, and thus re-schedule itself until
1017          * enough buffers are available again.
1018          */
1019         if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
1020                 schedule_delayed_work(&lp->restart_work, HZ / 1000);
1021 
1022         /* Allocate new buffers for those buffer descriptors that were
1023          * passed to network stack.  Note that GFP_ATOMIC allocations
1024          * can fail (e.g. when a larger burst of GFP_ATOMIC
1025          * allocations occurs), so while we try to allocate all
1026          * buffers in the same interrupt where they were processed, we
1027          * continue with what we could get in case of allocation
1028          * failure.  Allocation of remaining buffers will be retried
1029          * in following calls.
1030          */
1031         while (1) {
1032                 struct sk_buff *skb;
1033                 struct cdmac_bd *bd;
1034                 dma_addr_t skb_dma_addr;
1035 
1036                 rx_bd = lp->rx_bd_tail + 1;
1037                 if (rx_bd >= RX_BD_NUM)
1038                         rx_bd = 0;
1039                 bd = &lp->rx_bd_v[rx_bd];
1040 
1041                 if (bd->phys)
1042                         break;  /* All skb's allocated */
1043 
1044                 skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
1045                 if (!skb) {
1046                         dev_warn(&ndev->dev, "skb alloc failed\n");
1047                         break;
1048                 }
1049 
1050                 skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
1051                                               XTE_MAX_JUMBO_FRAME_SIZE,
1052                                               DMA_FROM_DEVICE);
1053                 if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
1054                                                    skb_dma_addr))) {
1055                         dev_kfree_skb_any(skb);
1056                         break;
1057                 }
1058 
1059                 bd->phys = cpu_to_be32(skb_dma_addr);
1060                 bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
1061                 bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
1062                 lp->rx_skb[rx_bd] = skb;
1063 
1064                 lp->rx_bd_tail = rx_bd;
1065                 update_tail = true;
1066         }
1067 
1068         /* Move tail pointer when buffers have been allocated */
1069         if (update_tail) {
1070                 lp->dma_out(lp, RX_TAILDESC_PTR,
1071                         lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
1072         }
1073 
1074         spin_unlock_irqrestore(&lp->rx_lock, flags);
1075 }
1076 
1077 /* Function scheduled to ensure a restart in case of DMA halt
1078  * condition caused by running out of buffer descriptors.
1079  */
1080 static void ll_temac_restart_work_func(struct work_struct *work)
1081 {
1082         struct temac_local *lp = container_of(work, struct temac_local,
1083                                               restart_work.work);
1084         struct net_device *ndev = lp->ndev;
1085 
1086         ll_temac_recv(ndev);
1087 }
1088 
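     /* TX completion interrupt: acknowledge the pending DMA status bits,
      * reap completed descriptors, and log (rate limited) any DMA error
      * conditions.
      */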
1089 static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
1090 {
1091         struct net_device *ndev = _ndev;
1092         struct temac_local *lp = netdev_priv(ndev);
1093         unsigned int status;
1094 
1095         status = lp->dma_in(lp, TX_IRQ_REG);
1096         lp->dma_out(lp, TX_IRQ_REG, status);
1097 
1098         if (status & (IRQ_COAL | IRQ_DLY))
1099                 temac_start_xmit_done(lp->ndev);
1100         if (status & (IRQ_ERR | IRQ_DMAERR))
1101                 dev_err_ratelimited(&ndev->dev,
1102                                     "TX error 0x%x TX_CHNL_STS=0x%08x\n",
1103                                     status, lp->dma_in(lp, TX_CHNL_STS));
1104 
1105         return IRQ_HANDLED;
1106 }
1107 
1108 static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
1109 {
1110         struct net_device *ndev = _ndev;
1111         struct temac_local *lp = netdev_priv(ndev);
1112         unsigned int status;
1113 
1114         /* Read and clear the status registers */
1115         status = lp->dma_in(lp, RX_IRQ_REG);
1116         lp->dma_out(lp, RX_IRQ_REG, status);
1117 
1118         if (status & (IRQ_COAL | IRQ_DLY))
1119                 ll_temac_recv(lp->ndev);
1120         if (status & (IRQ_ERR | IRQ_DMAERR))
1121                 dev_err_ratelimited(&ndev->dev,
1122                                     "RX error 0x%x RX_CHNL_STS=0x%08x\n",
1123                                     status, lp->dma_in(lp, RX_CHNL_STS));
1124 
1125         return IRQ_HANDLED;
1126 }
1127 
1128 static int temac_open(struct net_device *ndev)
1129 {
1130         struct temac_local *lp = netdev_priv(ndev);
1131         struct phy_device *phydev = NULL;
1132         int rc;
1133 
1134         dev_dbg(&ndev->dev, "temac_open()\n");
1135 
1136         if (lp->phy_node) {
1137                 phydev = of_phy_connect(lp->ndev, lp->phy_node,
1138                                         temac_adjust_link, 0, 0);
1139                 if (!phydev) {
1140                         dev_err(lp->dev, "of_phy_connect() failed\n");
1141                         return -ENODEV;
1142                 }
1143                 phy_start(phydev);
1144         } else if (strlen(lp->phy_name) > 0) {
1145                 phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
1146                                      lp->phy_interface);
1147                 if (IS_ERR(phydev)) {
1148                         dev_err(lp->dev, "phy_connect() failed\n");
1149                         return PTR_ERR(phydev);
1150                 }
1151                 phy_start(phydev);
1152         }
1153 
1154         temac_device_reset(ndev);
1155 
1156         rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
1157         if (rc)
1158                 goto err_tx_irq;
1159         rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
1160         if (rc)
1161                 goto err_rx_irq;
1162 
1163         return 0;
1164 
1165  err_rx_irq:
1166         free_irq(lp->tx_irq, ndev);
1167  err_tx_irq:
1168         if (phydev)
1169                 phy_disconnect(phydev);
1170         dev_err(lp->dev, "request_irq() failed\n");
1171         return rc;
1172 }
1173 
1174 static int temac_stop(struct net_device *ndev)
1175 {
1176         struct temac_local *lp = netdev_priv(ndev);
1177         struct phy_device *phydev = ndev->phydev;
1178 
 1179         dev_dbg(&ndev->dev, "temac_stop()\n");
1180 
1181         cancel_delayed_work_sync(&lp->restart_work);
1182 
1183         free_irq(lp->tx_irq, ndev);
1184         free_irq(lp->rx_irq, ndev);
1185 
1186         if (phydev)
1187                 phy_disconnect(phydev);
1188 
1189         temac_dma_bd_release(ndev);
1190 
1191         return 0;
1192 }
1193 
1194 #ifdef CONFIG_NET_POLL_CONTROLLER
1195 static void
1196 temac_poll_controller(struct net_device *ndev)
1197 {
1198         struct temac_local *lp = netdev_priv(ndev);
1199 
1200         disable_irq(lp->tx_irq);
1201         disable_irq(lp->rx_irq);
1202 
 1203         ll_temac_rx_irq(lp->rx_irq, ndev);
 1204         ll_temac_tx_irq(lp->tx_irq, ndev);
1205 
1206         enable_irq(lp->tx_irq);
1207         enable_irq(lp->rx_irq);
1208 }
1209 #endif
1210 
1211 static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1212 {
1213         if (!netif_running(ndev))
1214                 return -EINVAL;
1215 
1216         if (!ndev->phydev)
1217                 return -EINVAL;
1218 
1219         return phy_mii_ioctl(ndev->phydev, rq, cmd);
1220 }
1221 
1222 static const struct net_device_ops temac_netdev_ops = {
1223         .ndo_open = temac_open,
1224         .ndo_stop = temac_stop,
1225         .ndo_start_xmit = temac_start_xmit,
1226         .ndo_set_rx_mode = temac_set_multicast_list,
1227         .ndo_set_mac_address = temac_set_mac_address,
1228         .ndo_validate_addr = eth_validate_addr,
1229         .ndo_do_ioctl = temac_ioctl,
1230 #ifdef CONFIG_NET_POLL_CONTROLLER
1231         .ndo_poll_controller = temac_poll_controller,
1232 #endif
1233 };
1234 
1235 /* ---------------------------------------------------------------------
1236  * SYSFS device attributes
1237  */
1238 static ssize_t temac_show_llink_regs(struct device *dev,
1239                                      struct device_attribute *attr, char *buf)
1240 {
1241         struct net_device *ndev = dev_get_drvdata(dev);
1242         struct temac_local *lp = netdev_priv(ndev);
1243         int i, len = 0;
1244 
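             /* Dump the first 17 (0x11) LocalLink DMA registers, eight per line */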
1245         for (i = 0; i < 0x11; i++)
1246                 len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
1247                                (i % 8) == 7 ? "\n" : " ");
1248         len += sprintf(buf + len, "\n");
1249 
1250         return len;
1251 }
1252 
1253 static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);
1254 
1255 static struct attribute *temac_device_attrs[] = {
1256         &dev_attr_llink_regs.attr,
1257         NULL,
1258 };
1259 
1260 static const struct attribute_group temac_attr_group = {
1261         .attrs = temac_device_attrs,
1262 };
1263 
1264 /* ethtool support */
1265 static const struct ethtool_ops temac_ethtool_ops = {
1266         .nway_reset = phy_ethtool_nway_reset,
1267         .get_link = ethtool_op_get_link,
1268         .get_ts_info = ethtool_op_get_ts_info,
1269         .get_link_ksettings = phy_ethtool_get_link_ksettings,
1270         .set_link_ksettings = phy_ethtool_set_link_ksettings,
1271 };
1272 
1273 static int temac_probe(struct platform_device *pdev)
1274 {
1275         struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
1276         struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
1277         struct temac_local *lp;
1278         struct net_device *ndev;
1279         struct resource *res;
1280         const void *addr;
1281         __be32 *p;
1282         bool little_endian;
1283         int rc = 0;
1284 
1285         /* Init network device structure */
1286         ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
1287         if (!ndev)
1288                 return -ENOMEM;
1289 
1290         platform_set_drvdata(pdev, ndev);
1291         SET_NETDEV_DEV(ndev, &pdev->dev);
1292         ndev->features = NETIF_F_SG;
1293         ndev->netdev_ops = &temac_netdev_ops;
1294         ndev->ethtool_ops = &temac_ethtool_ops;
1295 #if 0
1296         ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
1297         ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
1298         ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
1299         ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
1300         ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
1301         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
1302         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
1303         ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
1304         ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
1305         ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
1306         ndev->features |= NETIF_F_LRO; /* large receive offload */
1307 #endif
1308 
1309         /* setup temac private info structure */
1310         lp = netdev_priv(ndev);
1311         lp->ndev = ndev;
1312         lp->dev = &pdev->dev;
1313         lp->options = XTE_OPTION_DEFAULTS;
1314         spin_lock_init(&lp->rx_lock);
1315         INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
1316 
 1317         /* Set up the spinlock used to synchronize indirect register access */
1318         if (pdata) {
1319                 if (!pdata->indirect_lock) {
1320                         dev_err(&pdev->dev,
1321                                 "indirect_lock missing in platform_data\n");
1322                         return -EINVAL;
1323                 }
1324                 lp->indirect_lock = pdata->indirect_lock;
1325         } else {
1326                 lp->indirect_lock = devm_kmalloc(&pdev->dev,
1327                                                  sizeof(*lp->indirect_lock),
 1328                                                  GFP_KERNEL);
                     if (!lp->indirect_lock)
                             return -ENOMEM;
 1329                 spin_lock_init(lp->indirect_lock);
1330         }
1331 
1332         /* map device registers */
1333         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1334         lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
1335                                         resource_size(res));
 1336         if (!lp->regs) {
 1337                 dev_err(&pdev->dev, "could not map TEMAC registers\n");
 1338                 return -ENOMEM;
1339         }
1340 
1341         /* Select register access functions with the specified
1342          * endianness mode.  Default for OF devices is big-endian.
1343          */
1344         little_endian = false;
1345         if (temac_np) {
1346                 if (of_get_property(temac_np, "little-endian", NULL))
1347                         little_endian = true;
1348         } else if (pdata) {
1349                 little_endian = pdata->reg_little_endian;
1350         }
1351         if (little_endian) {
1352                 lp->temac_ior = _temac_ior_le;
1353                 lp->temac_iow = _temac_iow_le;
1354         } else {
1355                 lp->temac_ior = _temac_ior_be;
1356                 lp->temac_iow = _temac_iow_be;
1357         }
1358 
1359         /* Setup checksum offload, but default to off if not specified */
1360         lp->temac_features = 0;
1361         if (temac_np) {
1362                 p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
1363                 if (p && be32_to_cpu(*p))
1364                         lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1365                 p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
1366                 if (p && be32_to_cpu(*p))
1367                         lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1368         } else if (pdata) {
1369                 if (pdata->txcsum)
1370                         lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1371                 if (pdata->rxcsum)
1372                         lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1373         }
1374         if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
1375                 /* Can checksum TCP/UDP over IPv4. */
1376                 ndev->features |= NETIF_F_IP_CSUM;
1377 
1378         /* Setup LocalLink DMA */
1379         if (temac_np) {
1380                 /* Find the DMA node, map the DMA registers, and
1381                  * decode the DMA IRQs.
1382                  */
1383                 dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
1384                 if (!dma_np) {
1385                         dev_err(&pdev->dev, "could not find DMA node\n");
1386                         return -ENODEV;
1387                 }
1388 
		/* Set up the DMA register accesses; these can be either
		 * DCR-based or memory mapped.
		 */
1392                 if (temac_dcr_setup(lp, pdev, dma_np)) {
1393                         /* no DCR in the device tree, try non-DCR */
1394                         lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
1395                                                       NULL);
1396                         if (IS_ERR(lp->sdma_regs)) {
1397                                 dev_err(&pdev->dev,
1398                                         "unable to map DMA registers\n");
1399                                 of_node_put(dma_np);
1400                                 return PTR_ERR(lp->sdma_regs);
1401                         }
1402                         if (of_get_property(dma_np, "little-endian", NULL)) {
1403                                 lp->dma_in = temac_dma_in32_le;
1404                                 lp->dma_out = temac_dma_out32_le;
1405                         } else {
1406                                 lp->dma_in = temac_dma_in32_be;
1407                                 lp->dma_out = temac_dma_out32_be;
1408                         }
1409                         dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
1410                 }
1411 
1412                 /* Get DMA RX and TX interrupts */
1413                 lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
1414                 lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
1415 
		/* Use defaults for IRQ delay/coalescing setup.  These
		 * are configuration values, so they do not belong in
		 * the device tree.
		 */
1420                 lp->tx_chnl_ctrl = 0x10220000;
1421                 lp->rx_chnl_ctrl = 0xff070000;
1422                 lp->coalesce_count_rx = 0x07;
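		/* As the platform-data path below shows, bits 31:24 of the
		 * channel control value hold the interrupt delay timer and
		 * bits 23:16 the interrupt coalesce count.  The defaults
		 * therefore mean roughly: TX IRQ after 0x22 packets or a
		 * 0x10 timeout, RX IRQ after 0x07 packets or a 0xff timeout.
		 */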
1423 
1424                 /* Finished with the DMA node; drop the reference */
1425                 of_node_put(dma_np);
1426         } else if (pdata) {
1427                 /* 2nd memory resource specifies DMA registers */
1428                 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1429                 lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
1430                                                      resource_size(res));
		if (!lp->sdma_regs) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return -ENOMEM;
		}
1436                 if (pdata->dma_little_endian) {
1437                         lp->dma_in = temac_dma_in32_le;
1438                         lp->dma_out = temac_dma_out32_le;
1439                 } else {
1440                         lp->dma_in = temac_dma_in32_be;
1441                         lp->dma_out = temac_dma_out32_be;
1442                 }
1443 
1444                 /* Get DMA RX and TX interrupts */
1445                 lp->rx_irq = platform_get_irq(pdev, 0);
1446                 lp->tx_irq = platform_get_irq(pdev, 1);
1447 
1448                 /* IRQ delay/coalescing setup */
1449                 if (pdata->tx_irq_timeout || pdata->tx_irq_count)
1450                         lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
1451                                 (pdata->tx_irq_count << 16);
1452                 else
1453                         lp->tx_chnl_ctrl = 0x10220000;
1454                 if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
1455                         lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
1456                                 (pdata->rx_irq_count << 16);
1457                         lp->coalesce_count_rx = pdata->rx_irq_count;
1458                 } else {
1459                         lp->rx_chnl_ctrl = 0xff070000;
1460                         lp->coalesce_count_rx = 0x07;
1461                 }
1462         }
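	/* In the non-DT case, a board file would hand in something like the
	 * following through platform_data (the struct tag is assumed here,
	 * and all values are hypothetical; the field names match the pdata
	 * members used above):
	 *
	 *   static struct ll_temac_platform_data temac_pdata = {
	 *           .mac_addr       = { 0x00, 0x0a, 0x35, 0x00, 0x00, 0x01 },
	 *           .rx_irq_timeout = 0xff,
	 *           .rx_irq_count   = 0x07,
	 *           .tx_irq_timeout = 0x10,
	 *           .tx_irq_count   = 0x22,
	 *   };
	 */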
1463 
	/* Sanity check the DMA RX and TX interrupts obtained above.
	 * irq_of_parse_and_map() returns 0 on failure, while
	 * platform_get_irq() returns a negative errno, so handle both.
	 */
	if (lp->rx_irq <= 0) {
		if (lp->rx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA RX irq\n");
		return lp->rx_irq ? lp->rx_irq : -ENODEV;
	}
	if (lp->tx_irq <= 0) {
		if (lp->tx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA TX irq\n");
		return lp->tx_irq ? lp->tx_irq : -ENODEV;
	}
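	/* -EPROBE_DEFER is not treated as an error as such: returning it
	 * makes the driver core retry this probe later, e.g. once the
	 * interrupt controller has shown up, so it is deliberately not
	 * logged above to keep the log quiet.
	 */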
1475 
1476         if (temac_np) {
1477                 /* Retrieve the MAC address */
1478                 addr = of_get_mac_address(temac_np);
1479                 if (IS_ERR(addr)) {
1480                         dev_err(&pdev->dev, "could not find MAC address\n");
1481                         return -ENODEV;
1482                 }
1483                 temac_init_mac_address(ndev, addr);
1484         } else if (pdata) {
1485                 temac_init_mac_address(ndev, pdata->mac_addr);
1486         }
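	/* of_get_mac_address() reads the usual MAC address properties from
	 * the device-tree node; temac_init_mac_address(), defined earlier
	 * in this file, then applies the address (presumably substituting a
	 * random one if the address passed in is invalid).
	 */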
1487 
1488         rc = temac_mdio_setup(lp, pdev);
1489         if (rc)
1490                 dev_warn(&pdev->dev, "error registering MDIO bus\n");
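	/* An MDIO registration failure is only a warning: the interface can
	 * presumably still be brought up with a fixed link or an externally
	 * managed PHY.
	 */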
1491 
1492         if (temac_np) {
1493                 lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
1494                 if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", lp->phy_node);
1496         } else if (pdata) {
1497                 snprintf(lp->phy_name, sizeof(lp->phy_name),
1498                          PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
1499                 lp->phy_interface = pdata->phy_interface;
1500         }
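	/* In the device-tree case the PHY is referenced through a
	 * phy-handle phandle; otherwise a bus id string is built with
	 * PHY_ID_FMT ("%s:%02x", i.e. MII bus id plus PHY address) so the
	 * PHY can presumably be looked up by name when the interface is
	 * opened.
	 */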
1501 
1502         /* Add the device attributes */
1503         rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
1504         if (rc) {
1505                 dev_err(lp->dev, "Error creating sysfs files\n");
1506                 goto err_sysfs_create;
1507         }
1508 
1509         rc = register_netdev(lp->ndev);
1510         if (rc) {
1511                 dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
1512                 goto err_register_ndev;
1513         }
1514 
1515         return 0;
1516 
1517 err_register_ndev:
1518         sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1519 err_sysfs_create:
1520         if (lp->phy_node)
1521                 of_node_put(lp->phy_node);
1522         temac_mdio_teardown(lp);
1523         return rc;
1524 }
1525 
1526 static int temac_remove(struct platform_device *pdev)
1527 {
1528         struct net_device *ndev = platform_get_drvdata(pdev);
1529         struct temac_local *lp = netdev_priv(ndev);
1530 
1531         unregister_netdev(ndev);
1532         sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1533         if (lp->phy_node)
1534                 of_node_put(lp->phy_node);
1535         temac_mdio_teardown(lp);
1536         return 0;
1537 }
1538 
1539 static const struct of_device_id temac_of_match[] = {
1540         { .compatible = "xlnx,xps-ll-temac-1.01.b", },
1541         { .compatible = "xlnx,xps-ll-temac-2.00.a", },
1542         { .compatible = "xlnx,xps-ll-temac-2.02.a", },
1543         { .compatible = "xlnx,xps-ll-temac-2.03.a", },
1544         {},
1545 };
1546 MODULE_DEVICE_TABLE(of, temac_of_match);
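/* Exporting the compatible list with MODULE_DEVICE_TABLE() lets userspace
 * (udev/modprobe) autoload this module when a matching device-tree node is
 * found.
 */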
1547 
1548 static struct platform_driver temac_driver = {
1549         .probe = temac_probe,
1550         .remove = temac_remove,
1551         .driver = {
1552                 .name = "xilinx_temac",
1553                 .of_match_table = temac_of_match,
1554         },
1555 };
1556 
1557 module_platform_driver(temac_driver);
1558 
1559 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
1560 MODULE_AUTHOR("Yoshio Kashiwagi");
1561 MODULE_LICENSE("GPL");
