root/drivers/net/ethernet/xscale/ixp4xx_eth.c


DEFINITIONS

This source file includes the following definitions.
  1. memcpy_swab32
  2. ixp_ptp_match
  3. ixp_rx_timestamp
  4. ixp_tx_timestamp
  5. hwtstamp_set
  6. hwtstamp_get
  7. ixp4xx_mdio_cmd
  8. ixp4xx_mdio_read
  9. ixp4xx_mdio_write
  10. ixp4xx_mdio_register
  11. ixp4xx_mdio_remove
  12. ixp4xx_adjust_link
  13. debug_pkt
  14. debug_desc
  15. queue_get_desc
  16. queue_put_desc
  17. dma_unmap_tx
  18. eth_rx_irq
  19. eth_poll
  20. eth_txdone_irq
  21. eth_xmit
  22. eth_set_mcast_list
  23. eth_ioctl
  24. ixp4xx_get_drvinfo
  25. ixp4xx_get_ts_info
  26. request_queues
  27. release_queues
  28. init_queues
  29. destroy_queues
  30. eth_open
  31. eth_close
  32. ixp4xx_eth_probe
  33. ixp4xx_eth_remove

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Intel IXP4xx Ethernet driver for Linux
   4  *
   5  * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
   6  *
   7  * Ethernet port config (0x00 is not present on IXP42X):
   8  *
   9  * logical port         0x00            0x10            0x20
  10  * NPE                  0 (NPE-A)       1 (NPE-B)       2 (NPE-C)
  11  * physical PortId      2               0               1
  12  * TX queue             23              24              25
  13  * RX-free queue        26              27              28
  14  * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
  15  *
  16  * Queue entries:
  17  * bits 0 -> 1  - NPE ID (RX and TX-done)
  18  * bits 0 -> 2  - priority (TX, per 802.1D)
  19  * bits 3 -> 4  - port ID (user-set?)
  20  * bits 5 -> 31 - physical descriptor address
  21  */
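
     /*
      * A worked decode of the entry format above (entry value is
      * hypothetical): a TX-done entry of 0x01ABCD61 carries NPE ID 1 in
      * bits 0-1 and the descriptor address in bits 5-31, so
      * entry & ~0x1F = 0x01ABCD60; this is exactly how eth_txdone_irq()
      * and queue_get_desc() below recover npe_id and phys.
      */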
  22 
  23 #include <linux/delay.h>
  24 #include <linux/dma-mapping.h>
  25 #include <linux/dmapool.h>
  26 #include <linux/etherdevice.h>
  27 #include <linux/io.h>
  28 #include <linux/kernel.h>
  29 #include <linux/net_tstamp.h>
  30 #include <linux/of.h>
  31 #include <linux/phy.h>
  32 #include <linux/platform_device.h>
  33 #include <linux/ptp_classify.h>
  34 #include <linux/slab.h>
  35 #include <linux/module.h>
  36 #include <mach/ixp46x_ts.h>
  37 #include <linux/soc/ixp4xx/npe.h>
  38 #include <linux/soc/ixp4xx/qmgr.h>
  39 
  40 #define DEBUG_DESC              0
  41 #define DEBUG_RX                0
  42 #define DEBUG_TX                0
  43 #define DEBUG_PKT_BYTES         0
  44 #define DEBUG_MDIO              0
  45 #define DEBUG_CLOSE             0
  46 
  47 #define DRV_NAME                "ixp4xx_eth"
  48 
  49 #define MAX_NPES                3
  50 
  51 #define RX_DESCS                64 /* also length of all RX queues */
  52 #define TX_DESCS                16 /* also length of all TX queues */
  53 #define TXDONE_QUEUE_LEN        64 /* dwords */
  54 
  55 #define POOL_ALLOC_SIZE         (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
  56 #define REGS_SIZE               0x1000
  57 #define MAX_MRU                 1536 /* 0x600 */
  58 #define RX_BUFF_SIZE            ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
  59 
  60 #define NAPI_WEIGHT             16
  61 #define MDIO_INTERVAL           (3 * HZ)
  62 #define MAX_MDIO_RETRIES        100 /* microseconds, typically 30 cycles */
  63 #define MAX_CLOSE_WAIT          1000 /* microseconds, typically 2-3 cycles */
  64 
  65 #define NPE_ID(port_id)         ((port_id) >> 4)
  66 #define PHYSICAL_ID(port_id)    ((NPE_ID(port_id) + 2) % 3)
  67 #define TX_QUEUE(port_id)       (NPE_ID(port_id) + 23)
  68 #define RXFREE_QUEUE(port_id)   (NPE_ID(port_id) + 26)
  69 #define TXDONE_QUEUE            31
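
     /*
      * Cross-check against the port table in the header comment: for
      * logical port 0x10, NPE_ID() gives 1 (NPE-B), PHYSICAL_ID() gives
      * (1 + 2) % 3 = 0, TX_QUEUE() gives 23 + 1 = 24 and RXFREE_QUEUE()
      * gives 26 + 1 = 27, matching the 0x10 column above.
      */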
  70 
  71 #define PTP_SLAVE_MODE          1
  72 #define PTP_MASTER_MODE         2
  73 #define PORT2CHANNEL(p)         NPE_ID(p->id)
  74 
  75 /* TX Control Registers */
  76 #define TX_CNTRL0_TX_EN         0x01
  77 #define TX_CNTRL0_HALFDUPLEX    0x02
  78 #define TX_CNTRL0_RETRY         0x04
  79 #define TX_CNTRL0_PAD_EN        0x08
  80 #define TX_CNTRL0_APPEND_FCS    0x10
  81 #define TX_CNTRL0_2DEFER        0x20
  82 #define TX_CNTRL0_RMII          0x40 /* reduced MII */
  83 #define TX_CNTRL1_RETRIES       0x0F /* 4 bits */
  84 
  85 /* RX Control Registers */
  86 #define RX_CNTRL0_RX_EN         0x01
  87 #define RX_CNTRL0_PADSTRIP_EN   0x02
  88 #define RX_CNTRL0_SEND_FCS      0x04
  89 #define RX_CNTRL0_PAUSE_EN      0x08
  90 #define RX_CNTRL0_LOOP_EN       0x10
  91 #define RX_CNTRL0_ADDR_FLTR_EN  0x20
  92 #define RX_CNTRL0_RX_RUNT_EN    0x40
  93 #define RX_CNTRL0_BCAST_DIS     0x80
  94 #define RX_CNTRL1_DEFER_EN      0x01
  95 
  96 /* Core Control Register */
  97 #define CORE_RESET              0x01
  98 #define CORE_RX_FIFO_FLUSH      0x02
  99 #define CORE_TX_FIFO_FLUSH      0x04
 100 #define CORE_SEND_JAM           0x08
 101 #define CORE_MDC_EN             0x10 /* MDIO using NPE-B ETH-0 only */
 102 
 103 #define DEFAULT_TX_CNTRL0       (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |    \
 104                                  TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
 105                                  TX_CNTRL0_2DEFER)
 106 #define DEFAULT_RX_CNTRL0       RX_CNTRL0_RX_EN
 107 #define DEFAULT_CORE_CNTRL      CORE_MDC_EN
 108 
 109 
 110 /* NPE message codes */
 111 #define NPE_GETSTATUS                   0x00
 112 #define NPE_EDB_SETPORTADDRESS          0x01
 113 #define NPE_EDB_GETMACADDRESSDATABASE   0x02
 114 #define NPE_EDB_SETMACADDRESSSDATABASE  0x03
 115 #define NPE_GETSTATS                    0x04
 116 #define NPE_RESETSTATS                  0x05
 117 #define NPE_SETMAXFRAMELENGTHS          0x06
 118 #define NPE_VLAN_SETRXTAGMODE           0x07
 119 #define NPE_VLAN_SETDEFAULTRXVID        0x08
 120 #define NPE_VLAN_SETPORTVLANTABLEENTRY  0x09
 121 #define NPE_VLAN_SETPORTVLANTABLERANGE  0x0A
 122 #define NPE_VLAN_SETRXQOSENTRY          0x0B
 123 #define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
 124 #define NPE_STP_SETBLOCKINGSTATE        0x0D
 125 #define NPE_FW_SETFIREWALLMODE          0x0E
 126 #define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
 127 #define NPE_PC_SETAPMACTABLE            0x11
 128 #define NPE_SETLOOPBACK_MODE            0x12
 129 #define NPE_PC_SETBSSIDTABLE            0x13
 130 #define NPE_ADDRESS_FILTER_CONFIG       0x14
 131 #define NPE_APPENDFCSCONFIG             0x15
 132 #define NPE_NOTIFY_MAC_RECOVERY_DONE    0x16
 133 #define NPE_MAC_RECOVERY_START          0x17
 134 
 135 
 136 #ifdef __ARMEB__
 137 typedef struct sk_buff buffer_t;
 138 #define free_buffer dev_kfree_skb
 139 #define free_buffer_irq dev_consume_skb_irq
 140 #else
 141 typedef void buffer_t;
 142 #define free_buffer kfree
 143 #define free_buffer_irq kfree
 144 #endif
 145 
 146 struct eth_regs {
 147         u32 tx_control[2], __res1[2];           /* 000 */
 148         u32 rx_control[2], __res2[2];           /* 010 */
 149         u32 random_seed, __res3[3];             /* 020 */
 150         u32 partial_empty_threshold, __res4;    /* 030 */
 151         u32 partial_full_threshold, __res5;     /* 038 */
 152         u32 tx_start_bytes, __res6[3];          /* 040 */
 153         u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
 154         u32 tx_2part_deferral[2], __res8[2];    /* 060 */
 155         u32 slot_time, __res9[3];               /* 070 */
 156         u32 mdio_command[4];                    /* 080 */
 157         u32 mdio_status[4];                     /* 090 */
 158         u32 mcast_mask[6], __res10[2];          /* 0A0 */
 159         u32 mcast_addr[6], __res11[2];          /* 0C0 */
 160         u32 int_clock_threshold, __res12[3];    /* 0E0 */
 161         u32 hw_addr[6], __res13[61];            /* 0F0 */
 162         u32 core_control;                       /* 1FC */
 163 };
 164 
 165 struct port {
 166         struct resource *mem_res;
 167         struct eth_regs __iomem *regs;
 168         struct npe *npe;
 169         struct net_device *netdev;
 170         struct napi_struct napi;
 171         struct eth_plat_info *plat;
 172         buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
 173         struct desc *desc_tab;  /* coherent */
 174         u32 desc_tab_phys;
 175         int id;                 /* logical port ID */
 176         int speed, duplex;
 177         u8 firmware[4];
 178         int hwts_tx_en;
 179         int hwts_rx_en;
 180 };
 181 
 182 /* NPE message structure */
 183 struct msg {
 184 #ifdef __ARMEB__
 185         u8 cmd, eth_id, byte2, byte3;
 186         u8 byte4, byte5, byte6, byte7;
 187 #else
 188         u8 byte3, byte2, eth_id, cmd;
 189         u8 byte7, byte6, byte5, byte4;
 190 #endif
 191 };
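
     /*
      * Note on the layout: the NPE exchanges messages as two 32-bit
      * words, so the little-endian variant declares the byte fields in
      * reverse order within each word; both variants place cmd, eth_id
      * and byte2..byte7 at the same byte offsets as seen by the NPE.
      */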
 192 
 193 /* Ethernet packet descriptor */
 194 struct desc {
 195         u32 next;               /* pointer to next buffer, unused */
 196 
 197 #ifdef __ARMEB__
 198         u16 buf_len;            /* buffer length */
 199         u16 pkt_len;            /* packet length */
 200         u32 data;               /* pointer to data buffer in RAM */
 201         u8 dest_id;
 202         u8 src_id;
 203         u16 flags;
 204         u8 qos;
 205         u8 padlen;
 206         u16 vlan_tci;
 207 #else
 208         u16 pkt_len;            /* packet length */
 209         u16 buf_len;            /* buffer length */
 210         u32 data;               /* pointer to data buffer in RAM */
 211         u16 flags;
 212         u8 src_id;
 213         u8 dest_id;
 214         u16 vlan_tci;
 215         u8 padlen;
 216         u8 qos;
 217 #endif
 218 
 219 #ifdef __ARMEB__
 220         u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
 221         u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
 222         u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
 223 #else
 224         u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
 225         u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
 226         u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
 227 #endif
 228 };
 229 
 230 
 231 #define rx_desc_phys(port, n)   ((port)->desc_tab_phys +                \
 232                                  (n) * sizeof(struct desc))
 233 #define rx_desc_ptr(port, n)    (&(port)->desc_tab[n])
 234 
 235 #define tx_desc_phys(port, n)   ((port)->desc_tab_phys +                \
 236                                  ((n) + RX_DESCS) * sizeof(struct desc))
 237 #define tx_desc_ptr(port, n)    (&(port)->desc_tab[(n) + RX_DESCS])
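
     /*
      * Layout implied by these macros: desc_tab is a single coherent
      * block of RX_DESCS RX descriptors followed by TX_DESCS TX
      * descriptors (see POOL_ALLOC_SIZE), so tx_desc_phys(port, 0)
      * equals rx_desc_phys(port, RX_DESCS).
      */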
 238 
 239 #ifndef __ARMEB__
 240 static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
 241 {
 242         int i;
 243         for (i = 0; i < cnt; i++)
 244                 dest[i] = swab32(src[i]);
 245 }
 246 #endif
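
     /*
      * Little-endian builds only: the NPEs handle frame data as
      * big-endian 32-bit words, so the RX and TX paths copy packets
      * through this word-swapping helper instead of using the skb
      * buffer directly (see eth_poll() and eth_xmit() below).
      */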
 247 
 248 static spinlock_t mdio_lock;
 249 static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
 250 static struct mii_bus *mdio_bus;
 251 static int ports_open;
 252 static struct port *npe_port_tab[MAX_NPES];
 253 static struct dma_pool *dma_pool;
 254 
 255 static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 256 {
 257         u8 *data = skb->data;
 258         unsigned int offset;
 259         u16 *hi, *id;
 260         u32 lo;
 261 
 262         if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
 263                 return 0;
 264 
 265         offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
 266 
 267         if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
 268                 return 0;
 269 
 270         hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
 271         id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
 272 
 273         memcpy(&lo, &hi[1], sizeof(lo));
 274 
 275         return (uid_hi == ntohs(*hi) &&
 276                 uid_lo == ntohl(lo) &&
 277                 seqid  == ntohs(*id));
 278 }
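
     /*
      * Here "offset" is the start of the PTPv1 header within the UDP
      * payload; OFF_PTP_SOURCE_UUID and OFF_PTP_SEQUENCE_ID come from
      * <linux/ptp_classify.h>.  The low half of the UUID is fetched
      * with memcpy() because it is not naturally aligned in the frame.
      */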
 279 
 280 static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
 281 {
 282         struct skb_shared_hwtstamps *shhwtstamps;
 283         struct ixp46x_ts_regs __iomem *regs;
 284         u64 ns;
 285         u32 ch, hi, lo, val;
 286         u16 uid, seq;
 287 
 288         if (!port->hwts_rx_en)
 289                 return;
 290 
 291         ch = PORT2CHANNEL(port);
 292 
 293         regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
 294 
 295         val = __raw_readl(&regs->channel[ch].ch_event);
 296 
 297         if (!(val & RX_SNAPSHOT_LOCKED))
 298                 return;
 299 
 300         lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
 301         hi = __raw_readl(&regs->channel[ch].src_uuid_hi);
 302 
 303         uid = hi & 0xffff;
 304         seq = (hi >> 16) & 0xffff;
 305 
 306         if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
 307                 goto out;
 308 
 309         lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
 310         hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
 311         ns = ((u64) hi) << 32;
 312         ns |= lo;
 313         ns <<= TICKS_NS_SHIFT;
 314 
 315         shhwtstamps = skb_hwtstamps(skb);
 316         memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 317         shhwtstamps->hwtstamp = ns_to_ktime(ns);
 318 out:
 319         __raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
 320 }
 321 
 322 static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
 323 {
 324         struct skb_shared_hwtstamps shhwtstamps;
 325         struct ixp46x_ts_regs __iomem *regs;
 326         struct skb_shared_info *shtx;
 327         u64 ns;
 328         u32 ch, cnt, hi, lo, val;
 329 
 330         shtx = skb_shinfo(skb);
 331         if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
 332                 shtx->tx_flags |= SKBTX_IN_PROGRESS;
 333         else
 334                 return;
 335 
 336         ch = PORT2CHANNEL(port);
 337 
 338         regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
 339 
 340         /*
 341          * This really stinks, but we have to poll for the Tx time stamp.
 342          * Usually, the time stamp is ready after 4 to 6 microseconds.
 343          */
 344         for (cnt = 0; cnt < 100; cnt++) {
 345                 val = __raw_readl(&regs->channel[ch].ch_event);
 346                 if (val & TX_SNAPSHOT_LOCKED)
 347                         break;
 348                 udelay(1);
 349         }
 350         if (!(val & TX_SNAPSHOT_LOCKED)) {
 351                 shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
 352                 return;
 353         }
 354 
 355         lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
 356         hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
 357         ns = ((u64) hi) << 32;
 358         ns |= lo;
 359         ns <<= TICKS_NS_SHIFT;
 360 
 361         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 362         shhwtstamps.hwtstamp = ns_to_ktime(ns);
 363         skb_tstamp_tx(skb, &shhwtstamps);
 364 
 365         __raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
 366 }
 367 
 368 static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 369 {
 370         struct hwtstamp_config cfg;
 371         struct ixp46x_ts_regs __iomem *regs;
 372         struct port *port = netdev_priv(netdev);
 373         int ch;
 374 
 375         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 376                 return -EFAULT;
 377 
 378         if (cfg.flags) /* reserved for future extensions */
 379                 return -EINVAL;
 380 
 381         ch = PORT2CHANNEL(port);
 382         regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
 383 
 384         if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
 385                 return -ERANGE;
 386 
 387         switch (cfg.rx_filter) {
 388         case HWTSTAMP_FILTER_NONE:
 389                 port->hwts_rx_en = 0;
 390                 break;
 391         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 392                 port->hwts_rx_en = PTP_SLAVE_MODE;
 393                 __raw_writel(0, &regs->channel[ch].ch_control);
 394                 break;
 395         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 396                 port->hwts_rx_en = PTP_MASTER_MODE;
 397                 __raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
 398                 break;
 399         default:
 400                 return -ERANGE;
 401         }
 402 
 403         port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
 404 
 405         /* Clear out any old time stamps. */
 406         __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
 407                      &regs->channel[ch].ch_event);
 408 
 409         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 410 }
 411 
 412 static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
 413 {
 414         struct hwtstamp_config cfg;
 415         struct port *port = netdev_priv(netdev);
 416 
 417         cfg.flags = 0;
 418         cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
 419 
 420         switch (port->hwts_rx_en) {
 421         case 0:
 422                 cfg.rx_filter = HWTSTAMP_FILTER_NONE;
 423                 break;
 424         case PTP_SLAVE_MODE:
 425                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 426                 break;
 427         case PTP_MASTER_MODE:
 428                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
 429                 break;
 430         default:
 431                 WARN_ON_ONCE(1);
 432                 return -ERANGE;
 433         }
 434 
 435         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 436 }
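
     /*
      * For illustration only: a minimal userspace sketch (not part of
      * this driver; enable_rx_tstamps() and its arguments are made-up
      * names) of how an application reaches hwtstamp_set() above via
      * the standard SIOCSHWTSTAMP ioctl.  The SIOCGHWTSTAMP path into
      * hwtstamp_get() carries the same struct in the other direction.
      */
     #if 0 /* userspace example, not kernel code */
     #include <string.h>
     #include <sys/ioctl.h>
     #include <net/if.h>
     #include <linux/net_tstamp.h>
     #include <linux/sockios.h>

     static int enable_rx_tstamps(int sock, const char *ifname)
     {
             struct hwtstamp_config cfg;
             struct ifreq ifr;

             memset(&cfg, 0, sizeof(cfg));           /* cfg.flags must be 0 */
             cfg.tx_type = HWTSTAMP_TX_ON;           /* sets hwts_tx_en */
             cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; /* PTP_SLAVE_MODE */

             memset(&ifr, 0, sizeof(ifr));
             strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
             ifr.ifr_data = (void *)&cfg;

             return ioctl(sock, SIOCSHWTSTAMP, &ifr); /* 0 on success */
     }
     #endif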
 437 
 438 static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
 439                            int write, u16 cmd)
 440 {
 441         int cycles = 0;
 442 
 443         if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
 444                 printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
 445                 return -1;
 446         }
 447 
 448         if (write) {
 449                 __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
 450                 __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
 451         }
 452         __raw_writel(((phy_id << 5) | location) & 0xFF,
 453                      &mdio_regs->mdio_command[2]);
 454         __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
 455                      &mdio_regs->mdio_command[3]);
 456 
 457         while ((cycles < MAX_MDIO_RETRIES) &&
 458                (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
 459                 udelay(1);
 460                 cycles++;
 461         }
 462 
 463         if (cycles == MAX_MDIO_RETRIES) {
 464                 printk(KERN_ERR "%s #%i: MII %s failed\n", bus->name,
 465                        phy_id, write ? "write" : "read");
 466                 return -1;
 467         }
 468 
 469 #if DEBUG_MDIO
 470         printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
 471                phy_id, write ? "write" : "read", cycles);
 472 #endif
 473 
 474         if (write)
 475                 return 0;
 476 
 477         if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
 478 #if DEBUG_MDIO
 479                 printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
 480                        phy_id);
 481 #endif
 482                 return 0xFFFF; /* don't return error */
 483         }
 484 
 485         return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
 486                 ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
 487 }
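
     /*
      * Register packing used above, for reference: mdio_command[2]
      * holds the low three bits of phy_id in bits 5-7 and the 5-bit
      * register number in bits 0-4; mdio_command[3] holds the high two
      * bits of phy_id in bits 0-1, the write flag in bit 2 and the
      * GO/busy bit in bit 7, which the loop polls until it clears.
      */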
 488 
 489 static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
 490 {
 491         unsigned long flags;
 492         int ret;
 493 
 494         spin_lock_irqsave(&mdio_lock, flags);
 495         ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
 496         spin_unlock_irqrestore(&mdio_lock, flags);
 497 #if DEBUG_MDIO
 498         printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
 499                phy_id, location, ret);
 500 #endif
 501         return ret;
 502 }
 503 
 504 static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
 505                              u16 val)
 506 {
 507         unsigned long flags;
 508         int ret;
 509 
 510         spin_lock_irqsave(&mdio_lock, flags);
 511         ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
 512         spin_unlock_irqrestore(&mdio_lock, flags);
 513 #if DEBUG_MDIO
 514         printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
 515                bus->name, phy_id, location, val, ret);
 516 #endif
 517         return ret;
 518 }
 519 
 520 static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
 521 {
 522         int err;
 523 
 524         if (!(mdio_bus = mdiobus_alloc()))
 525                 return -ENOMEM;
 526 
 527         mdio_regs = regs;
 528         __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
 529         spin_lock_init(&mdio_lock);
 530         mdio_bus->name = "IXP4xx MII Bus";
 531         mdio_bus->read = &ixp4xx_mdio_read;
 532         mdio_bus->write = &ixp4xx_mdio_write;
 533         snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
 534 
 535         if ((err = mdiobus_register(mdio_bus)))
 536                 mdiobus_free(mdio_bus);
 537         return err;
 538 }
 539 
 540 static void ixp4xx_mdio_remove(void)
 541 {
 542         mdiobus_unregister(mdio_bus);
 543         mdiobus_free(mdio_bus);
 544 }
 545 
 546 
 547 static void ixp4xx_adjust_link(struct net_device *dev)
 548 {
 549         struct port *port = netdev_priv(dev);
 550         struct phy_device *phydev = dev->phydev;
 551 
 552         if (!phydev->link) {
 553                 if (port->speed) {
 554                         port->speed = 0;
 555                         printk(KERN_INFO "%s: link down\n", dev->name);
 556                 }
 557                 return;
 558         }
 559 
 560         if (port->speed == phydev->speed && port->duplex == phydev->duplex)
 561                 return;
 562 
 563         port->speed = phydev->speed;
 564         port->duplex = phydev->duplex;
 565 
 566         if (port->duplex)
 567                 __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
 568                              &port->regs->tx_control[0]);
 569         else
 570                 __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
 571                              &port->regs->tx_control[0]);
 572 
 573         printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
 574                dev->name, port->speed, port->duplex ? "full" : "half");
 575 }
 576 
 577 
 578 static inline void debug_pkt(struct net_device *dev, const char *func,
 579                              u8 *data, int len)
 580 {
 581 #if DEBUG_PKT_BYTES
 582         int i;
 583 
 584         printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
 585         for (i = 0; i < len; i++) {
 586                 if (i >= DEBUG_PKT_BYTES)
 587                         break;
 588                 printk("%s%02X",
 589                        ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
 590                        data[i]);
 591         }
 592         printk("\n");
 593 #endif
 594 }
 595 
 596 
 597 static inline void debug_desc(u32 phys, struct desc *desc)
 598 {
 599 #if DEBUG_DESC
 600         printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
 601                " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
 602                phys, desc->next, desc->buf_len, desc->pkt_len,
 603                desc->data, desc->dest_id, desc->src_id, desc->flags,
 604                desc->qos, desc->padlen, desc->vlan_tci,
 605                desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
 606                desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
 607                desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
 608                desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
 609 #endif
 610 }
 611 
 612 static inline int queue_get_desc(unsigned int queue, struct port *port,
 613                                  int is_tx)
 614 {
 615         u32 phys, tab_phys, n_desc;
 616         struct desc *tab;
 617 
 618         if (!(phys = qmgr_get_entry(queue)))
 619                 return -1;
 620 
 621         phys &= ~0x1F; /* mask out non-address bits */
 622         tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
 623         tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
 624         n_desc = (phys - tab_phys) / sizeof(struct desc);
 625         BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
 626         debug_desc(phys, &tab[n_desc]);
 627         BUG_ON(tab[n_desc].next);
 628         return n_desc;
 629 }
 630 
 631 static inline void queue_put_desc(unsigned int queue, u32 phys,
 632                                   struct desc *desc)
 633 {
 634         debug_desc(phys, desc);
 635         BUG_ON(phys & 0x1F);
 636         qmgr_put_entry(queue, phys);
 637         /* Don't check for queue overflow here, we've allocated sufficient
 638            length and queues >= 32 don't support this check anyway. */
 639 }
 640 
 641 
 642 static inline void dma_unmap_tx(struct port *port, struct desc *desc)
 643 {
 644 #ifdef __ARMEB__
 645         dma_unmap_single(&port->netdev->dev, desc->data,
 646                          desc->buf_len, DMA_TO_DEVICE);
 647 #else
 648         dma_unmap_single(&port->netdev->dev, desc->data & ~3,
 649                          ALIGN((desc->data & 3) + desc->buf_len, 4),
 650                          DMA_TO_DEVICE);
 651 #endif
 652 }
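
     /*
      * On little-endian builds desc->data may point a few bytes into a
      * 4-byte-aligned bounce buffer (see eth_xmit()), so the unmap
      * rounds the address down and the length up to match the mapping
      * made at transmit time.
      */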
 653 
 654 
 655 static void eth_rx_irq(void *pdev)
 656 {
 657         struct net_device *dev = pdev;
 658         struct port *port = netdev_priv(dev);
 659 
 660 #if DEBUG_RX
 661         printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
 662 #endif
 663         qmgr_disable_irq(port->plat->rxq);
 664         napi_schedule(&port->napi);
 665 }
 666 
 667 static int eth_poll(struct napi_struct *napi, int budget)
 668 {
 669         struct port *port = container_of(napi, struct port, napi);
 670         struct net_device *dev = port->netdev;
 671         unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
 672         int received = 0;
 673 
 674 #if DEBUG_RX
 675         printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
 676 #endif
 677 
 678         while (received < budget) {
 679                 struct sk_buff *skb;
 680                 struct desc *desc;
 681                 int n;
 682 #ifdef __ARMEB__
 683                 struct sk_buff *temp;
 684                 u32 phys;
 685 #endif
 686 
 687                 if ((n = queue_get_desc(rxq, port, 0)) < 0) {
 688 #if DEBUG_RX
 689                         printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
 690                                dev->name);
 691 #endif
 692                         napi_complete(napi);
 693                         qmgr_enable_irq(rxq);
 694                         if (!qmgr_stat_below_low_watermark(rxq) &&
 695                             napi_reschedule(napi)) { /* not empty again */
 696 #if DEBUG_RX
 697                                 printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
 698                                        dev->name);
 699 #endif
 700                                 qmgr_disable_irq(rxq);
 701                                 continue;
 702                         }
 703 #if DEBUG_RX
 704                         printk(KERN_DEBUG "%s: eth_poll all done\n",
 705                                dev->name);
 706 #endif
 707                         return received; /* all work done */
 708                 }
 709 
 710                 desc = rx_desc_ptr(port, n);
 711 
 712 #ifdef __ARMEB__
 713                 if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
 714                         phys = dma_map_single(&dev->dev, skb->data,
 715                                               RX_BUFF_SIZE, DMA_FROM_DEVICE);
 716                         if (dma_mapping_error(&dev->dev, phys)) {
 717                                 dev_kfree_skb(skb);
 718                                 skb = NULL;
 719                         }
 720                 }
 721 #else
 722                 skb = netdev_alloc_skb(dev,
 723                                        ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
 724 #endif
 725 
 726                 if (!skb) {
 727                         dev->stats.rx_dropped++;
 728                         /* put the desc back on RX-ready queue */
 729                         desc->buf_len = MAX_MRU;
 730                         desc->pkt_len = 0;
 731                         queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
 732                         continue;
 733                 }
 734 
 735                 /* process received frame */
 736 #ifdef __ARMEB__
 737                 temp = skb;
 738                 skb = port->rx_buff_tab[n];
 739                 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
 740                                  RX_BUFF_SIZE, DMA_FROM_DEVICE);
 741 #else
 742                 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
 743                                         RX_BUFF_SIZE, DMA_FROM_DEVICE);
 744                 memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
 745                               ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
 746 #endif
 747                 skb_reserve(skb, NET_IP_ALIGN);
 748                 skb_put(skb, desc->pkt_len);
 749 
 750                 debug_pkt(dev, "eth_poll", skb->data, skb->len);
 751 
 752                 ixp_rx_timestamp(port, skb);
 753                 skb->protocol = eth_type_trans(skb, dev);
 754                 dev->stats.rx_packets++;
 755                 dev->stats.rx_bytes += skb->len;
 756                 netif_receive_skb(skb);
 757 
 758                 /* put the new buffer on RX-free queue */
 759 #ifdef __ARMEB__
 760                 port->rx_buff_tab[n] = temp;
 761                 desc->data = phys + NET_IP_ALIGN;
 762 #endif
 763                 desc->buf_len = MAX_MRU;
 764                 desc->pkt_len = 0;
 765                 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
 766                 received++;
 767         }
 768 
 769 #if DEBUG_RX
 770         printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
 771 #endif
 772         return received;                /* not all work done */
 773 }
 774 
 775 
 776 static void eth_txdone_irq(void *unused)
 777 {
 778         u32 phys;
 779 
 780 #if DEBUG_TX
 781         printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
 782 #endif
 783         while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
 784                 u32 npe_id, n_desc;
 785                 struct port *port;
 786                 struct desc *desc;
 787                 int start;
 788 
 789                 npe_id = phys & 3;
 790                 BUG_ON(npe_id >= MAX_NPES);
 791                 port = npe_port_tab[npe_id];
 792                 BUG_ON(!port);
 793                 phys &= ~0x1F; /* mask out non-address bits */
 794                 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
 795                 BUG_ON(n_desc >= TX_DESCS);
 796                 desc = tx_desc_ptr(port, n_desc);
 797                 debug_desc(phys, desc);
 798 
 799                 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
 800                         port->netdev->stats.tx_packets++;
 801                         port->netdev->stats.tx_bytes += desc->pkt_len;
 802 
 803                         dma_unmap_tx(port, desc);
 804 #if DEBUG_TX
 805                         printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
 806                                port->netdev->name, port->tx_buff_tab[n_desc]);
 807 #endif
 808                         free_buffer_irq(port->tx_buff_tab[n_desc]);
 809                         port->tx_buff_tab[n_desc] = NULL;
 810                 }
 811 
 812                 start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
 813                 queue_put_desc(port->plat->txreadyq, phys, desc);
 814                 if (start) { /* TX-ready queue was empty */
 815 #if DEBUG_TX
 816                         printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
 817                                port->netdev->name);
 818 #endif
 819                         netif_wake_queue(port->netdev);
 820                 }
 821         }
 822 }
 823 
 824 static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
 825 {
 826         struct port *port = netdev_priv(dev);
 827         unsigned int txreadyq = port->plat->txreadyq;
 828         int len, offset, bytes, n;
 829         void *mem;
 830         u32 phys;
 831         struct desc *desc;
 832 
 833 #if DEBUG_TX
 834         printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
 835 #endif
 836 
 837         if (unlikely(skb->len > MAX_MRU)) {
 838                 dev_kfree_skb(skb);
 839                 dev->stats.tx_errors++;
 840                 return NETDEV_TX_OK;
 841         }
 842 
 843         debug_pkt(dev, "eth_xmit", skb->data, skb->len);
 844 
 845         len = skb->len;
 846 #ifdef __ARMEB__
 847         offset = 0; /* no need to keep alignment */
 848         bytes = len;
 849         mem = skb->data;
 850 #else
 851         offset = (int)skb->data & 3; /* keep 32-bit alignment */
 852         bytes = ALIGN(offset + len, 4);
 853         if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
 854                 dev_kfree_skb(skb);
 855                 dev->stats.tx_dropped++;
 856                 return NETDEV_TX_OK;
 857         }
 858         memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
 859 #endif
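             /*
              * On little-endian builds the frame was just copied,
              * word-swapped, into the aligned bounce buffer "mem"; the
              * skb is freed at the end of eth_xmit() and tx_buff_tab[]
              * tracks mem instead.
              */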
 860 
 861         phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
 862         if (dma_mapping_error(&dev->dev, phys)) {
 863                 dev_kfree_skb(skb);
 864 #ifndef __ARMEB__
 865                 kfree(mem);
 866 #endif
 867                 dev->stats.tx_dropped++;
 868                 return NETDEV_TX_OK;
 869         }
 870 
 871         n = queue_get_desc(txreadyq, port, 1);
 872         BUG_ON(n < 0);
 873         desc = tx_desc_ptr(port, n);
 874 
 875 #ifdef __ARMEB__
 876         port->tx_buff_tab[n] = skb;
 877 #else
 878         port->tx_buff_tab[n] = mem;
 879 #endif
 880         desc->data = phys + offset;
 881         desc->buf_len = desc->pkt_len = len;
 882 
 883         /* NPE firmware pads short frames with zeros internally */
 884         wmb();
 885         queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
 886 
 887         if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
 888 #if DEBUG_TX
 889                 printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
 890 #endif
 891                 netif_stop_queue(dev);
 892                 /* we could have missed the TX-ready interrupt while
 893                    stopping the queue; if it is not really empty, wake it */
 894                 if (!qmgr_stat_below_low_watermark(txreadyq)) {
 895 #if DEBUG_TX
 896                         printk(KERN_DEBUG "%s: eth_xmit ready again\n",
 897                                dev->name);
 898 #endif
 899                         netif_wake_queue(dev);
 900                 }
 901         }
 902 
 903 #if DEBUG_TX
 904         printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
 905 #endif
 906 
 907         ixp_tx_timestamp(port, skb);
 908         skb_tx_timestamp(skb);
 909 
 910 #ifndef __ARMEB__
 911         dev_kfree_skb(skb);
 912 #endif
 913         return NETDEV_TX_OK;
 914 }
 915 
 916 
 917 static void eth_set_mcast_list(struct net_device *dev)
 918 {
 919         struct port *port = netdev_priv(dev);
 920         struct netdev_hw_addr *ha;
 921         u8 diffs[ETH_ALEN], *addr;
 922         int i;
 923         static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
 924 
 925         if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
 926                 for (i = 0; i < ETH_ALEN; i++) {
 927                         __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
 928                         __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
 929                 }
 930                 __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
 931                         &port->regs->rx_control[0]);
 932                 return;
 933         }
 934 
 935         if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
 936                 __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
 937                              &port->regs->rx_control[0]);
 938                 return;
 939         }
 940 
 941         eth_zero_addr(diffs);
 942 
 943         addr = NULL;
 944         netdev_for_each_mc_addr(ha, dev) {
 945                 if (!addr)
 946                         addr = ha->addr; /* first MAC address */
 947                 for (i = 0; i < ETH_ALEN; i++)
 948                         diffs[i] |= addr[i] ^ ha->addr[i];
 949         }
 950 
 951         for (i = 0; i < ETH_ALEN; i++) {
 952                 __raw_writel(addr[i], &port->regs->mcast_addr[i]);
 953                 __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
 954         }
 955 
 956         __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
 957                      &port->regs->rx_control[0]);
 958 }
 959 
 960 
 961 static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 962 {
 963         if (!netif_running(dev))
 964                 return -EINVAL;
 965 
 966         if (cpu_is_ixp46x()) {
 967                 if (cmd == SIOCSHWTSTAMP)
 968                         return hwtstamp_set(dev, req);
 969                 if (cmd == SIOCGHWTSTAMP)
 970                         return hwtstamp_get(dev, req);
 971         }
 972 
 973         return phy_mii_ioctl(dev->phydev, req, cmd);
 974 }
 975 
 976 /* ethtool support */
 977 
 978 static void ixp4xx_get_drvinfo(struct net_device *dev,
 979                                struct ethtool_drvinfo *info)
 980 {
 981         struct port *port = netdev_priv(dev);
 982 
 983         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 984         snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
 985                  port->firmware[0], port->firmware[1],
 986                  port->firmware[2], port->firmware[3]);
 987         strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
 988 }
 989 
 990 int ixp46x_phc_index = -1;
 991 EXPORT_SYMBOL_GPL(ixp46x_phc_index);
 992 
 993 static int ixp4xx_get_ts_info(struct net_device *dev,
 994                               struct ethtool_ts_info *info)
 995 {
 996         if (!cpu_is_ixp46x()) {
 997                 info->so_timestamping =
 998                         SOF_TIMESTAMPING_TX_SOFTWARE |
 999                         SOF_TIMESTAMPING_RX_SOFTWARE |
1000                         SOF_TIMESTAMPING_SOFTWARE;
1001                 info->phc_index = -1;
1002                 return 0;
1003         }
1004         info->so_timestamping =
1005                 SOF_TIMESTAMPING_TX_HARDWARE |
1006                 SOF_TIMESTAMPING_RX_HARDWARE |
1007                 SOF_TIMESTAMPING_RAW_HARDWARE;
1008         info->phc_index = ixp46x_phc_index;
1009         info->tx_types =
1010                 (1 << HWTSTAMP_TX_OFF) |
1011                 (1 << HWTSTAMP_TX_ON);
1012         info->rx_filters =
1013                 (1 << HWTSTAMP_FILTER_NONE) |
1014                 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1015                 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
1016         return 0;
1017 }
1018 
1019 static const struct ethtool_ops ixp4xx_ethtool_ops = {
1020         .get_drvinfo = ixp4xx_get_drvinfo,
1021         .nway_reset = phy_ethtool_nway_reset,
1022         .get_link = ethtool_op_get_link,
1023         .get_ts_info = ixp4xx_get_ts_info,
1024         .get_link_ksettings = phy_ethtool_get_link_ksettings,
1025         .set_link_ksettings = phy_ethtool_set_link_ksettings,
1026 };
1027 
1028 
1029 static int request_queues(struct port *port)
1030 {
1031         int err;
1032 
1033         err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
1034                                  "%s:RX-free", port->netdev->name);
1035         if (err)
1036                 return err;
1037 
1038         err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
1039                                  "%s:RX", port->netdev->name);
1040         if (err)
1041                 goto rel_rxfree;
1042 
1043         err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
1044                                  "%s:TX", port->netdev->name);
1045         if (err)
1046                 goto rel_rx;
1047 
1048         err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
1049                                  "%s:TX-ready", port->netdev->name);
1050         if (err)
1051                 goto rel_tx;
1052 
1053         /* TX-done queue handles skbs sent out by the NPEs */
1054         if (!ports_open) {
1055                 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
1056                                          "%s:TX-done", DRV_NAME);
1057                 if (err)
1058                         goto rel_txready;
1059         }
1060         return 0;
1061 
1062 rel_txready:
1063         qmgr_release_queue(port->plat->txreadyq);
1064 rel_tx:
1065         qmgr_release_queue(TX_QUEUE(port->id));
1066 rel_rx:
1067         qmgr_release_queue(port->plat->rxq);
1068 rel_rxfree:
1069         qmgr_release_queue(RXFREE_QUEUE(port->id));
1070         printk(KERN_DEBUG "%s: unable to request hardware queues\n",
1071                port->netdev->name);
1072         return err;
1073 }
1074 
1075 static void release_queues(struct port *port)
1076 {
1077         qmgr_release_queue(RXFREE_QUEUE(port->id));
1078         qmgr_release_queue(port->plat->rxq);
1079         qmgr_release_queue(TX_QUEUE(port->id));
1080         qmgr_release_queue(port->plat->txreadyq);
1081 
1082         if (!ports_open)
1083                 qmgr_release_queue(TXDONE_QUEUE);
1084 }
1085 
1086 static int init_queues(struct port *port)
1087 {
1088         int i;
1089 
1090         if (!ports_open) {
1091                 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
1092                                            POOL_ALLOC_SIZE, 32, 0);
1093                 if (!dma_pool)
1094                         return -ENOMEM;
1095         }
1096 
1097         if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
1098                                               &port->desc_tab_phys)))
1099                 return -ENOMEM;
1100         memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
1101         memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
1102         memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
1103 
1104         /* Setup RX buffers */
1105         for (i = 0; i < RX_DESCS; i++) {
1106                 struct desc *desc = rx_desc_ptr(port, i);
1107                 buffer_t *buff; /* skb or kmalloc()ated memory */
1108                 void *data;
1109 #ifdef __ARMEB__
1110                 if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
1111                         return -ENOMEM;
1112                 data = buff->data;
1113 #else
1114                 if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
1115                         return -ENOMEM;
1116                 data = buff;
1117 #endif
1118                 desc->buf_len = MAX_MRU;
1119                 desc->data = dma_map_single(&port->netdev->dev, data,
1120                                             RX_BUFF_SIZE, DMA_FROM_DEVICE);
1121                 if (dma_mapping_error(&port->netdev->dev, desc->data)) {
1122                         free_buffer(buff);
1123                         return -EIO;
1124                 }
1125                 desc->data += NET_IP_ALIGN;
1126                 port->rx_buff_tab[i] = buff;
1127         }
1128 
1129         return 0;
1130 }
1131 
1132 static void destroy_queues(struct port *port)
1133 {
1134         int i;
1135 
1136         if (port->desc_tab) {
1137                 for (i = 0; i < RX_DESCS; i++) {
1138                         struct desc *desc = rx_desc_ptr(port, i);
1139                         buffer_t *buff = port->rx_buff_tab[i];
1140                         if (buff) {
1141                                 dma_unmap_single(&port->netdev->dev,
1142                                                  desc->data - NET_IP_ALIGN,
1143                                                  RX_BUFF_SIZE, DMA_FROM_DEVICE);
1144                                 free_buffer(buff);
1145                         }
1146                 }
1147                 for (i = 0; i < TX_DESCS; i++) {
1148                         struct desc *desc = tx_desc_ptr(port, i);
1149                         buffer_t *buff = port->tx_buff_tab[i];
1150                         if (buff) {
1151                                 dma_unmap_tx(port, desc);
1152                                 free_buffer(buff);
1153                         }
1154                 }
1155                 dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
1156                 port->desc_tab = NULL;
1157         }
1158 
1159         if (!ports_open && dma_pool) {
1160                 dma_pool_destroy(dma_pool);
1161                 dma_pool = NULL;
1162         }
1163 }
1164 
1165 static int eth_open(struct net_device *dev)
1166 {
1167         struct port *port = netdev_priv(dev);
1168         struct npe *npe = port->npe;
1169         struct msg msg;
1170         int i, err;
1171 
1172         if (!npe_running(npe)) {
1173                 err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
1174                 if (err)
1175                         return err;
1176 
1177                 if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
1178                         printk(KERN_ERR "%s: %s not responding\n", dev->name,
1179                                npe_name(npe));
1180                         return -EIO;
1181                 }
1182                 port->firmware[0] = msg.byte4;
1183                 port->firmware[1] = msg.byte5;
1184                 port->firmware[2] = msg.byte6;
1185                 port->firmware[3] = msg.byte7;
1186         }
1187 
1188         memset(&msg, 0, sizeof(msg));
1189         msg.cmd = NPE_VLAN_SETRXQOSENTRY;
1190         msg.eth_id = port->id;
1191         msg.byte5 = port->plat->rxq | 0x80;
1192         msg.byte7 = port->plat->rxq << 4;
1193         for (i = 0; i < 8; i++) {
1194                 msg.byte3 = i;
1195                 if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
1196                         return -EIO;
1197         }
1198 
1199         msg.cmd = NPE_EDB_SETPORTADDRESS;
1200         msg.eth_id = PHYSICAL_ID(port->id);
1201         msg.byte2 = dev->dev_addr[0];
1202         msg.byte3 = dev->dev_addr[1];
1203         msg.byte4 = dev->dev_addr[2];
1204         msg.byte5 = dev->dev_addr[3];
1205         msg.byte6 = dev->dev_addr[4];
1206         msg.byte7 = dev->dev_addr[5];
1207         if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
1208                 return -EIO;
1209 
1210         memset(&msg, 0, sizeof(msg));
1211         msg.cmd = NPE_FW_SETFIREWALLMODE;
1212         msg.eth_id = port->id;
1213         if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
1214                 return -EIO;
1215 
1216         if ((err = request_queues(port)) != 0)
1217                 return err;
1218 
1219         if ((err = init_queues(port)) != 0) {
1220                 destroy_queues(port);
1221                 release_queues(port);
1222                 return err;
1223         }
1224 
1225         port->speed = 0;        /* force "link up" message */
1226         phy_start(dev->phydev);
1227 
1228         for (i = 0; i < ETH_ALEN; i++)
1229                 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
1230         __raw_writel(0x08, &port->regs->random_seed);
1231         __raw_writel(0x12, &port->regs->partial_empty_threshold);
1232         __raw_writel(0x30, &port->regs->partial_full_threshold);
1233         __raw_writel(0x08, &port->regs->tx_start_bytes);
1234         __raw_writel(0x15, &port->regs->tx_deferral);
1235         __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
1236         __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
1237         __raw_writel(0x80, &port->regs->slot_time);
1238         __raw_writel(0x01, &port->regs->int_clock_threshold);
1239 
1240         /* Populate queues with buffers, no failure after this point */
1241         for (i = 0; i < TX_DESCS; i++)
1242                 queue_put_desc(port->plat->txreadyq,
1243                                tx_desc_phys(port, i), tx_desc_ptr(port, i));
1244 
1245         for (i = 0; i < RX_DESCS; i++)
1246                 queue_put_desc(RXFREE_QUEUE(port->id),
1247                                rx_desc_phys(port, i), rx_desc_ptr(port, i));
1248 
1249         __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
1250         __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
1251         __raw_writel(0, &port->regs->rx_control[1]);
1252         __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
1253 
1254         napi_enable(&port->napi);
1255         eth_set_mcast_list(dev);
1256         netif_start_queue(dev);
1257 
1258         qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
1259                      eth_rx_irq, dev);
1260         if (!ports_open) {
1261                 qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
1262                              eth_txdone_irq, NULL);
1263                 qmgr_enable_irq(TXDONE_QUEUE);
1264         }
1265         ports_open++;
1266         /* we may already have RX data; eth_poll() re-enables the IRQ */
1267         napi_schedule(&port->napi);
1268         return 0;
1269 }
1270 
1271 static int eth_close(struct net_device *dev)
1272 {
1273         struct port *port = netdev_priv(dev);
1274         struct msg msg;
1275         int buffs = RX_DESCS; /* allocated RX buffers */
1276         int i;
1277 
1278         ports_open--;
1279         qmgr_disable_irq(port->plat->rxq);
1280         napi_disable(&port->napi);
1281         netif_stop_queue(dev);
1282 
1283         while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
1284                 buffs--;
1285 
1286         memset(&msg, 0, sizeof(msg));
1287         msg.cmd = NPE_SETLOOPBACK_MODE;
1288         msg.eth_id = port->id;
1289         msg.byte3 = 1;
1290         if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
1291                 printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
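
             /*
              * Why frames are injected below: with the port in loopback,
              * a dummy frame queued for TX comes straight back on RX,
              * forcing the NPE to hand back the RX buffers it still
              * holds so they can be counted off and freed.
              */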
1292 
1293         i = 0;
1294         do {                    /* drain RX buffers */
1295                 while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
1296                         buffs--;
1297                 if (!buffs)
1298                         break;
1299                 if (qmgr_stat_empty(TX_QUEUE(port->id))) {
1300                         /* we have to inject some packet */
1301                         struct desc *desc;
1302                         u32 phys;
1303                         int n = queue_get_desc(port->plat->txreadyq, port, 1);
1304                         BUG_ON(n < 0);
1305                         desc = tx_desc_ptr(port, n);
1306                         phys = tx_desc_phys(port, n);
1307                         desc->buf_len = desc->pkt_len = 1;
1308                         wmb();
1309                         queue_put_desc(TX_QUEUE(port->id), phys, desc);
1310                 }
1311                 udelay(1);
1312         } while (++i < MAX_CLOSE_WAIT);
1313 
1314         if (buffs)
1315                 printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
1316                        " left in NPE\n", dev->name, buffs);
1317 #if DEBUG_CLOSE
1318         if (!buffs)
1319                 printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
1320 #endif
1321 
1322         buffs = TX_DESCS;
1323         while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
1324                 buffs--; /* cancel TX */
1325 
1326         i = 0;
1327         do {
1328                 while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
1329                         buffs--;
1330                 if (!buffs)
1331                         break;
1332         } while (++i < MAX_CLOSE_WAIT);
1333 
1334         if (buffs)
1335                 printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
1336                        "left in NPE\n", dev->name, buffs);
1337 #if DEBUG_CLOSE
1338         if (!buffs)
1339                 printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
1340 #endif
1341 
1342         msg.byte3 = 0;
1343         if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
1344                 printk(KERN_CRIT "%s: unable to disable loopback\n",
1345                        dev->name);
1346 
1347         phy_stop(dev->phydev);
1348 
1349         if (!ports_open)
1350                 qmgr_disable_irq(TXDONE_QUEUE);
1351         destroy_queues(port);
1352         release_queues(port);
1353         return 0;
1354 }
1355 
1356 static const struct net_device_ops ixp4xx_netdev_ops = {
1357         .ndo_open = eth_open,
1358         .ndo_stop = eth_close,
1359         .ndo_start_xmit = eth_xmit,
1360         .ndo_set_rx_mode = eth_set_mcast_list,
1361         .ndo_do_ioctl = eth_ioctl,
1362         .ndo_set_mac_address = eth_mac_addr,
1363         .ndo_validate_addr = eth_validate_addr,
1364 };
1365 
1366 static int ixp4xx_eth_probe(struct platform_device *pdev)
1367 {
1368         struct port *port;
1369         struct net_device *dev;
1370         struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
1371         struct phy_device *phydev = NULL;
1372         u32 regs_phys;
1373         char phy_id[MII_BUS_ID_SIZE + 3];
1374         int err;
1375 
1376         if (!(dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct port))))
1377                 return -ENOMEM;
1378 
1379         SET_NETDEV_DEV(dev, &pdev->dev);
1380         port = netdev_priv(dev);
1381         port->netdev = dev;
1382         port->id = pdev->id;
1383 
1384         switch (port->id) {
1385         case IXP4XX_ETH_NPEA:
1386                 /* If the MDIO bus is not up yet, defer probe */
1387                 if (!mdio_bus)
1388                         return -EPROBE_DEFER;
1389                 port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
1390                 regs_phys  = IXP4XX_EthA_BASE_PHYS;
1391                 break;
1392         case IXP4XX_ETH_NPEB:
1393                 /*
1394                  * On all except IXP43x, NPE-B is used for the MDIO bus.
1395                  * If there is no NPE-B in the feature set, bail out, else
1396                  * register the MDIO bus.
1397                  */
1398                 if (!cpu_is_ixp43x()) {
1399                         if (!(ixp4xx_read_feature_bits() &
1400                               IXP4XX_FEATURE_NPEB_ETH0))
1401                                 return -ENODEV;
1402                         /* Else register the MDIO bus on NPE-B */
1403                         if ((err = ixp4xx_mdio_register(IXP4XX_EthB_BASE_VIRT)))
1404                                 return err;
1405                 }
1406                 if (!mdio_bus)
1407                         return -EPROBE_DEFER;
1408                 port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
1409                 regs_phys  = IXP4XX_EthB_BASE_PHYS;
1410                 break;
1411         case IXP4XX_ETH_NPEC:
1412                 /*
1413                  * IXP43x lacks NPE-B and uses NPE-C for the MDIO bus access,
1414                  * if there is no NPE-C, no bus, nothing works, so bail out.
1415                  */
1416                 if (cpu_is_ixp43x()) {
1417                         if (!(ixp4xx_read_feature_bits() &
1418                               IXP4XX_FEATURE_NPEC_ETH))
1419                                 return -ENODEV;
1420                         /* Else register the MDIO bus on NPE-C */
1421                         if ((err = ixp4xx_mdio_register(IXP4XX_EthC_BASE_VIRT)))
1422                                 return err;
1423                 }
1424                 if (!mdio_bus)
1425                         return -EPROBE_DEFER;
1426                 port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
1427                 regs_phys  = IXP4XX_EthC_BASE_PHYS;
1428                 break;
1429         default:
1430                 return -ENODEV;
1431         }
1432 
1433         dev->netdev_ops = &ixp4xx_netdev_ops;
1434         dev->ethtool_ops = &ixp4xx_ethtool_ops;
1435         dev->tx_queue_len = 100;
1436 
1437         netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
1438 
1439         if (!(port->npe = npe_request(NPE_ID(port->id))))
1440                 return -EIO;
1441 
1442         port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
1443         if (!port->mem_res) {
1444                 err = -EBUSY;
1445                 goto err_npe_rel;
1446         }
1447 
1448         port->plat = plat;
1449         npe_port_tab[NPE_ID(port->id)] = port;
1450         memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
1451 
1452         platform_set_drvdata(pdev, dev);
1453 
1454         __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
1455                      &port->regs->core_control);
1456         udelay(50);
1457         __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1458         udelay(50);
1459 
1460         snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
1461                 mdio_bus->id, plat->phy);
1462         phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
1463                              PHY_INTERFACE_MODE_MII);
1464         if (IS_ERR(phydev)) {
1465                 err = PTR_ERR(phydev);
1466                 goto err_free_mem;
1467         }
1468 
1469         phydev->irq = PHY_POLL;
1470 
1471         if ((err = register_netdev(dev)))
1472                 goto err_phy_dis;
1473 
1474         printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
1475                npe_name(port->npe));
1476 
1477         return 0;
1478 
1479 err_phy_dis:
1480         phy_disconnect(phydev);
1481 err_free_mem:
1482         npe_port_tab[NPE_ID(port->id)] = NULL;
1483         release_resource(port->mem_res);
1484 err_npe_rel:
1485         npe_release(port->npe);
1486         return err;
1487 }
1488 
1489 static int ixp4xx_eth_remove(struct platform_device *pdev)
1490 {
1491         struct net_device *dev = platform_get_drvdata(pdev);
1492         struct phy_device *phydev = dev->phydev;
1493         struct port *port = netdev_priv(dev);
1494 
1495         unregister_netdev(dev);
1496         phy_disconnect(phydev);
1497         ixp4xx_mdio_remove();
1498         npe_port_tab[NPE_ID(port->id)] = NULL;
1499         npe_release(port->npe);
1500         release_resource(port->mem_res);
1501         return 0;
1502 }
1503 
1504 static struct platform_driver ixp4xx_eth_driver = {
1505         .driver.name    = DRV_NAME,
1506         .probe          = ixp4xx_eth_probe,
1507         .remove         = ixp4xx_eth_remove,
1508 };
1509 module_platform_driver(ixp4xx_eth_driver);
1510 
1511 MODULE_AUTHOR("Krzysztof Halasa");
1512 MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
1513 MODULE_LICENSE("GPL v2");
1514 MODULE_ALIAS("platform:ixp4xx_eth");
