root/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c


DEFINITIONS

This source file includes the following definitions.
  1. pch_ptp_match
  2. pch_rx_timestamp
  3. pch_tx_timestamp
  4. hwtstamp_ioctl
  5. pch_gbe_mac_load_mac_addr
  6. pch_gbe_mac_read_mac_addr
  7. pch_gbe_wait_clr_bit
  8. pch_gbe_mac_mar_set
  9. pch_gbe_mac_reset_hw
  10. pch_gbe_disable_mac_rx
  11. pch_gbe_enable_mac_rx
  12. pch_gbe_mac_init_rx_addrs
  13. pch_gbe_mac_force_mac_fc
  14. pch_gbe_mac_set_wol_event
  15. pch_gbe_mac_ctrl_miim
  16. pch_gbe_mac_set_pause_packet
  17. pch_gbe_alloc_queues
  18. pch_gbe_init_stats
  19. pch_gbe_init_phy
  20. pch_gbe_mdio_read
  21. pch_gbe_mdio_write
  22. pch_gbe_reset_task
  23. pch_gbe_reinit_locked
  24. pch_gbe_reset
  25. pch_gbe_free_irq
  26. pch_gbe_irq_disable
  27. pch_gbe_irq_enable
  28. pch_gbe_setup_tctl
  29. pch_gbe_configure_tx
  30. pch_gbe_setup_rctl
  31. pch_gbe_configure_rx
  32. pch_gbe_unmap_and_free_tx_resource
  33. pch_gbe_unmap_and_free_rx_resource
  34. pch_gbe_clean_tx_ring
  35. pch_gbe_clean_rx_ring
  36. pch_gbe_set_rgmii_ctrl
  37. pch_gbe_set_mode
  38. pch_gbe_watchdog
  39. pch_gbe_tx_queue
  40. pch_gbe_update_stats
  41. pch_gbe_disable_dma_rx
  42. pch_gbe_enable_dma_rx
  43. pch_gbe_intr
  44. pch_gbe_alloc_rx_buffers
  45. pch_gbe_alloc_rx_buffers_pool
  46. pch_gbe_alloc_tx_buffers
  47. pch_gbe_clean_tx
  48. pch_gbe_clean_rx
  49. pch_gbe_setup_tx_resources
  50. pch_gbe_setup_rx_resources
  51. pch_gbe_free_tx_resources
  52. pch_gbe_free_rx_resources
  53. pch_gbe_request_irq
  54. pch_gbe_up
  55. pch_gbe_down
  56. pch_gbe_sw_init
  57. pch_gbe_open
  58. pch_gbe_stop
  59. pch_gbe_xmit_frame
  60. pch_gbe_set_multi
  61. pch_gbe_set_mac
  62. pch_gbe_change_mtu
  63. pch_gbe_set_features
  64. pch_gbe_ioctl
  65. pch_gbe_tx_timeout
  66. pch_gbe_napi_poll
  67. pch_gbe_netpoll
  68. pch_gbe_io_error_detected
  69. pch_gbe_io_slot_reset
  70. pch_gbe_io_resume
  71. __pch_gbe_suspend
  72. pch_gbe_suspend
  73. pch_gbe_resume
  74. pch_gbe_shutdown
  75. pch_gbe_remove
  76. pch_gbe_probe
  77. pch_gbe_minnow_platform_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 1999 - 2010 Intel Corporation.
   4  * Copyright (C) 2010 - 2012 LAPIS SEMICONDUCTOR CO., LTD.
   5  *
   6  * This code was derived from the Intel e1000e Linux driver.
   7  */
   8 
   9 #include "pch_gbe.h"
  10 #include "pch_gbe_phy.h"
  11 #include <linux/module.h>
  12 #include <linux/net_tstamp.h>
  13 #include <linux/ptp_classify.h>
  14 #include <linux/gpio.h>
  15 
  16 #define DRV_VERSION     "1.01"
  17 const char pch_driver_version[] = DRV_VERSION;
  18 
  19 #define PCH_GBE_MAR_ENTRIES             16
  20 #define PCH_GBE_SHORT_PKT               64
  21 #define DSC_INIT16                      0xC000
  22 #define PCH_GBE_DMA_ALIGN               0
  23 #define PCH_GBE_DMA_PADDING             2
  24 #define PCH_GBE_WATCHDOG_PERIOD         (5 * HZ)        /* watchdog time */
  25 #define PCH_GBE_PCI_BAR                 1
  26 #define PCH_GBE_RESERVE_MEMORY          0x200000        /* 2MB */
  27 
  28 #define PCI_DEVICE_ID_INTEL_IOH1_GBE            0x8802
  29 
  30 #define PCI_DEVICE_ID_ROHM_ML7223_GBE           0x8013
  31 #define PCI_DEVICE_ID_ROHM_ML7831_GBE           0x8802
  32 
  33 #define PCH_GBE_TX_WEIGHT         64
  34 #define PCH_GBE_RX_WEIGHT         64
  35 #define PCH_GBE_RX_BUFFER_WRITE   16
  36 
  37 /* Initialize the wake-on-LAN settings */
  38 #define PCH_GBE_WL_INIT_SETTING    (PCH_GBE_WLC_MP)
  39 
  40 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
  41         PCH_GBE_CHIP_TYPE_INTERNAL | \
  42         PCH_GBE_RGMII_MODE_RGMII     \
  43         )
  44 
   45 /* Frame and Rx buffer size values */
  46 #define PCH_GBE_MAX_RX_BUFFER_SIZE      0x2880
  47 #define PCH_GBE_MAX_JUMBO_FRAME_SIZE    10318
  48 #define PCH_GBE_FRAME_SIZE_2048         2048
  49 #define PCH_GBE_FRAME_SIZE_4096         4096
  50 #define PCH_GBE_FRAME_SIZE_8192         8192
  51 
  52 #define PCH_GBE_GET_DESC(R, i, type)    (&(((struct type *)((R).desc))[i]))
  53 #define PCH_GBE_RX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_rx_desc)
  54 #define PCH_GBE_TX_DESC(R, i)           PCH_GBE_GET_DESC(R, i, pch_gbe_tx_desc)
  55 #define PCH_GBE_DESC_UNUSED(R) \
  56         ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
  57         (R)->next_to_clean - (R)->next_to_use - 1)
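      /* e.g. with count = 8, next_to_clean = 2 and next_to_use = 5 this
       * evaluates to 8 + 2 - 5 - 1 = 4 free descriptors: three are still
       * in flight, and the trailing -1 keeps one slot permanently unused,
       * the usual way of telling a full ring apart from an empty one.
       */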
  58 
  59 /* Pause packet value */
  60 #define PCH_GBE_PAUSE_PKT1_VALUE    0x00C28001
  61 #define PCH_GBE_PAUSE_PKT2_VALUE    0x00000100
  62 #define PCH_GBE_PAUSE_PKT4_VALUE    0x01000888
  63 #define PCH_GBE_PAUSE_PKT5_VALUE    0x0000FFFF
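      /* Read as little-endian byte streams, these constants supply the fixed
       * parts of an IEEE 802.3x PAUSE frame: destination 01:80:c2:00:00:01
       * (PKT1 and the low half of PKT2), MAC control EtherType 0x8808 with
       * opcode 0x0001 (PKT4) and the maximum pause time 0xFFFF (PKT5);
       * pch_gbe_mac_set_pause_packet() fills the station's own MAC address
       * into the remaining PKT2/PKT3 bytes at run time.
       */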
  64 
  65 
   66 /* This defines the bits that are set in the Interrupt Mask
   67  * Set/Read Register.  Each bit is documented below:
   68  *   o RX_DMA_CMPLT = Receive DMA completion
   69  *   o RX_DSC_EMP   = Receive descriptor ring empty
   70  *   o RX_FIFO_ERR  = Receive FIFO error
   71  *   o WOL_DET      = Wake-on-LAN event detected
   72  *   o TX_CMPLT     = Transmit completion
   73  */
  74 #define PCH_GBE_INT_ENABLE_MASK ( \
  75         PCH_GBE_INT_RX_DMA_CMPLT |    \
  76         PCH_GBE_INT_RX_DSC_EMP   |    \
  77         PCH_GBE_INT_RX_FIFO_ERR  |    \
  78         PCH_GBE_INT_WOL_DET      |    \
  79         PCH_GBE_INT_TX_CMPLT          \
  80         )
  81 
  82 #define PCH_GBE_INT_DISABLE_ALL         0
  83 
  84 /* Macros for ieee1588 */
  85 /* 0x40 Time Synchronization Channel Control Register Bits */
  86 #define MASTER_MODE   (1<<0)
  87 #define SLAVE_MODE    (0)
  88 #define V2_MODE       (1<<31)
  89 #define CAP_MODE0     (0)
  90 #define CAP_MODE2     (1<<17)
  91 
  92 /* 0x44 Time Synchronization Channel Event Register Bits */
  93 #define TX_SNAPSHOT_LOCKED (1<<0)
  94 #define RX_SNAPSHOT_LOCKED (1<<1)
  95 
  96 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
  97 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
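      /* These are the standard PTP multicast destination addresses: the L4
       * value is the MAC mapping of the IPv4 group 224.0.1.129 used by
       * PTP over UDP, and the L2 value is the IEEE 1588 PTP-over-Ethernet
       * multicast address.
       */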
  98 
  99 #define MINNOW_PHY_RESET_GPIO           13
 100 
 101 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 102 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 103                                int data);
 104 static void pch_gbe_set_multi(struct net_device *netdev);
 105 
 106 static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
 107 {
 108         u8 *data = skb->data;
 109         unsigned int offset;
 110         u16 *hi, *id;
 111         u32 lo;
 112 
 113         if (ptp_classify_raw(skb) == PTP_CLASS_NONE)
 114                 return 0;
 115 
 116         offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
 117 
 118         if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
 119                 return 0;
 120 
 121         hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
 122         id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
 123 
 124         memcpy(&lo, &hi[1], sizeof(lo));
 125 
 126         return (uid_hi == *hi &&
 127                 uid_lo == lo &&
 128                 seqid  == *id);
 129 }
 130 
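/* pch_rx_timestamp - attach the hardware Rx time stamp to an skb
 * The ieee1588 unit latches the source UUID, sequence id and arrival time
 * of the last PTP event frame it saw; the time stamp is copied into the
 * skb only when pch_ptp_match() confirms that this skb is that frame, and
 * the RX_SNAPSHOT_LOCKED event bit is written back in either case so the
 * unit can take a new snapshot.
 */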
 131 static void
 132 pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 133 {
 134         struct skb_shared_hwtstamps *shhwtstamps;
 135         struct pci_dev *pdev;
 136         u64 ns;
 137         u32 hi, lo, val;
 138         u16 uid, seq;
 139 
 140         if (!adapter->hwts_rx_en)
 141                 return;
 142 
 143         /* Get ieee1588's dev information */
 144         pdev = adapter->ptp_pdev;
 145 
 146         val = pch_ch_event_read(pdev);
 147 
 148         if (!(val & RX_SNAPSHOT_LOCKED))
 149                 return;
 150 
 151         lo = pch_src_uuid_lo_read(pdev);
 152         hi = pch_src_uuid_hi_read(pdev);
 153 
 154         uid = hi & 0xffff;
 155         seq = (hi >> 16) & 0xffff;
 156 
 157         if (!pch_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
 158                 goto out;
 159 
 160         ns = pch_rx_snap_read(pdev);
 161 
 162         shhwtstamps = skb_hwtstamps(skb);
 163         memset(shhwtstamps, 0, sizeof(*shhwtstamps));
 164         shhwtstamps->hwtstamp = ns_to_ktime(ns);
 165 out:
 166         pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
 167 }
 168 
 169 static void
 170 pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
 171 {
 172         struct skb_shared_hwtstamps shhwtstamps;
 173         struct pci_dev *pdev;
 174         struct skb_shared_info *shtx;
 175         u64 ns;
 176         u32 cnt, val;
 177 
 178         shtx = skb_shinfo(skb);
 179         if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
 180                 return;
 181 
 182         shtx->tx_flags |= SKBTX_IN_PROGRESS;
 183 
 184         /* Get ieee1588's dev information */
 185         pdev = adapter->ptp_pdev;
 186 
 187         /*
 188          * This really stinks, but we have to poll for the Tx time stamp.
 189          */
 190         for (cnt = 0; cnt < 100; cnt++) {
 191                 val = pch_ch_event_read(pdev);
 192                 if (val & TX_SNAPSHOT_LOCKED)
 193                         break;
 194                 udelay(1);
 195         }
 196         if (!(val & TX_SNAPSHOT_LOCKED)) {
 197                 shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
 198                 return;
 199         }
 200 
 201         ns = pch_tx_snap_read(pdev);
 202 
 203         memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 204         shhwtstamps.hwtstamp = ns_to_ktime(ns);
 205         skb_tstamp_tx(skb, &shhwtstamps);
 206 
 207         pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED);
 208 }
 209 
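/* hwtstamp_ioctl - configure hardware time stamping
 * The request carries a struct hwtstamp_config in ifr->ifr_data (the
 * SIOCSHWTSTAMP convention): tx_type switches Tx time stamping on or off,
 * and rx_filter selects which ieee1588 channel mode is programmed through
 * pch_ch_control_write(), with the L2/L4 PTP multicast station address
 * installed for the V2 filters.
 */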
 210 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 211 {
 212         struct hwtstamp_config cfg;
 213         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 214         struct pci_dev *pdev;
 215         u8 station[20];
 216 
 217         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
 218                 return -EFAULT;
 219 
 220         if (cfg.flags) /* reserved for future extensions */
 221                 return -EINVAL;
 222 
 223         /* Get ieee1588's dev information */
 224         pdev = adapter->ptp_pdev;
 225 
 226         if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
 227                 return -ERANGE;
 228 
 229         switch (cfg.rx_filter) {
 230         case HWTSTAMP_FILTER_NONE:
 231                 adapter->hwts_rx_en = 0;
 232                 break;
 233         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 234                 adapter->hwts_rx_en = 0;
 235                 pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
 236                 break;
 237         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 238                 adapter->hwts_rx_en = 1;
 239                 pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
 240                 break;
 241         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 242                 adapter->hwts_rx_en = 1;
 243                 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
 244                 strcpy(station, PTP_L4_MULTICAST_SA);
 245                 pch_set_station_address(station, pdev);
 246                 break;
 247         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 248                 adapter->hwts_rx_en = 1;
 249                 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
 250                 strcpy(station, PTP_L2_MULTICAST_SA);
 251                 pch_set_station_address(station, pdev);
 252                 break;
 253         default:
 254                 return -ERANGE;
 255         }
 256 
 257         adapter->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;
 258 
 259         /* Clear out any old time stamps. */
 260         pch_ch_event_write(pdev, TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED);
 261 
 262         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 263 }
 264 
 265 static inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
 266 {
 267         iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
 268 }
 269 
 270 /**
 271  * pch_gbe_mac_read_mac_addr - Read MAC address
 272  * @hw:             Pointer to the HW structure
 273  * Returns:
 274  *      0:                      Successful.
 275  */
 276 static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
 277 {
 278         struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 279         u32  adr1a, adr1b;
 280 
 281         adr1a = ioread32(&hw->reg->mac_adr[0].high);
 282         adr1b = ioread32(&hw->reg->mac_adr[0].low);
 283 
 284         hw->mac.addr[0] = (u8)(adr1a & 0xFF);
 285         hw->mac.addr[1] = (u8)((adr1a >> 8) & 0xFF);
 286         hw->mac.addr[2] = (u8)((adr1a >> 16) & 0xFF);
 287         hw->mac.addr[3] = (u8)((adr1a >> 24) & 0xFF);
 288         hw->mac.addr[4] = (u8)(adr1b & 0xFF);
 289         hw->mac.addr[5] = (u8)((adr1b >> 8) & 0xFF);
 290 
 291         netdev_dbg(adapter->netdev, "hw->mac.addr : %pM\n", hw->mac.addr);
 292         return 0;
 293 }
 294 
 295 /**
  296  * pch_gbe_wait_clr_bit - Wait for a bit to clear
  297  * @reg:        Pointer to the register
  298  * @bit:        Bit to wait on until it clears
 299  */
 300 static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 301 {
 302         u32 tmp;
 303 
 304         /* wait busy */
 305         tmp = 1000;
 306         while ((ioread32(reg) & bit) && --tmp)
 307                 cpu_relax();
 308         if (!tmp)
 309                 pr_err("Error: busy bit is not cleared\n");
 310 }
 311 
 312 /**
 313  * pch_gbe_mac_mar_set - Set MAC address register
 314  * @hw:     Pointer to the HW structure
 315  * @addr:   Pointer to the MAC address
 316  * @index:  MAC address array register
 317  */
  318 static void pch_gbe_mac_mar_set(struct pch_gbe_hw *hw, u8 *addr, u32 index)
 319 {
 320         struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 321         u32 mar_low, mar_high, adrmask;
 322 
 323         netdev_dbg(adapter->netdev, "index : 0x%x\n", index);
 324 
 325         /*
 326          * HW expects these in little endian so we reverse the byte order
 327          * from network order (big endian) to little endian
 328          */
 329         mar_high = ((u32) addr[0] | ((u32) addr[1] << 8) |
 330                    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
 331         mar_low = ((u32) addr[4] | ((u32) addr[5] << 8));
  332         /* Disable the MAC address at this index. */
 333         adrmask = ioread32(&hw->reg->ADDR_MASK);
 334         iowrite32((adrmask | (0x0001 << index)), &hw->reg->ADDR_MASK);
 335         /* wait busy */
 336         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 337         /* Set the MAC address to the MAC address 1A/1B register */
 338         iowrite32(mar_high, &hw->reg->mac_adr[index].high);
 339         iowrite32(mar_low, &hw->reg->mac_adr[index].low);
  340         /* Re-enable the MAC address at this index */
 341         iowrite32((adrmask & ~(0x0001 << index)), &hw->reg->ADDR_MASK);
 342         /* wait busy */
 343         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 344 }
 345 
 346 /**
 347  * pch_gbe_mac_reset_hw - Reset hardware
 348  * @hw: Pointer to the HW structure
 349  */
 350 static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 351 {
  352         /* Read the MAC address and store it in the private data */
 353         pch_gbe_mac_read_mac_addr(hw);
 354         iowrite32(PCH_GBE_ALL_RST, &hw->reg->RESET);
 355         iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
 356         pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
 357         /* Setup the receive addresses */
 358         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 359         return;
 360 }
 361 
 362 static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
 363 {
 364         u32 rctl;
 365         /* Disables Receive MAC */
 366         rctl = ioread32(&hw->reg->MAC_RX_EN);
 367         iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 368 }
 369 
 370 static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
 371 {
 372         u32 rctl;
 373         /* Enables Receive MAC */
 374         rctl = ioread32(&hw->reg->MAC_RX_EN);
 375         iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 376 }
 377 
 378 /**
  379  * pch_gbe_mac_init_rx_addrs - Initialize receive addresses
  380  * @hw: Pointer to the HW structure
  381  * @mar_count: Number of receive address registers
 382  */
 383 static void pch_gbe_mac_init_rx_addrs(struct pch_gbe_hw *hw, u16 mar_count)
 384 {
 385         u32 i;
 386 
 387         /* Setup the receive address */
 388         pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
 389 
 390         /* Zero out the other receive addresses */
 391         for (i = 1; i < mar_count; i++) {
 392                 iowrite32(0, &hw->reg->mac_adr[i].high);
 393                 iowrite32(0, &hw->reg->mac_adr[i].low);
 394         }
 395         iowrite32(0xFFFE, &hw->reg->ADDR_MASK);
 396         /* wait busy */
 397         pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
 398 }
 399 
 400 /**
 401  * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
 402  * @hw:             Pointer to the HW structure
 403  * Returns:
 404  *      0:                      Successful.
 405  *      Negative value:         Failed.
 406  */
 407 s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw)
 408 {
 409         struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 410         struct pch_gbe_mac_info *mac = &hw->mac;
 411         u32 rx_fctrl;
 412 
 413         netdev_dbg(adapter->netdev, "mac->fc = %u\n", mac->fc);
 414 
 415         rx_fctrl = ioread32(&hw->reg->RX_FCTRL);
 416 
 417         switch (mac->fc) {
 418         case PCH_GBE_FC_NONE:
 419                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 420                 mac->tx_fc_enable = false;
 421                 break;
 422         case PCH_GBE_FC_RX_PAUSE:
 423                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 424                 mac->tx_fc_enable = false;
 425                 break;
 426         case PCH_GBE_FC_TX_PAUSE:
 427                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 428                 mac->tx_fc_enable = true;
 429                 break;
 430         case PCH_GBE_FC_FULL:
 431                 rx_fctrl |= PCH_GBE_FL_CTRL_EN;
 432                 mac->tx_fc_enable = true;
 433                 break;
 434         default:
 435                 netdev_err(adapter->netdev,
 436                            "Flow control param set incorrectly\n");
 437                 return -EINVAL;
 438         }
 439         if (mac->link_duplex == DUPLEX_HALF)
 440                 rx_fctrl &= ~PCH_GBE_FL_CTRL_EN;
 441         iowrite32(rx_fctrl, &hw->reg->RX_FCTRL);
 442         netdev_dbg(adapter->netdev,
 443                    "RX_FCTRL reg : 0x%08x  mac->tx_fc_enable : %d\n",
 444                    ioread32(&hw->reg->RX_FCTRL), mac->tx_fc_enable);
 445         return 0;
 446 }
 447 
 448 /**
 449  * pch_gbe_mac_set_wol_event - Set wake-on-lan event
 450  * @hw:     Pointer to the HW structure
 451  * @wu_evt: Wake up event
 452  */
 453 static void pch_gbe_mac_set_wol_event(struct pch_gbe_hw *hw, u32 wu_evt)
 454 {
 455         struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 456         u32 addr_mask;
 457 
 458         netdev_dbg(adapter->netdev, "wu_evt : 0x%08x  ADDR_MASK reg : 0x%08x\n",
 459                    wu_evt, ioread32(&hw->reg->ADDR_MASK));
 460 
 461         if (wu_evt) {
 462                 /* Set Wake-On-Lan address mask */
 463                 addr_mask = ioread32(&hw->reg->ADDR_MASK);
 464                 iowrite32(addr_mask, &hw->reg->WOL_ADDR_MASK);
 465                 /* wait busy */
 466                 pch_gbe_wait_clr_bit(&hw->reg->WOL_ADDR_MASK, PCH_GBE_WLA_BUSY);
 467                 iowrite32(0, &hw->reg->WOL_ST);
 468                 iowrite32((wu_evt | PCH_GBE_WLC_WOL_MODE), &hw->reg->WOL_CTRL);
 469                 iowrite32(0x02, &hw->reg->TCPIP_ACC);
 470                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 471         } else {
 472                 iowrite32(0, &hw->reg->WOL_CTRL);
 473                 iowrite32(0, &hw->reg->WOL_ST);
 474         }
 475         return;
 476 }
 477 
 478 /**
 479  * pch_gbe_mac_ctrl_miim - Control MIIM interface
 480  * @hw:   Pointer to the HW structure
 481  * @addr: Address of PHY
  482  * @dir:  Operation (write or read)
 483  * @reg:  Access register of PHY
 484  * @data: Write data.
 485  *
  486  * Returns: Read data.
 487  */
 488 u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
 489                         u16 data)
 490 {
 491         struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 492         u32 data_out = 0;
 493         unsigned int i;
 494         unsigned long flags;
 495 
 496         spin_lock_irqsave(&hw->miim_lock, flags);
 497 
 498         for (i = 100; i; --i) {
 499                 if ((ioread32(&hw->reg->MIIM) & PCH_GBE_MIIM_OPER_READY))
 500                         break;
 501                 udelay(20);
 502         }
 503         if (i == 0) {
 504                 netdev_err(adapter->netdev, "pch-gbe.miim won't go Ready\n");
 505                 spin_unlock_irqrestore(&hw->miim_lock, flags);
 506                 return 0;       /* No way to indicate timeout error */
 507         }
 508         iowrite32(((reg << PCH_GBE_MIIM_REG_ADDR_SHIFT) |
 509                   (addr << PCH_GBE_MIIM_PHY_ADDR_SHIFT) |
 510                   dir | data), &hw->reg->MIIM);
 511         for (i = 0; i < 100; i++) {
 512                 udelay(20);
 513                 data_out = ioread32(&hw->reg->MIIM);
 514                 if ((data_out & PCH_GBE_MIIM_OPER_READY))
 515                         break;
 516         }
 517         spin_unlock_irqrestore(&hw->miim_lock, flags);
 518 
 519         netdev_dbg(adapter->netdev, "PHY %s: reg=%d, data=0x%04X\n",
 520                    dir == PCH_GBE_MIIM_OPER_READ ? "READ" : "WRITE", reg,
 521                    dir == PCH_GBE_MIIM_OPER_READ ? data_out : data);
 522         return (u16) data_out;
 523 }
 524 
 525 /**
 526  * pch_gbe_mac_set_pause_packet - Set pause packet
 527  * @hw:   Pointer to the HW structure
 528  */
 529 static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
 530 {
 531         struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
 532         unsigned long tmp2, tmp3;
 533 
 534         /* Set Pause packet */
 535         tmp2 = hw->mac.addr[1];
 536         tmp2 = (tmp2 << 8) | hw->mac.addr[0];
 537         tmp2 = PCH_GBE_PAUSE_PKT2_VALUE | (tmp2 << 16);
 538 
 539         tmp3 = hw->mac.addr[5];
 540         tmp3 = (tmp3 << 8) | hw->mac.addr[4];
 541         tmp3 = (tmp3 << 8) | hw->mac.addr[3];
 542         tmp3 = (tmp3 << 8) | hw->mac.addr[2];
 543 
 544         iowrite32(PCH_GBE_PAUSE_PKT1_VALUE, &hw->reg->PAUSE_PKT1);
 545         iowrite32(tmp2, &hw->reg->PAUSE_PKT2);
 546         iowrite32(tmp3, &hw->reg->PAUSE_PKT3);
 547         iowrite32(PCH_GBE_PAUSE_PKT4_VALUE, &hw->reg->PAUSE_PKT4);
 548         iowrite32(PCH_GBE_PAUSE_PKT5_VALUE, &hw->reg->PAUSE_PKT5);
 549 
 550         /* Transmit Pause Packet */
 551         iowrite32(PCH_GBE_PS_PKT_RQ, &hw->reg->PAUSE_REQ);
 552 
 553         netdev_dbg(adapter->netdev,
 554                    "PAUSE_PKT1-5 reg : 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
 555                    ioread32(&hw->reg->PAUSE_PKT1),
 556                    ioread32(&hw->reg->PAUSE_PKT2),
 557                    ioread32(&hw->reg->PAUSE_PKT3),
 558                    ioread32(&hw->reg->PAUSE_PKT4),
 559                    ioread32(&hw->reg->PAUSE_PKT5));
 560 
 561         return;
 562 }
 563 
 564 
 565 /**
 566  * pch_gbe_alloc_queues - Allocate memory for all rings
 567  * @adapter:  Board private structure to initialize
 568  * Returns:
  569  *      0:      Successful
 570  *      Negative value: Failed
 571  */
 572 static int pch_gbe_alloc_queues(struct pch_gbe_adapter *adapter)
 573 {
 574         adapter->tx_ring = devm_kzalloc(&adapter->pdev->dev,
 575                                         sizeof(*adapter->tx_ring), GFP_KERNEL);
 576         if (!adapter->tx_ring)
 577                 return -ENOMEM;
 578 
 579         adapter->rx_ring = devm_kzalloc(&adapter->pdev->dev,
 580                                         sizeof(*adapter->rx_ring), GFP_KERNEL);
 581         if (!adapter->rx_ring)
 582                 return -ENOMEM;
 583         return 0;
 584 }
 585 
 586 /**
  587  * pch_gbe_init_stats - Initialize statistics
 588  * @adapter:  Board private structure to initialize
 589  */
 590 static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
 591 {
 592         memset(&adapter->stats, 0, sizeof(adapter->stats));
 593         return;
 594 }
 595 
 596 /**
 597  * pch_gbe_init_phy - Initialize PHY
 598  * @adapter:  Board private structure to initialize
 599  * Returns:
  600  *      0:      Successful
 601  *      Negative value: Failed
 602  */
 603 static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
 604 {
 605         struct net_device *netdev = adapter->netdev;
 606         u32 addr;
 607         u16 bmcr, stat;
 608 
 609         /* Discover phy addr by searching addrs in order {1,0,2,..., 31} */
 610         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 611                 adapter->mii.phy_id = (addr == 0) ? 1 : (addr == 1) ? 0 : addr;
 612                 bmcr = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMCR);
 613                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 614                 stat = pch_gbe_mdio_read(netdev, adapter->mii.phy_id, MII_BMSR);
 615                 if (!((bmcr == 0xFFFF) || ((stat == 0) && (bmcr == 0))))
 616                         break;
 617         }
 618         adapter->hw.phy.addr = adapter->mii.phy_id;
 619         netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
 620         if (addr == PCH_GBE_PHY_REGS_LEN)
 621                 return -EAGAIN;
  622         /* Select the PHY and isolate the rest */
 623         for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
 624                 if (addr != adapter->mii.phy_id) {
 625                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 626                                            BMCR_ISOLATE);
 627                 } else {
 628                         bmcr = pch_gbe_mdio_read(netdev, addr, MII_BMCR);
 629                         pch_gbe_mdio_write(netdev, addr, MII_BMCR,
 630                                            bmcr & ~BMCR_ISOLATE);
 631                 }
 632         }
 633 
 634         /* MII setup */
 635         adapter->mii.phy_id_mask = 0x1F;
 636         adapter->mii.reg_num_mask = 0x1F;
 637         adapter->mii.dev = adapter->netdev;
 638         adapter->mii.mdio_read = pch_gbe_mdio_read;
 639         adapter->mii.mdio_write = pch_gbe_mdio_write;
 640         adapter->mii.supports_gmii = mii_check_gmii_support(&adapter->mii);
 641         return 0;
 642 }
 643 
 644 /**
 645  * pch_gbe_mdio_read - The read function for mii
 646  * @netdev: Network interface device structure
 647  * @addr:   Phy ID
 648  * @reg:    Access location
 649  * Returns:
  650  *      The 16-bit value read from the PHY register
  651  *      (0 is also returned if the MIIM interface times out)
 652  */
 653 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg)
 654 {
 655         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 656         struct pch_gbe_hw *hw = &adapter->hw;
 657 
 658         return pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_READ, reg,
 659                                      (u16) 0);
 660 }
 661 
 662 /**
 663  * pch_gbe_mdio_write - The write function for mii
 664  * @netdev: Network interface device structure
  665  * @addr:   Phy ID
 666  * @reg:    Access location
 667  * @data:   Write data
 668  */
 669 static void pch_gbe_mdio_write(struct net_device *netdev,
 670                                int addr, int reg, int data)
 671 {
 672         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 673         struct pch_gbe_hw *hw = &adapter->hw;
 674 
 675         pch_gbe_mac_ctrl_miim(hw, addr, PCH_GBE_HAL_MIIM_WRITE, reg, data);
 676 }
 677 
 678 /**
 679  * pch_gbe_reset_task - Reset processing at the time of transmission timeout
  680  * @work:  Pointer to the reset_task work within the board private structure
 681  */
 682 static void pch_gbe_reset_task(struct work_struct *work)
 683 {
 684         struct pch_gbe_adapter *adapter;
 685         adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 686 
 687         rtnl_lock();
 688         pch_gbe_reinit_locked(adapter);
 689         rtnl_unlock();
 690 }
 691 
 692 /**
  693  * pch_gbe_reinit_locked - Re-initialization
 694  * @adapter:  Board private structure
 695  */
 696 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 697 {
 698         pch_gbe_down(adapter);
 699         pch_gbe_up(adapter);
 700 }
 701 
 702 /**
 703  * pch_gbe_reset - Reset GbE
 704  * @adapter:  Board private structure
 705  */
 706 void pch_gbe_reset(struct pch_gbe_adapter *adapter)
 707 {
 708         struct net_device *netdev = adapter->netdev;
 709         struct pch_gbe_hw *hw = &adapter->hw;
 710         s32 ret_val;
 711 
 712         pch_gbe_mac_reset_hw(hw);
 713         /* reprogram multicast address register after reset */
 714         pch_gbe_set_multi(netdev);
 715         /* Setup the receive address. */
 716         pch_gbe_mac_init_rx_addrs(hw, PCH_GBE_MAR_ENTRIES);
 717 
 718         ret_val = pch_gbe_phy_get_id(hw);
 719         if (ret_val) {
 720                 netdev_err(adapter->netdev, "pch_gbe_phy_get_id error\n");
 721                 return;
 722         }
 723         pch_gbe_phy_init_setting(hw);
 724         /* Setup Mac interface option RGMII */
 725         pch_gbe_phy_set_rgmii(hw);
 726 }
 727 
 728 /**
 729  * pch_gbe_free_irq - Free an interrupt
 730  * @adapter:  Board private structure
 731  */
 732 static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter)
 733 {
 734         struct net_device *netdev = adapter->netdev;
 735 
 736         free_irq(adapter->irq, netdev);
 737         pci_free_irq_vectors(adapter->pdev);
 738 }
 739 
 740 /**
 741  * pch_gbe_irq_disable - Mask off interrupt generation on the NIC
 742  * @adapter:  Board private structure
 743  */
 744 static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter)
 745 {
 746         struct pch_gbe_hw *hw = &adapter->hw;
 747 
 748         atomic_inc(&adapter->irq_sem);
 749         iowrite32(0, &hw->reg->INT_EN);
 750         ioread32(&hw->reg->INT_ST);
 751         synchronize_irq(adapter->irq);
 752 
 753         netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
 754                    ioread32(&hw->reg->INT_EN));
 755 }
 756 
 757 /**
 758  * pch_gbe_irq_enable - Enable default interrupt generation settings
 759  * @adapter:  Board private structure
 760  */
 761 static void pch_gbe_irq_enable(struct pch_gbe_adapter *adapter)
 762 {
 763         struct pch_gbe_hw *hw = &adapter->hw;
 764 
 765         if (likely(atomic_dec_and_test(&adapter->irq_sem)))
 766                 iowrite32(PCH_GBE_INT_ENABLE_MASK, &hw->reg->INT_EN);
 767         ioread32(&hw->reg->INT_ST);
 768         netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n",
 769                    ioread32(&hw->reg->INT_EN));
 770 }
 771 
 772 
 773 
 774 /**
 775  * pch_gbe_setup_tctl - configure the Transmit control registers
 776  * @adapter:  Board private structure
 777  */
 778 static void pch_gbe_setup_tctl(struct pch_gbe_adapter *adapter)
 779 {
 780         struct pch_gbe_hw *hw = &adapter->hw;
 781         u32 tx_mode, tcpip;
 782 
 783         tx_mode = PCH_GBE_TM_LONG_PKT |
 784                 PCH_GBE_TM_ST_AND_FD |
 785                 PCH_GBE_TM_SHORT_PKT |
 786                 PCH_GBE_TM_TH_TX_STRT_8 |
 787                 PCH_GBE_TM_TH_ALM_EMP_4 | PCH_GBE_TM_TH_ALM_FULL_8;
 788 
 789         iowrite32(tx_mode, &hw->reg->TX_MODE);
 790 
 791         tcpip = ioread32(&hw->reg->TCPIP_ACC);
 792         tcpip |= PCH_GBE_TX_TCPIPACC_EN;
 793         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 794         return;
 795 }
 796 
 797 /**
 798  * pch_gbe_configure_tx - Configure Transmit Unit after Reset
 799  * @adapter:  Board private structure
 800  */
 801 static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
 802 {
 803         struct pch_gbe_hw *hw = &adapter->hw;
 804         u32 tdba, tdlen, dctrl;
 805 
 806         netdev_dbg(adapter->netdev, "dma addr = 0x%08llx  size = 0x%08x\n",
 807                    (unsigned long long)adapter->tx_ring->dma,
 808                    adapter->tx_ring->size);
 809 
 810         /* Setup the HW Tx Head and Tail descriptor pointers */
 811         tdba = adapter->tx_ring->dma;
 812         tdlen = adapter->tx_ring->size - 0x10;
 813         iowrite32(tdba, &hw->reg->TX_DSC_BASE);
 814         iowrite32(tdlen, &hw->reg->TX_DSC_SIZE);
 815         iowrite32(tdba, &hw->reg->TX_DSC_SW_P);
 816 
 817         /* Enables Transmission DMA */
 818         dctrl = ioread32(&hw->reg->DMA_CTRL);
 819         dctrl |= PCH_GBE_TX_DMA_EN;
 820         iowrite32(dctrl, &hw->reg->DMA_CTRL);
 821 }
 822 
 823 /**
 824  * pch_gbe_setup_rctl - Configure the receive control registers
 825  * @adapter:  Board private structure
 826  */
 827 static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 828 {
 829         struct pch_gbe_hw *hw = &adapter->hw;
 830         u32 rx_mode, tcpip;
 831 
 832         rx_mode = PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN |
 833         PCH_GBE_RH_ALM_EMP_4 | PCH_GBE_RH_ALM_FULL_4 | PCH_GBE_RH_RD_TRG_8;
 834 
 835         iowrite32(rx_mode, &hw->reg->RX_MODE);
 836 
 837         tcpip = ioread32(&hw->reg->TCPIP_ACC);
 838 
 839         tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
 840         tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
 841         iowrite32(tcpip, &hw->reg->TCPIP_ACC);
 842         return;
 843 }
 844 
 845 /**
 846  * pch_gbe_configure_rx - Configure Receive Unit after Reset
 847  * @adapter:  Board private structure
 848  */
 849 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 850 {
 851         struct pch_gbe_hw *hw = &adapter->hw;
 852         u32 rdba, rdlen, rxdma;
 853 
 854         netdev_dbg(adapter->netdev, "dma adr = 0x%08llx  size = 0x%08x\n",
 855                    (unsigned long long)adapter->rx_ring->dma,
 856                    adapter->rx_ring->size);
 857 
 858         pch_gbe_mac_force_mac_fc(hw);
 859 
 860         pch_gbe_disable_mac_rx(hw);
 861 
 862         /* Disables Receive DMA */
 863         rxdma = ioread32(&hw->reg->DMA_CTRL);
 864         rxdma &= ~PCH_GBE_RX_DMA_EN;
 865         iowrite32(rxdma, &hw->reg->DMA_CTRL);
 866 
 867         netdev_dbg(adapter->netdev,
 868                    "MAC_RX_EN reg = 0x%08x  DMA_CTRL reg = 0x%08x\n",
 869                    ioread32(&hw->reg->MAC_RX_EN),
 870                    ioread32(&hw->reg->DMA_CTRL));
 871 
 872         /* Setup the HW Rx Head and Tail Descriptor Pointers and
 873          * the Base and Length of the Rx Descriptor Ring */
 874         rdba = adapter->rx_ring->dma;
 875         rdlen = adapter->rx_ring->size - 0x10;
 876         iowrite32(rdba, &hw->reg->RX_DSC_BASE);
 877         iowrite32(rdlen, &hw->reg->RX_DSC_SIZE);
 878         iowrite32((rdba + rdlen), &hw->reg->RX_DSC_SW_P);
 879 }
 880 
 881 /**
 882  * pch_gbe_unmap_and_free_tx_resource - Unmap and free tx socket buffer
 883  * @adapter:     Board private structure
 884  * @buffer_info: Buffer information structure
 885  */
 886 static void pch_gbe_unmap_and_free_tx_resource(
 887         struct pch_gbe_adapter *adapter, struct pch_gbe_buffer *buffer_info)
 888 {
 889         if (buffer_info->mapped) {
 890                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 891                                  buffer_info->length, DMA_TO_DEVICE);
 892                 buffer_info->mapped = false;
 893         }
 894         if (buffer_info->skb) {
 895                 dev_kfree_skb_any(buffer_info->skb);
 896                 buffer_info->skb = NULL;
 897         }
 898 }
 899 
 900 /**
 901  * pch_gbe_unmap_and_free_rx_resource - Unmap and free rx socket buffer
 902  * @adapter:      Board private structure
 903  * @buffer_info:  Buffer information structure
 904  */
 905 static void pch_gbe_unmap_and_free_rx_resource(
 906                                         struct pch_gbe_adapter *adapter,
 907                                         struct pch_gbe_buffer *buffer_info)
 908 {
 909         if (buffer_info->mapped) {
 910                 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
 911                                  buffer_info->length, DMA_FROM_DEVICE);
 912                 buffer_info->mapped = false;
 913         }
 914         if (buffer_info->skb) {
 915                 dev_kfree_skb_any(buffer_info->skb);
 916                 buffer_info->skb = NULL;
 917         }
 918 }
 919 
 920 /**
 921  * pch_gbe_clean_tx_ring - Free Tx Buffers
 922  * @adapter:  Board private structure
 923  * @tx_ring:  Ring to be cleaned
 924  */
 925 static void pch_gbe_clean_tx_ring(struct pch_gbe_adapter *adapter,
 926                                    struct pch_gbe_tx_ring *tx_ring)
 927 {
 928         struct pch_gbe_hw *hw = &adapter->hw;
 929         struct pch_gbe_buffer *buffer_info;
 930         unsigned long size;
 931         unsigned int i;
 932 
 933         /* Free all the Tx ring sk_buffs */
 934         for (i = 0; i < tx_ring->count; i++) {
 935                 buffer_info = &tx_ring->buffer_info[i];
 936                 pch_gbe_unmap_and_free_tx_resource(adapter, buffer_info);
 937         }
 938         netdev_dbg(adapter->netdev,
 939                    "call pch_gbe_unmap_and_free_tx_resource() %d count\n", i);
 940 
 941         size = (unsigned long)sizeof(struct pch_gbe_buffer) * tx_ring->count;
 942         memset(tx_ring->buffer_info, 0, size);
 943 
 944         /* Zero out the descriptor ring */
 945         memset(tx_ring->desc, 0, tx_ring->size);
 946         tx_ring->next_to_use = 0;
 947         tx_ring->next_to_clean = 0;
 948         iowrite32(tx_ring->dma, &hw->reg->TX_DSC_HW_P);
 949         iowrite32((tx_ring->size - 0x10), &hw->reg->TX_DSC_SIZE);
 950 }
 951 
 952 /**
 953  * pch_gbe_clean_rx_ring - Free Rx Buffers
 954  * @adapter:  Board private structure
 955  * @rx_ring:  Ring to free buffers from
 956  */
 957 static void
 958 pch_gbe_clean_rx_ring(struct pch_gbe_adapter *adapter,
 959                       struct pch_gbe_rx_ring *rx_ring)
 960 {
 961         struct pch_gbe_hw *hw = &adapter->hw;
 962         struct pch_gbe_buffer *buffer_info;
 963         unsigned long size;
 964         unsigned int i;
 965 
 966         /* Free all the Rx ring sk_buffs */
 967         for (i = 0; i < rx_ring->count; i++) {
 968                 buffer_info = &rx_ring->buffer_info[i];
 969                 pch_gbe_unmap_and_free_rx_resource(adapter, buffer_info);
 970         }
 971         netdev_dbg(adapter->netdev,
 972                    "call pch_gbe_unmap_and_free_rx_resource() %d count\n", i);
 973         size = (unsigned long)sizeof(struct pch_gbe_buffer) * rx_ring->count;
 974         memset(rx_ring->buffer_info, 0, size);
 975 
 976         /* Zero out the descriptor ring */
 977         memset(rx_ring->desc, 0, rx_ring->size);
 978         rx_ring->next_to_clean = 0;
 979         rx_ring->next_to_use = 0;
 980         iowrite32(rx_ring->dma, &hw->reg->RX_DSC_HW_P);
 981         iowrite32((rx_ring->size - 0x10), &hw->reg->RX_DSC_SIZE);
 982 }
 983 
 984 static void pch_gbe_set_rgmii_ctrl(struct pch_gbe_adapter *adapter, u16 speed,
 985                                     u16 duplex)
 986 {
 987         struct pch_gbe_hw *hw = &adapter->hw;
 988         unsigned long rgmii = 0;
 989 
 990         /* Set the RGMII control. */
 991         switch (speed) {
 992         case SPEED_10:
 993                 rgmii = (PCH_GBE_RGMII_RATE_2_5M |
 994                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
 995                 break;
 996         case SPEED_100:
 997                 rgmii = (PCH_GBE_RGMII_RATE_25M |
 998                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
 999                 break;
1000         case SPEED_1000:
1001                 rgmii = (PCH_GBE_RGMII_RATE_125M |
1002                          PCH_GBE_MAC_RGMII_CTRL_SETTING);
1003                 break;
1004         }
1005         iowrite32(rgmii, &hw->reg->RGMII_CTRL);
1006 }
1007 static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
1008                               u16 duplex)
1009 {
1010         struct net_device *netdev = adapter->netdev;
1011         struct pch_gbe_hw *hw = &adapter->hw;
1012         unsigned long mode = 0;
1013 
1014         /* Set the communication mode */
1015         switch (speed) {
1016         case SPEED_10:
1017                 mode = PCH_GBE_MODE_MII_ETHER;
1018                 netdev->tx_queue_len = 10;
1019                 break;
1020         case SPEED_100:
1021                 mode = PCH_GBE_MODE_MII_ETHER;
1022                 netdev->tx_queue_len = 100;
1023                 break;
1024         case SPEED_1000:
1025                 mode = PCH_GBE_MODE_GMII_ETHER;
1026                 break;
1027         }
1028         if (duplex == DUPLEX_FULL)
1029                 mode |= PCH_GBE_MODE_FULL_DUPLEX;
1030         else
1031                 mode |= PCH_GBE_MODE_HALF_DUPLEX;
1032         iowrite32(mode, &hw->reg->MODE);
1033 }
1034 
1035 /**
1036  * pch_gbe_watchdog - Watchdog process
 1037  * @t:  Watchdog timer embedded in the board private structure
1038  */
1039 static void pch_gbe_watchdog(struct timer_list *t)
1040 {
1041         struct pch_gbe_adapter *adapter = from_timer(adapter, t,
1042                                                      watchdog_timer);
1043         struct net_device *netdev = adapter->netdev;
1044         struct pch_gbe_hw *hw = &adapter->hw;
1045 
1046         netdev_dbg(netdev, "right now = %ld\n", jiffies);
1047 
1048         pch_gbe_update_stats(adapter);
1049         if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
1050                 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
1051                 netdev->tx_queue_len = adapter->tx_queue_len;
1052                 /* mii library handles link maintenance tasks */
1053                 if (mii_ethtool_gset(&adapter->mii, &cmd)) {
1054                         netdev_err(netdev, "ethtool get setting Error\n");
1055                         mod_timer(&adapter->watchdog_timer,
1056                                   round_jiffies(jiffies +
1057                                                 PCH_GBE_WATCHDOG_PERIOD));
1058                         return;
1059                 }
1060                 hw->mac.link_speed = ethtool_cmd_speed(&cmd);
1061                 hw->mac.link_duplex = cmd.duplex;
1062                 /* Set the RGMII control. */
1063                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
1064                                                 hw->mac.link_duplex);
1065                 /* Set the communication mode */
1066                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
1067                                  hw->mac.link_duplex);
1068                 netdev_dbg(netdev,
1069                            "Link is Up %d Mbps %s-Duplex\n",
1070                            hw->mac.link_speed,
1071                            cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1072                 netif_carrier_on(netdev);
1073                 netif_wake_queue(netdev);
1074         } else if ((!mii_link_ok(&adapter->mii)) &&
1075                    (netif_carrier_ok(netdev))) {
1076                 netdev_dbg(netdev, "NIC Link is Down\n");
1077                 hw->mac.link_speed = SPEED_10;
1078                 hw->mac.link_duplex = DUPLEX_HALF;
1079                 netif_carrier_off(netdev);
1080                 netif_stop_queue(netdev);
1081         }
1082         mod_timer(&adapter->watchdog_timer,
1083                   round_jiffies(jiffies + PCH_GBE_WATCHDOG_PERIOD));
1084 }
1085 
1086 /**
1087  * pch_gbe_tx_queue - Carry out queuing of the transmission data
1088  * @adapter:  Board private structure
1089  * @tx_ring:  Tx descriptor ring structure
 1090  * @skb:      Socket buffer structure
1091  */
1092 static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1093                               struct pch_gbe_tx_ring *tx_ring,
1094                               struct sk_buff *skb)
1095 {
1096         struct pch_gbe_hw *hw = &adapter->hw;
1097         struct pch_gbe_tx_desc *tx_desc;
1098         struct pch_gbe_buffer *buffer_info;
1099         struct sk_buff *tmp_skb;
1100         unsigned int frame_ctrl;
1101         unsigned int ring_num;
1102 
1103         /*-- Set frame control --*/
1104         frame_ctrl = 0;
1105         if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
1106                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
1107         if (skb->ip_summed == CHECKSUM_NONE)
1108                 frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1109 
 1110         /* Perform checksum processing in software:
 1111          * the hardware accelerator cannot generate a checksum when the
 1112          * frame is shorter than 64 bytes, so the TCP/UDP checksum for
 1113          * short packets is computed here before the frame is queued.
 1114          */
1115         if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
1116                 frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
1117                               PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
1118                 if (skb->protocol == htons(ETH_P_IP)) {
1119                         struct iphdr *iph = ip_hdr(skb);
1120                         unsigned int offset;
1121                         offset = skb_transport_offset(skb);
1122                         if (iph->protocol == IPPROTO_TCP) {
1123                                 skb->csum = 0;
1124                                 tcp_hdr(skb)->check = 0;
1125                                 skb->csum = skb_checksum(skb, offset,
1126                                                          skb->len - offset, 0);
1127                                 tcp_hdr(skb)->check =
1128                                         csum_tcpudp_magic(iph->saddr,
1129                                                           iph->daddr,
1130                                                           skb->len - offset,
1131                                                           IPPROTO_TCP,
1132                                                           skb->csum);
1133                         } else if (iph->protocol == IPPROTO_UDP) {
1134                                 skb->csum = 0;
1135                                 udp_hdr(skb)->check = 0;
1136                                 skb->csum =
1137                                         skb_checksum(skb, offset,
1138                                                      skb->len - offset, 0);
1139                                 udp_hdr(skb)->check =
1140                                         csum_tcpudp_magic(iph->saddr,
1141                                                           iph->daddr,
1142                                                           skb->len - offset,
1143                                                           IPPROTO_UDP,
1144                                                           skb->csum);
1145                         }
1146                 }
1147         }
1148 
1149         ring_num = tx_ring->next_to_use;
1150         if (unlikely((ring_num + 1) == tx_ring->count))
1151                 tx_ring->next_to_use = 0;
1152         else
1153                 tx_ring->next_to_use = ring_num + 1;
1154 
1155 
1156         buffer_info = &tx_ring->buffer_info[ring_num];
1157         tmp_skb = buffer_info->skb;
1158 
 1159         /* [Header:14][payload] ---> [Header:14][padding:2][payload]    */
1160         memcpy(tmp_skb->data, skb->data, ETH_HLEN);
1161         tmp_skb->data[ETH_HLEN] = 0x00;
1162         tmp_skb->data[ETH_HLEN + 1] = 0x00;
1163         tmp_skb->len = skb->len;
1164         memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
1165                (skb->len - ETH_HLEN));
1166         /*-- Set Buffer information --*/
1167         buffer_info->length = tmp_skb->len;
1168         buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
1169                                           buffer_info->length,
1170                                           DMA_TO_DEVICE);
1171         if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1172                 netdev_err(adapter->netdev, "TX DMA map failed\n");
1173                 buffer_info->dma = 0;
1174                 buffer_info->time_stamp = 0;
1175                 tx_ring->next_to_use = ring_num;
1176                 return;
1177         }
1178         buffer_info->mapped = true;
1179         buffer_info->time_stamp = jiffies;
1180 
1181         /*-- Set Tx descriptor --*/
1182         tx_desc = PCH_GBE_TX_DESC(*tx_ring, ring_num);
1183         tx_desc->buffer_addr = (buffer_info->dma);
1184         tx_desc->length = (tmp_skb->len);
1185         tx_desc->tx_words_eob = ((tmp_skb->len + 3));
1186         tx_desc->tx_frame_ctrl = (frame_ctrl);
1187         tx_desc->gbec_status = (DSC_INIT16);
1188 
1189         if (unlikely(++ring_num == tx_ring->count))
1190                 ring_num = 0;
1191 
1192         /* Update software pointer of TX descriptor */
1193         iowrite32(tx_ring->dma +
1194                   (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1195                   &hw->reg->TX_DSC_SW_P);
1196 
1197         pch_tx_timestamp(adapter, skb);
1198 
1199         dev_kfree_skb_any(skb);
1200 }
1201 
1202 /**
1203  * pch_gbe_update_stats - Update the board statistics counters
1204  * @adapter:  Board private structure
1205  */
1206 void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1207 {
1208         struct net_device *netdev = adapter->netdev;
1209         struct pci_dev *pdev = adapter->pdev;
1210         struct pch_gbe_hw_stats *stats = &adapter->stats;
1211         unsigned long flags;
1212 
1213         /*
1214          * Prevent stats update while adapter is being reset, or if the pci
1215          * connection is down.
1216          */
1217         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1218                 return;
1219 
1220         spin_lock_irqsave(&adapter->stats_lock, flags);
1221 
1222         /* Update device status "adapter->stats" */
1223         stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors;
1224         stats->tx_errors = stats->tx_length_errors +
1225             stats->tx_aborted_errors +
1226             stats->tx_carrier_errors + stats->tx_timeout_count;
1227 
1228         /* Update network device status "adapter->net_stats" */
1229         netdev->stats.rx_packets = stats->rx_packets;
1230         netdev->stats.rx_bytes = stats->rx_bytes;
1231         netdev->stats.rx_dropped = stats->rx_dropped;
1232         netdev->stats.tx_packets = stats->tx_packets;
1233         netdev->stats.tx_bytes = stats->tx_bytes;
1234         netdev->stats.tx_dropped = stats->tx_dropped;
1235         /* Fill out the OS statistics structure */
1236         netdev->stats.multicast = stats->multicast;
1237         netdev->stats.collisions = stats->collisions;
1238         /* Rx Errors */
1239         netdev->stats.rx_errors = stats->rx_errors;
1240         netdev->stats.rx_crc_errors = stats->rx_crc_errors;
1241         netdev->stats.rx_frame_errors = stats->rx_frame_errors;
1242         /* Tx Errors */
1243         netdev->stats.tx_errors = stats->tx_errors;
1244         netdev->stats.tx_aborted_errors = stats->tx_aborted_errors;
1245         netdev->stats.tx_carrier_errors = stats->tx_carrier_errors;
1246 
1247         spin_unlock_irqrestore(&adapter->stats_lock, flags);
1248 }
1249 
1250 static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
1251 {
1252         u32 rxdma;
1253 
1254         /* Disable Receive DMA */
1255         rxdma = ioread32(&hw->reg->DMA_CTRL);
1256         rxdma &= ~PCH_GBE_RX_DMA_EN;
1257         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1258 }
1259 
1260 static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
1261 {
1262         u32 rxdma;
1263 
1264         /* Enables Receive DMA */
1265         rxdma = ioread32(&hw->reg->DMA_CTRL);
1266         rxdma |= PCH_GBE_RX_DMA_EN;
1267         iowrite32(rxdma, &hw->reg->DMA_CTRL);
1268 }
1269 
1270 /**
1271  * pch_gbe_intr - Interrupt Handler
1272  * @irq:   Interrupt number
1273  * @data:  Pointer to a network interface device structure
1274  * Returns:
1275  *      - IRQ_HANDLED:  Our interrupt
1276  *      - IRQ_NONE:     Not our interrupt
1277  */
1278 static irqreturn_t pch_gbe_intr(int irq, void *data)
1279 {
1280         struct net_device *netdev = data;
1281         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
1282         struct pch_gbe_hw *hw = &adapter->hw;
1283         u32 int_st;
1284         u32 int_en;
1285 
1286         /* Check request status */
1287         int_st = ioread32(&hw->reg->INT_ST);
1288         int_st = int_st & ioread32(&hw->reg->INT_EN);
 1289         /* No interrupt cause that we handle is pending */
1290         if (unlikely(!int_st))
1291                 return IRQ_NONE;        /* Not our interrupt. End processing. */
1292         netdev_dbg(netdev, "%s occur int_st = 0x%08x\n", __func__, int_st);
1293         if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1294                 adapter->stats.intr_rx_frame_err_count++;
1295         if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1296                 if (!adapter->rx_stop_flag) {
1297                         adapter->stats.intr_rx_fifo_err_count++;
1298                         netdev_dbg(netdev, "Rx fifo over run\n");
1299                         adapter->rx_stop_flag = true;
1300                         int_en = ioread32(&hw->reg->INT_EN);
1301                         iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1302                                   &hw->reg->INT_EN);
1303                         pch_gbe_disable_dma_rx(&adapter->hw);
1304                         int_st |= ioread32(&hw->reg->INT_ST);
1305                         int_st = int_st & ioread32(&hw->reg->INT_EN);
1306                 }
1307         if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1308                 adapter->stats.intr_rx_dma_err_count++;
1309         if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
1310                 adapter->stats.intr_tx_fifo_err_count++;
1311         if (int_st & PCH_GBE_INT_TX_DMA_ERR)
1312                 adapter->stats.intr_tx_dma_err_count++;
1313         if (int_st & PCH_GBE_INT_TCPIP_ERR)
1314                 adapter->stats.intr_tcpip_err_count++;
 1315         /* The Rx descriptor ring has run empty */
1316         if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1317                 adapter->stats.intr_rx_dsc_empty_count++;
1318                 netdev_dbg(netdev, "Rx descriptor is empty\n");
1319                 int_en = ioread32(&hw->reg->INT_EN);
1320                 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1321                 if (hw->mac.tx_fc_enable) {
1322                         /* Set Pause packet */
1323                         pch_gbe_mac_set_pause_packet(hw);
1324                 }
1325         }
1326 
 1327         /* Rx/Tx completion (or a stopped Rx) is deferred to NAPI */
1328         if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) ||
1329             (adapter->rx_stop_flag)) {
1330                 if (likely(napi_schedule_prep(&adapter->napi))) {
1331                         /* Enable only Rx Descriptor empty */
1332                         atomic_inc(&adapter->irq_sem);
1333                         int_en = ioread32(&hw->reg->INT_EN);
1334                         int_en &=
1335                             ~(PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT);
1336                         iowrite32(int_en, &hw->reg->INT_EN);
1337                         /* Start polling for NAPI */
1338                         __napi_schedule(&adapter->napi);
1339                 }
1340         }
1341         netdev_dbg(netdev, "return = 0x%08x  INT_EN reg = 0x%08x\n",
1342                    IRQ_HANDLED, ioread32(&hw->reg->INT_EN));
1343         return IRQ_HANDLED;
1344 }
1345 
1346 /**
 1347  * pch_gbe_alloc_rx_buffers - Replace used receive buffers
1348  * @adapter:       Board private structure
1349  * @rx_ring:       Rx descriptor ring
1350  * @cleaned_count: Cleaned count
1351  */
1352 static void
1353 pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1354                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1355 {
1356         struct net_device *netdev = adapter->netdev;
1357         struct pci_dev *pdev = adapter->pdev;
1358         struct pch_gbe_hw *hw = &adapter->hw;
1359         struct pch_gbe_rx_desc *rx_desc;
1360         struct pch_gbe_buffer *buffer_info;
1361         struct sk_buff *skb;
1362         unsigned int i;
1363         unsigned int bufsz;
1364 
1365         bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1366         i = rx_ring->next_to_use;
1367 
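             /* Attach a fresh skb for the stack to each descriptor and map its
              * slot in the receive buffer pool for device DMA;
              * pch_gbe_clean_rx() later copies the received frame from the
              * pool buffer into the skb.
              */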
1368         while ((cleaned_count--)) {
1369                 buffer_info = &rx_ring->buffer_info[i];
1370                 skb = netdev_alloc_skb(netdev, bufsz);
1371                 if (unlikely(!skb)) {
1372                         /* Better luck next round */
1373                         adapter->stats.rx_alloc_buff_failed++;
1374                         break;
1375                 }
1376                 /* align */
1377                 skb_reserve(skb, NET_IP_ALIGN);
1378                 buffer_info->skb = skb;
1379 
1380                 buffer_info->dma = dma_map_single(&pdev->dev,
1381                                                   buffer_info->rx_buffer,
1382                                                   buffer_info->length,
1383                                                   DMA_FROM_DEVICE);
1384                 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
1385                         dev_kfree_skb(skb);
1386                         buffer_info->skb = NULL;
1387                         buffer_info->dma = 0;
1388                         adapter->stats.rx_alloc_buff_failed++;
1389                         break; /* while !buffer_info->skb */
1390                 }
1391                 buffer_info->mapped = true;
1392                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1393                 rx_desc->buffer_addr = (buffer_info->dma);
1394                 rx_desc->gbec_status = DSC_INIT16;
1395 
1396                 netdev_dbg(netdev,
1397                            "i = %d  buffer_info->dma = 0x%08llx  buffer_info->length = 0x%x\n",
1398                            i, (unsigned long long)buffer_info->dma,
1399                            buffer_info->length);
1400 
1401                 if (unlikely(++i == rx_ring->count))
1402                         i = 0;
1403         }
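             /* Publish the last initialised descriptor (one entry behind
              * next_to_use, handling ring wrap) to the hardware's software
              * descriptor pointer register RX_DSC_SW_P.
              */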
1404         if (likely(rx_ring->next_to_use != i)) {
1405                 rx_ring->next_to_use = i;
1406                 if (unlikely(i-- == 0))
1407                         i = (rx_ring->count - 1);
1408                 iowrite32(rx_ring->dma +
1409                           (int)sizeof(struct pch_gbe_rx_desc) * i,
1410                           &hw->reg->RX_DSC_SW_P);
1411         }
1412         return;
1413 }
1414 
1415 static int
1416 pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1417                          struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1418 {
1419         struct pci_dev *pdev = adapter->pdev;
1420         struct pch_gbe_buffer *buffer_info;
1421         unsigned int i;
1422         unsigned int bufsz;
1423         unsigned int size;
1424 
1425         bufsz = adapter->rx_buffer_len;
1426 
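             /* One coherent allocation backs every descriptor's receive buffer;
              * PCH_GBE_RESERVE_MEMORY adds slack beyond count * bufsz,
              * presumably as headroom for the DMA engine on large frames.
              */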
1427         size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1428         rx_ring->rx_buff_pool =
1429                 dma_alloc_coherent(&pdev->dev, size,
1430                                    &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1431         if (!rx_ring->rx_buff_pool)
1432                 return -ENOMEM;
1433 
1434         rx_ring->rx_buff_pool_size = size;
1435         for (i = 0; i < rx_ring->count; i++) {
1436                 buffer_info = &rx_ring->buffer_info[i];
1437                 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1438                 buffer_info->length = bufsz;
1439         }
1440         return 0;
1441 }
1442 
1443 /**
1444  * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1445  * @adapter:   Board private structure
1446  * @tx_ring:   Tx descriptor ring
1447  */
1448 static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
1449                                         struct pch_gbe_tx_ring *tx_ring)
1450 {
1451         struct pch_gbe_buffer *buffer_info;
1452         struct sk_buff *skb;
1453         unsigned int i;
1454         unsigned int bufsz;
1455         struct pch_gbe_tx_desc *tx_desc;
1456 
1457         bufsz =
1458             adapter->hw.mac.max_frame_size + PCH_GBE_DMA_ALIGN + NET_IP_ALIGN;
1459 
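             /* Pre-allocate one skb per Tx descriptor, sized for the largest
              * possible frame, so pch_gbe_tx_queue() can use them as aligned
              * copy targets for outgoing data.
              */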
1460         for (i = 0; i < tx_ring->count; i++) {
1461                 buffer_info = &tx_ring->buffer_info[i];
1462                 skb = netdev_alloc_skb(adapter->netdev, bufsz);
                     if (unlikely(!skb))
                             break;  /* stop pre-filling the ring on allocation failure */
1463                 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1464                 buffer_info->skb = skb;
1465                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1466                 tx_desc->gbec_status = (DSC_INIT16);
1467         }
1468         return;
1469 }
1470 
1471 /**
1472  * pch_gbe_clean_tx - Reclaim resources after transmit completes
1473  * @adapter:   Board private structure
1474  * @tx_ring:   Tx descriptor ring
1475  * Returns:
1476  *      true:  Cleaned the descriptor
1477  *      false: Not cleaned the descriptor
1478  */
1479 static bool
1480 pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
1481                  struct pch_gbe_tx_ring *tx_ring)
1482 {
1483         struct pch_gbe_tx_desc *tx_desc;
1484         struct pch_gbe_buffer *buffer_info;
1485         struct sk_buff *skb;
1486         unsigned int i;
1487         unsigned int cleaned_count = 0;
1488         bool cleaned = false;
1489         int unused, thresh;
1490 
1491         netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1492                    tx_ring->next_to_clean);
1493 
1494         i = tx_ring->next_to_clean;
1495         tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1496         netdev_dbg(adapter->netdev, "gbec_status:0x%04x  dma_status:0x%04x\n",
1497                    tx_desc->gbec_status, tx_desc->dma_status);
1498 
1499         unused = PCH_GBE_DESC_UNUSED(tx_ring);
1500         thresh = tx_ring->count - PCH_GBE_TX_WEIGHT;
1501         if ((tx_desc->gbec_status == DSC_INIT16) && (unused < thresh)) {
1502                 /* current marked clean, tx queue filling up, do extra clean */
1503                 int j, k;
1504                 if (unused < 8) {  /* tx queue nearly full */
1505                         netdev_dbg(adapter->netdev,
1506                                    "clean_tx: transmit queue warning (%x,%x) unused=%d\n",
1507                                    tx_ring->next_to_clean, tx_ring->next_to_use,
1508                                    unused);
1509                 }
1510 
1511                 /* current marked clean, scan for more that need cleaning. */
1512                 k = i;
1513                 for (j = 0; j < PCH_GBE_TX_WEIGHT; j++) {
1514                         tx_desc = PCH_GBE_TX_DESC(*tx_ring, k);
1515                         if (tx_desc->gbec_status != DSC_INIT16)
1516                                 break; /* found */
1517                         if (++k >= tx_ring->count) k = 0; /* increment, wrap */
1518                 }
1519                 if (j < PCH_GBE_TX_WEIGHT) {
1520                         netdev_dbg(adapter->netdev,
1521                                    "clean_tx: unused=%d loops=%d found tx_desc[%x,%x:%x].gbec_status=%04x\n",
1522                                    unused, j, i, k, tx_ring->next_to_use,
1523                                    tx_desc->gbec_status);
1524                         i = k;  /* found one to clean, usually gbec_status == 2000 */
1525                 }
1526         }
1527 
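             /* A descriptor whose DSC_INIT16 bits have been cleared has been
              * handed back by the hardware; walk those entries, account the
              * result, unmap the buffer and re-arm it with DSC_INIT16.
              */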
1528         while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) {
1529                 netdev_dbg(adapter->netdev, "gbec_status:0x%04x\n",
1530                            tx_desc->gbec_status);
1531                 buffer_info = &tx_ring->buffer_info[i];
1532                 skb = buffer_info->skb;
1533                 cleaned = true;
1534 
1535                 if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_ABT)) {
1536                         adapter->stats.tx_aborted_errors++;
1537                         netdev_err(adapter->netdev, "Transfer Abort Error\n");
1538                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CRSER)
1539                           ) {
1540                         adapter->stats.tx_carrier_errors++;
1541                         netdev_err(adapter->netdev,
1542                                    "Transfer Carrier Sense Error\n");
1543                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_EXCOL)
1544                           ) {
1545                         adapter->stats.tx_aborted_errors++;
1546                         netdev_err(adapter->netdev,
1547                                    "Transfer Collision Abort Error\n");
1548                 } else if ((tx_desc->gbec_status &
1549                             (PCH_GBE_TXD_GMAC_STAT_SNGCOL |
1550                              PCH_GBE_TXD_GMAC_STAT_MLTCOL))) {
1551                         adapter->stats.collisions++;
1552                         adapter->stats.tx_packets++;
1553                         adapter->stats.tx_bytes += skb->len;
1554                         netdev_dbg(adapter->netdev, "Transfer Collision\n");
1555                 } else if ((tx_desc->gbec_status & PCH_GBE_TXD_GMAC_STAT_CMPLT)
1556                           ) {
1557                         adapter->stats.tx_packets++;
1558                         adapter->stats.tx_bytes += skb->len;
1559                 }
1560                 if (buffer_info->mapped) {
1561                         netdev_dbg(adapter->netdev,
1562                                    "unmap buffer_info->dma : %d\n", i);
1563                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1564                                          buffer_info->length, DMA_TO_DEVICE);
1565                         buffer_info->mapped = false;
1566                 }
1567                 if (buffer_info->skb) {
1568                         netdev_dbg(adapter->netdev,
1569                                    "trim buffer_info->skb : %d\n", i);
1570                         skb_trim(buffer_info->skb, 0);
1571                 }
1572                 tx_desc->gbec_status = DSC_INIT16;
1573                 if (unlikely(++i == tx_ring->count))
1574                         i = 0;
1575                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, i);
1576 
1577                 /* weight of a sort for tx, to avoid endless transmit cleanup */
1578                 if (cleaned_count++ == PCH_GBE_TX_WEIGHT) {
1579                         cleaned = false;
1580                         break;
1581                 }
1582         }
1583         netdev_dbg(adapter->netdev,
1584                    "called pch_gbe_unmap_and_free_tx_resource() %d count\n",
1585                    cleaned_count);
1586         if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
1587                 /* Recover from running out of Tx resources in xmit_frame */
1588                 netif_tx_lock(adapter->netdev);
1589                 if (unlikely(cleaned &&
1590                              netif_queue_stopped(adapter->netdev))) {
1591                         netif_wake_queue(adapter->netdev);
1592                         adapter->stats.tx_restart_count++;
1593                         netdev_dbg(adapter->netdev, "Tx wake queue\n");
1594                 }
1595 
1596                 tx_ring->next_to_clean = i;
1597 
1598                 netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
1599                            tx_ring->next_to_clean);
1600                 netif_tx_unlock(adapter->netdev);
1601         }
1602         return cleaned;
1603 }
1604 
1605 /**
1606  * pch_gbe_clean_rx - Send received data up the network stack; legacy
1607  * @adapter:     Board private structure
1608  * @rx_ring:     Rx descriptor ring
1609  * @work_done:   Completed count
1610  * @work_to_do:  Request count
1611  * Returns:
1612  *      true:  Cleaned the descriptor
1613  *      false: Not cleaned the descriptor
1614  */
1615 static bool
1616 pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1617                  struct pch_gbe_rx_ring *rx_ring,
1618                  int *work_done, int work_to_do)
1619 {
1620         struct net_device *netdev = adapter->netdev;
1621         struct pci_dev *pdev = adapter->pdev;
1622         struct pch_gbe_buffer *buffer_info;
1623         struct pch_gbe_rx_desc *rx_desc;
1624         u32 length;
1625         unsigned int i;
1626         unsigned int cleaned_count = 0;
1627         bool cleaned = false;
1628         struct sk_buff *skb;
1629         u8 dma_status;
1630         u16 gbec_status;
1631         u32 tcp_ip_status;
1632 
1633         i = rx_ring->next_to_clean;
1634 
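             /* Each completed descriptor points at a pool buffer; copy the
              * frame into the skb supplied by pch_gbe_alloc_rx_buffers() and
              * hand it to napi_gro_receive().
              */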
1635         while (*work_done < work_to_do) {
1636                 /* Check Rx descriptor status */
1637                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, i);
1638                 if (rx_desc->gbec_status == DSC_INIT16)
1639                         break;
1640                 cleaned = true;
1641                 cleaned_count++;
1642 
1643                 dma_status = rx_desc->dma_status;
1644                 gbec_status = rx_desc->gbec_status;
1645                 tcp_ip_status = rx_desc->tcp_ip_status;
1646                 rx_desc->gbec_status = DSC_INIT16;
1647                 buffer_info = &rx_ring->buffer_info[i];
1648                 skb = buffer_info->skb;
1649                 buffer_info->skb = NULL;
1650 
1651                 /* unmap dma */
1652                 dma_unmap_single(&pdev->dev, buffer_info->dma,
1653                                    buffer_info->length, DMA_FROM_DEVICE);
1654                 buffer_info->mapped = false;
1655 
1656                 netdev_dbg(netdev,
1657                            "RxDecNo = 0x%04x  Status[DMA:0x%02x GBE:0x%04x TCP:0x%08x]  BufInf = 0x%p\n",
1658                            i, dma_status, gbec_status, tcp_ip_status,
1659                            buffer_info);
1660                 /* Error check */
1661                 if (unlikely(gbec_status & PCH_GBE_RXD_GMAC_STAT_NOTOCTAL)) {
1662                         adapter->stats.rx_frame_errors++;
1663                         netdev_err(netdev, "Receive Not Octal Error\n");
1664                 } else if (unlikely(gbec_status &
1665                                 PCH_GBE_RXD_GMAC_STAT_NBLERR)) {
1666                         adapter->stats.rx_frame_errors++;
1667                         netdev_err(netdev, "Receive Nibble Error\n");
1668                 } else if (unlikely(gbec_status &
1669                                 PCH_GBE_RXD_GMAC_STAT_CRCERR)) {
1670                         adapter->stats.rx_crc_errors++;
1671                         netdev_err(netdev, "Receive CRC Error\n");
1672                 } else {
1673                         /* get receive length */
1674                         /* length convert[-3], length includes FCS length */
1675                         length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1676                         if (rx_desc->rx_words_eob & 0x02)
1677                                 length = length - 4;
1678                         /*
1679                          * buffer_info->rx_buffer: [Header:14][payload]
1680                          * skb->data: [Reserve:2][Header:14][payload]
1681                          */
1682                         memcpy(skb->data, buffer_info->rx_buffer, length);
1683 
1684                         /* update status of driver */
1685                         adapter->stats.rx_bytes += length;
1686                         adapter->stats.rx_packets++;
1687                         if ((gbec_status & PCH_GBE_RXD_GMAC_STAT_MARMLT))
1688                                 adapter->stats.multicast++;
1689                         /* Write skb metadata */
1690                         skb_put(skb, length);
1691 
1692                         pch_rx_timestamp(adapter, skb);
1693 
1694                         skb->protocol = eth_type_trans(skb, netdev);
1695                         if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
1696                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1697                         else
1698                                 skb->ip_summed = CHECKSUM_NONE;
1699 
1700                         napi_gro_receive(&adapter->napi, skb);
1701                         (*work_done)++;
1702                         netdev_dbg(netdev,
1703                                    "Receive skb->ip_summed: %d length: %d\n",
1704                                    skb->ip_summed, length);
1705                 }
1706                 /* return some buffers to hardware, one at a time is too slow */
1707                 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1708                         pch_gbe_alloc_rx_buffers(adapter, rx_ring,
1709                                                  cleaned_count);
1710                         cleaned_count = 0;
1711                 }
1712                 if (++i == rx_ring->count)
1713                         i = 0;
1714         }
1715         rx_ring->next_to_clean = i;
1716         if (cleaned_count)
1717                 pch_gbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
1718         return cleaned;
1719 }
1720 
1721 /**
1722  * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
1723  * @adapter:  Board private structure
1724  * @tx_ring:  Tx descriptor ring (for a specific queue) to setup
1725  * Returns:
1726  *      0:              Successfully
1727  *      Negative value: Failed
1728  */
1729 int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1730                                 struct pch_gbe_tx_ring *tx_ring)
1731 {
1732         struct pci_dev *pdev = adapter->pdev;
1733         struct pch_gbe_tx_desc *tx_desc;
1734         int size;
1735         int desNo;
1736 
1737         size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1738         tx_ring->buffer_info = vzalloc(size);
1739         if (!tx_ring->buffer_info)
1740                 return -ENOMEM;
1741 
1742         tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1743 
1744         tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1745                                            &tx_ring->dma, GFP_KERNEL);
1746         if (!tx_ring->desc) {
1747                 vfree(tx_ring->buffer_info);
1748                 return -ENOMEM;
1749         }
1750 
1751         tx_ring->next_to_use = 0;
1752         tx_ring->next_to_clean = 0;
1753 
1754         for (desNo = 0; desNo < tx_ring->count; desNo++) {
1755                 tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
1756                 tx_desc->gbec_status = DSC_INIT16;
1757         }
1758         netdev_dbg(adapter->netdev,
1759                    "tx_ring->desc = 0x%p  tx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1760                    tx_ring->desc, (unsigned long long)tx_ring->dma,
1761                    tx_ring->next_to_clean, tx_ring->next_to_use);
1762         return 0;
1763 }
1764 
1765 /**
1766  * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
1767  * @adapter:  Board private structure
1768  * @rx_ring:  Rx descriptor ring (for a specific queue) to setup
1769  * Returns:
1770  *      0:              Successfully
1771  *      Negative value: Failed
1772  */
1773 int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1774                                 struct pch_gbe_rx_ring *rx_ring)
1775 {
1776         struct pci_dev *pdev = adapter->pdev;
1777         struct pch_gbe_rx_desc *rx_desc;
1778         int size;
1779         int desNo;
1780 
1781         size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1782         rx_ring->buffer_info = vzalloc(size);
1783         if (!rx_ring->buffer_info)
1784                 return -ENOMEM;
1785 
1786         rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1787         rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1788                                                   &rx_ring->dma, GFP_KERNEL);
1789         if (!rx_ring->desc) {
1790                 vfree(rx_ring->buffer_info);
1791                 return -ENOMEM;
1792         }
1793         rx_ring->next_to_clean = 0;
1794         rx_ring->next_to_use = 0;
1795         for (desNo = 0; desNo < rx_ring->count; desNo++) {
1796                 rx_desc = PCH_GBE_RX_DESC(*rx_ring, desNo);
1797                 rx_desc->gbec_status = DSC_INIT16;
1798         }
1799         netdev_dbg(adapter->netdev,
1800                    "rx_ring->desc = 0x%p  rx_ring->dma = 0x%08llx next_to_clean = 0x%08x  next_to_use = 0x%08x\n",
1801                    rx_ring->desc, (unsigned long long)rx_ring->dma,
1802                    rx_ring->next_to_clean, rx_ring->next_to_use);
1803         return 0;
1804 }
1805 
1806 /**
1807  * pch_gbe_free_tx_resources - Free Tx Resources
1808  * @adapter:  Board private structure
1809  * @tx_ring:  Tx descriptor ring for a specific queue
1810  */
1811 void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
1812                                 struct pch_gbe_tx_ring *tx_ring)
1813 {
1814         struct pci_dev *pdev = adapter->pdev;
1815 
1816         pch_gbe_clean_tx_ring(adapter, tx_ring);
1817         vfree(tx_ring->buffer_info);
1818         tx_ring->buffer_info = NULL;
1819         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1820         tx_ring->desc = NULL;
1821 }
1822 
1823 /**
1824  * pch_gbe_free_rx_resources - Free Rx Resources
1825  * @adapter:  Board private structure
1826  * @rx_ring:  Ring to clean the resources from
1827  */
1828 void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
1829                                 struct pch_gbe_rx_ring *rx_ring)
1830 {
1831         struct pci_dev *pdev = adapter->pdev;
1832 
1833         pch_gbe_clean_rx_ring(adapter, rx_ring);
1834         vfree(rx_ring->buffer_info);
1835         rx_ring->buffer_info = NULL;
1836         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1837         rx_ring->desc = NULL;
1838 }
1839 
1840 /**
1841  * pch_gbe_request_irq - Allocate an interrupt line
1842  * @adapter:  Board private structure
1843  * Returns:
1844  *      0:              Successfully
1845  *      Negative value: Failed
1846  */
1847 static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1848 {
1849         struct net_device *netdev = adapter->netdev;
1850         int err;
1851 
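             /* Ask the PCI core for a single interrupt vector of any type
              * (MSI-X, MSI or legacy INTx) and hook it with a shared handler.
              */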
1852         err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES);
1853         if (err < 0)
1854                 return err;
1855 
1856         adapter->irq = pci_irq_vector(adapter->pdev, 0);
1857 
1858         err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED,
1859                           netdev->name, netdev);
1860         if (err)
1861                 netdev_err(netdev, "Unable to allocate interrupt Error: %d\n",
1862                            err);
1863         netdev_dbg(netdev, "have_msi : %d  return : 0x%04x\n",
1864                    pci_dev_msi_enabled(adapter->pdev), err);
1865         return err;
1866 }
1867 
1868 /**
1869  * pch_gbe_up - Up GbE network device
1870  * @adapter:  Board private structure
1871  * Returns:
1872  *      0:              Successfully
1873  *      Negative value: Failed
1874  */
1875 int pch_gbe_up(struct pch_gbe_adapter *adapter)
1876 {
1877         struct net_device *netdev = adapter->netdev;
1878         struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
1879         struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1880         int err = -EINVAL;
1881 
1882         /* Ensure we have a valid MAC */
1883         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1884                 netdev_err(netdev, "Error: Invalid MAC address\n");
1885                 goto out;
1886         }
1887 
1888         /* hardware has been reset, we need to reload some things */
1889         pch_gbe_set_multi(netdev);
1890 
1891         pch_gbe_setup_tctl(adapter);
1892         pch_gbe_configure_tx(adapter);
1893         pch_gbe_setup_rctl(adapter);
1894         pch_gbe_configure_rx(adapter);
1895 
1896         err = pch_gbe_request_irq(adapter);
1897         if (err) {
1898                 netdev_err(netdev,
1899                            "Error: can't bring device up - irq request failed\n");
1900                 goto out;
1901         }
1902         err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1903         if (err) {
1904                 netdev_err(netdev,
1905                            "Error: can't bring device up - alloc rx buffers pool failed\n");
1906                 goto freeirq;
1907         }
1908         pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1909         pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
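             /* Save the stack's Tx queue length; pch_gbe_down() restores it
              * when the interface is taken down.
              */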
1910         adapter->tx_queue_len = netdev->tx_queue_len;
1911         pch_gbe_enable_dma_rx(&adapter->hw);
1912         pch_gbe_enable_mac_rx(&adapter->hw);
1913 
1914         mod_timer(&adapter->watchdog_timer, jiffies);
1915 
1916         napi_enable(&adapter->napi);
1917         pch_gbe_irq_enable(adapter);
1918         netif_start_queue(adapter->netdev);
1919 
1920         return 0;
1921 
1922 freeirq:
1923         pch_gbe_free_irq(adapter);
1924 out:
1925         return err;
1926 }
1927 
1928 /**
1929  * pch_gbe_down - Down GbE network device
1930  * @adapter:  Board private structure
1931  */
1932 void pch_gbe_down(struct pch_gbe_adapter *adapter)
1933 {
1934         struct net_device *netdev = adapter->netdev;
1935         struct pci_dev *pdev = adapter->pdev;
1936         struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1937 
1938         /* signal that we're down so the interrupt handler does not
1939          * reschedule our watchdog timer */
1940         napi_disable(&adapter->napi);
1941         atomic_set(&adapter->irq_sem, 0);
1942 
1943         pch_gbe_irq_disable(adapter);
1944         pch_gbe_free_irq(adapter);
1945 
1946         del_timer_sync(&adapter->watchdog_timer);
1947 
1948         netdev->tx_queue_len = adapter->tx_queue_len;
1949         netif_carrier_off(netdev);
1950         netif_stop_queue(netdev);
1951 
1952         if ((pdev->error_state) && (pdev->error_state != pci_channel_io_normal))
1953                 pch_gbe_reset(adapter);
1954         pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1955         pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1956 
1957         dma_free_coherent(&pdev->dev, rx_ring->rx_buff_pool_size,
1958                           rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1959         rx_ring->rx_buff_pool_logic = 0;
1960         rx_ring->rx_buff_pool_size = 0;
1961         rx_ring->rx_buff_pool = NULL;
1962 }
1963 
1964 /**
1965  * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
1966  * @adapter:  Board private structure to initialize
1967  * Returns:
1968  *      0:              Successfully
1969  *      Negative value: Failed
1970  */
1971 static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
1972 {
1973         struct pch_gbe_hw *hw = &adapter->hw;
1974         struct net_device *netdev = adapter->netdev;
1975 
1976         adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
1977         hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1978         hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1979         hw->phy.reset_delay_us = PCH_GBE_PHY_RESET_DELAY_US;
1980 
1981         if (pch_gbe_alloc_queues(adapter)) {
1982                 netdev_err(netdev, "Unable to allocate memory for queues\n");
1983                 return -ENOMEM;
1984         }
1985         spin_lock_init(&adapter->hw.miim_lock);
1986         spin_lock_init(&adapter->stats_lock);
1987         spin_lock_init(&adapter->ethtool_lock);
1988         atomic_set(&adapter->irq_sem, 0);
1989         pch_gbe_irq_disable(adapter);
1990 
1991         pch_gbe_init_stats(adapter);
1992 
1993         netdev_dbg(netdev,
1994                    "rx_buffer_len : %d  mac.min_frame_size : %d  mac.max_frame_size : %d\n",
1995                    (u32) adapter->rx_buffer_len,
1996                    hw->mac.min_frame_size, hw->mac.max_frame_size);
1997         return 0;
1998 }
1999 
2000 /**
2001  * pch_gbe_open - Called when a network interface is made active
2002  * @netdev:     Network interface device structure
2003  * Returns:
2004  *      0:              Successfully
2005  *      Negative value: Failed
2006  */
2007 static int pch_gbe_open(struct net_device *netdev)
2008 {
2009         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2010         struct pch_gbe_hw *hw = &adapter->hw;
2011         int err;
2012 
2013         /* allocate transmit descriptors */
2014         err = pch_gbe_setup_tx_resources(adapter, adapter->tx_ring);
2015         if (err)
2016                 goto err_setup_tx;
2017         /* allocate receive descriptors */
2018         err = pch_gbe_setup_rx_resources(adapter, adapter->rx_ring);
2019         if (err)
2020                 goto err_setup_rx;
2021         pch_gbe_phy_power_up(hw);
2022         err = pch_gbe_up(adapter);
2023         if (err)
2024                 goto err_up;
2025         netdev_dbg(netdev, "Success End\n");
2026         return 0;
2027 
2028 err_up:
2029         if (!adapter->wake_up_evt)
2030                 pch_gbe_phy_power_down(hw);
2031         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2032 err_setup_rx:
2033         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2034 err_setup_tx:
2035         pch_gbe_reset(adapter);
2036         netdev_err(netdev, "Error End\n");
2037         return err;
2038 }
2039 
2040 /**
2041  * pch_gbe_stop - Disables a network interface
2042  * @netdev:  Network interface device structure
2043  * Returns:
2044  *      0: Successfully
2045  */
2046 static int pch_gbe_stop(struct net_device *netdev)
2047 {
2048         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2049         struct pch_gbe_hw *hw = &adapter->hw;
2050 
2051         pch_gbe_down(adapter);
2052         if (!adapter->wake_up_evt)
2053                 pch_gbe_phy_power_down(hw);
2054         pch_gbe_free_tx_resources(adapter, adapter->tx_ring);
2055         pch_gbe_free_rx_resources(adapter, adapter->rx_ring);
2056         return 0;
2057 }
2058 
2059 /**
2060  * pch_gbe_xmit_frame - Packet transmitting start
2061  * @skb:     Socket buffer structure
2062  * @netdev:  Network interface device structure
2063  * Returns:
2064  *      - NETDEV_TX_OK:   Normal end
2065  *      - NETDEV_TX_BUSY: Error end
2066  */
2067 static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2068 {
2069         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2070         struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
2071 
2072         if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
2073                 netif_stop_queue(netdev);
2074                 netdev_dbg(netdev,
2075                            "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
2076                            tx_ring->next_to_use, tx_ring->next_to_clean);
2077                 return NETDEV_TX_BUSY;
2078         }
2079 
2080         /* CRC,ITAG no support */
2081         pch_gbe_tx_queue(adapter, tx_ring, skb);
2082         return NETDEV_TX_OK;
2083 }
2084 
2085 /**
2086  * pch_gbe_set_multi - Multicast and Promiscuous mode set
2087  * @netdev:   Network interface device structure
2088  */
2089 static void pch_gbe_set_multi(struct net_device *netdev)
2090 {
2091         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2092         struct pch_gbe_hw *hw = &adapter->hw;
2093         struct netdev_hw_addr *ha;
2094         u32 rctl, adrmask;
2095         int mc_count, i;
2096 
2097         netdev_dbg(netdev, "netdev->flags : 0x%08x\n", netdev->flags);
2098 
2099         /* By default enable address & multicast filtering */
2100         rctl = ioread32(&hw->reg->RX_MODE);
2101         rctl |= PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN;
2102 
2103         /* Promiscuous mode disables all hardware address filtering */
2104         if (netdev->flags & IFF_PROMISC)
2105                 rctl &= ~(PCH_GBE_ADD_FIL_EN | PCH_GBE_MLT_FIL_EN);
2106 
2107         /* If we want to monitor more multicast addresses than the hardware can
2108          * support then disable hardware multicast filtering.
2109          */
2110         mc_count = netdev_mc_count(netdev);
2111         if ((netdev->flags & IFF_ALLMULTI) || mc_count >= PCH_GBE_MAR_ENTRIES)
2112                 rctl &= ~PCH_GBE_MLT_FIL_EN;
2113 
2114         iowrite32(rctl, &hw->reg->RX_MODE);
2115 
2116         /* If we're not using multicast filtering then there's no point
2117          * configuring the unused MAC address registers.
2118          */
2119         if (!(rctl & PCH_GBE_MLT_FIL_EN))
2120                 return;
2121 
2122         /* Load the first set of multicast addresses into MAC address registers
2123          * for use by hardware filtering.
2124          */
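             /* Entry 0 of the MAC address registers holds the station address
              * (see pch_gbe_set_mac()), so multicast entries start at index 1.
              */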
2125         i = 1;
2126         netdev_for_each_mc_addr(ha, netdev)
2127                 pch_gbe_mac_mar_set(hw, ha->addr, i++);
2128 
2129         /* If there are spare MAC registers, mask & clear them */
2130         for (; i < PCH_GBE_MAR_ENTRIES; i++) {
2131                 /* Clear MAC address mask */
2132                 adrmask = ioread32(&hw->reg->ADDR_MASK);
2133                 iowrite32(adrmask | BIT(i), &hw->reg->ADDR_MASK);
2134                 /* wait busy */
2135                 pch_gbe_wait_clr_bit(&hw->reg->ADDR_MASK, PCH_GBE_BUSY);
2136                 /* Clear MAC address */
2137                 iowrite32(0, &hw->reg->mac_adr[i].high);
2138                 iowrite32(0, &hw->reg->mac_adr[i].low);
2139         }
2140 
2141         netdev_dbg(netdev,
2142                  "RX_MODE reg(check bit31,30 ADD,MLT) : 0x%08x  netdev->mc_count : 0x%08x\n",
2143                  ioread32(&hw->reg->RX_MODE), mc_count);
2144 }
2145 
2146 /**
2147  * pch_gbe_set_mac - Change the Ethernet Address of the NIC
2148  * @netdev: Network interface device structure
2149  * @addr:   Pointer to an address structure
2150  * Returns:
2151  *      0:              Successfully
2152  *      -EADDRNOTAVAIL: Failed
2153  */
2154 static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
2155 {
2156         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2157         struct sockaddr *skaddr = addr;
2158         int ret_val;
2159 
2160         if (!is_valid_ether_addr(skaddr->sa_data)) {
2161                 ret_val = -EADDRNOTAVAIL;
2162         } else {
2163                 memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
2164                 memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
2165                 pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2166                 ret_val = 0;
2167         }
2168         netdev_dbg(netdev, "ret_val : 0x%08x\n", ret_val);
2169         netdev_dbg(netdev, "dev_addr : %pM\n", netdev->dev_addr);
2170         netdev_dbg(netdev, "mac_addr : %pM\n", adapter->hw.mac.addr);
2171         netdev_dbg(netdev, "MAC_ADR1AB reg : 0x%08x 0x%08x\n",
2172                    ioread32(&adapter->hw.reg->mac_adr[0].high),
2173                    ioread32(&adapter->hw.reg->mac_adr[0].low));
2174         return ret_val;
2175 }
2176 
2177 /**
2178  * pch_gbe_change_mtu - Change the Maximum Transfer Unit
2179  * @netdev:   Network interface device structure
2180  * @new_mtu:  New value for maximum frame size
2181  * Returns:
2182  *      0:              Successfully
2183  *      -EINVAL:        Failed
2184  */
2185 static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2186 {
2187         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2188         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2189         unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2190         int err;
2191 
2192         if (max_frame <= PCH_GBE_FRAME_SIZE_2048)
2193                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_2048;
2194         else if (max_frame <= PCH_GBE_FRAME_SIZE_4096)
2195                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_4096;
2196         else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2197                 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2198         else
2199                 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2200 
2201         if (netif_running(netdev)) {
2202                 pch_gbe_down(adapter);
2203                 err = pch_gbe_up(adapter);
2204                 if (err) {
2205                         adapter->rx_buffer_len = old_rx_buffer_len;
2206                         pch_gbe_up(adapter);
2207                         return err;
2208                 } else {
2209                         netdev->mtu = new_mtu;
2210                         adapter->hw.mac.max_frame_size = max_frame;
2211                 }
2212         } else {
2213                 pch_gbe_reset(adapter);
2214                 netdev->mtu = new_mtu;
2215                 adapter->hw.mac.max_frame_size = max_frame;
2216         }
2217 
2218         netdev_dbg(netdev,
2219                    "max_frame : %d  rx_buffer_len : %d  mtu : %d  max_frame_size : %d\n",
2220                    max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
2221                    adapter->hw.mac.max_frame_size);
2222         return 0;
2223 }
2224 
2225 /**
2226  * pch_gbe_set_features - Reset device after features changed
2227  * @netdev:   Network interface device structure
2228  * @features:  New features
2229  * Returns:
2230  *      0:              HW state updated successfully
2231  */
2232 static int pch_gbe_set_features(struct net_device *netdev,
2233         netdev_features_t features)
2234 {
2235         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2236         netdev_features_t changed = features ^ netdev->features;
2237 
2238         if (!(changed & NETIF_F_RXCSUM))
2239                 return 0;
2240 
2241         if (netif_running(netdev))
2242                 pch_gbe_reinit_locked(adapter);
2243         else
2244                 pch_gbe_reset(adapter);
2245 
2246         return 0;
2247 }
2248 
2249 /**
2250  * pch_gbe_ioctl - Controls register through a MII interface
2251  * @netdev:   Network interface device structure
2252  * @ifr:      Pointer to ifr structure
2253  * @cmd:      Control command
2254  * Returns:
2255  *      0:      Successfully
2256  *      Negative value: Failed
2257  */
2258 static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2259 {
2260         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2261 
2262         netdev_dbg(netdev, "cmd : 0x%04x\n", cmd);
2263 
2264         if (cmd == SIOCSHWTSTAMP)
2265                 return hwtstamp_ioctl(netdev, ifr, cmd);
2266 
2267         return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2268 }
2269 
2270 /**
2271  * pch_gbe_tx_timeout - Respond to a Tx Hang
2272  * @netdev:   Network interface device structure
2273  */
2274 static void pch_gbe_tx_timeout(struct net_device *netdev)
2275 {
2276         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2277 
2278         /* Do the reset outside of interrupt context */
2279         adapter->stats.tx_timeout_count++;
2280         schedule_work(&adapter->reset_task);
2281 }
2282 
2283 /**
2284  * pch_gbe_napi_poll - NAPI receive and transfer polling callback
2285  * @napi:    Pointer of polling device struct
2286  * @budget:  Maximum number of packets to process in one poll
2287  * Returns:
2288  *      Number of packets processed; a value smaller than @budget means
2289  *      polling is complete and interrupts have been re-enabled.
2290  */
2291 static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2292 {
2293         struct pch_gbe_adapter *adapter =
2294             container_of(napi, struct pch_gbe_adapter, napi);
2295         int work_done = 0;
2296         bool poll_end_flag = false;
2297         bool cleaned = false;
2298 
2299         netdev_dbg(adapter->netdev, "budget : %d\n", budget);
2300 
2301         pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2302         cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2303 
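             /* Report the full budget whenever Tx descriptors were reclaimed,
              * so NAPI polls again instead of re-enabling interrupts right away.
              */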
2304         if (cleaned)
2305                 work_done = budget;
2306         /* If no Tx and not enough Rx work done,
2307          * exit the polling mode
2308          */
2309         if (work_done < budget)
2310                 poll_end_flag = true;
2311 
2312         if (poll_end_flag) {
2313                 napi_complete_done(napi, work_done);
2314                 pch_gbe_irq_enable(adapter);
2315         }
2316 
2317         if (adapter->rx_stop_flag) {
2318                 adapter->rx_stop_flag = false;
2319                 pch_gbe_enable_dma_rx(&adapter->hw);
2320         }
2321 
2322         netdev_dbg(adapter->netdev,
2323                    "poll_end_flag : %d  work_done : %d  budget : %d\n",
2324                    poll_end_flag, work_done, budget);
2325 
2326         return work_done;
2327 }
2328 
2329 #ifdef CONFIG_NET_POLL_CONTROLLER
2330 /**
2331  * pch_gbe_netpoll - Used by things like netconsole to send skbs
2332  * @netdev:  Network interface device structure
2333  */
2334 static void pch_gbe_netpoll(struct net_device *netdev)
2335 {
2336         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2337 
2338         disable_irq(adapter->irq);
2339         pch_gbe_intr(adapter->irq, netdev);
2340         enable_irq(adapter->irq);
2341 }
2342 #endif
2343 
2344 static const struct net_device_ops pch_gbe_netdev_ops = {
2345         .ndo_open = pch_gbe_open,
2346         .ndo_stop = pch_gbe_stop,
2347         .ndo_start_xmit = pch_gbe_xmit_frame,
2348         .ndo_set_mac_address = pch_gbe_set_mac,
2349         .ndo_tx_timeout = pch_gbe_tx_timeout,
2350         .ndo_change_mtu = pch_gbe_change_mtu,
2351         .ndo_set_features = pch_gbe_set_features,
2352         .ndo_do_ioctl = pch_gbe_ioctl,
2353         .ndo_set_rx_mode = pch_gbe_set_multi,
2354 #ifdef CONFIG_NET_POLL_CONTROLLER
2355         .ndo_poll_controller = pch_gbe_netpoll,
2356 #endif
2357 };
2358 
2359 static pci_ers_result_t pch_gbe_io_error_detected(struct pci_dev *pdev,
2360                                                 pci_channel_state_t state)
2361 {
2362         struct net_device *netdev = pci_get_drvdata(pdev);
2363         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2364 
2365         netif_device_detach(netdev);
2366         if (netif_running(netdev))
2367                 pch_gbe_down(adapter);
2368         pci_disable_device(pdev);
2369         /* Request a slot reset. */
2370         return PCI_ERS_RESULT_NEED_RESET;
2371 }
2372 
2373 static pci_ers_result_t pch_gbe_io_slot_reset(struct pci_dev *pdev)
2374 {
2375         struct net_device *netdev = pci_get_drvdata(pdev);
2376         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2377         struct pch_gbe_hw *hw = &adapter->hw;
2378 
2379         if (pci_enable_device(pdev)) {
2380                 netdev_err(netdev, "Cannot re-enable PCI device after reset\n");
2381                 return PCI_ERS_RESULT_DISCONNECT;
2382         }
2383         pci_set_master(pdev);
2384         pci_enable_wake(pdev, PCI_D0, 0);
2385         pch_gbe_phy_power_up(hw);
2386         pch_gbe_reset(adapter);
2387         /* Clear wake up status */
2388         pch_gbe_mac_set_wol_event(hw, 0);
2389 
2390         return PCI_ERS_RESULT_RECOVERED;
2391 }
2392 
2393 static void pch_gbe_io_resume(struct pci_dev *pdev)
2394 {
2395         struct net_device *netdev = pci_get_drvdata(pdev);
2396         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2397 
2398         if (netif_running(netdev)) {
2399                 if (pch_gbe_up(adapter)) {
2400                         netdev_dbg(netdev,
2401                                    "can't bring device back up after reset\n");
2402                         return;
2403                 }
2404         }
2405         netif_device_attach(netdev);
2406 }
2407 
2408 static int __pch_gbe_suspend(struct pci_dev *pdev)
2409 {
2410         struct net_device *netdev = pci_get_drvdata(pdev);
2411         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2412         struct pch_gbe_hw *hw = &adapter->hw;
2413         u32 wufc = adapter->wake_up_evt;
2414         int retval = 0;
2415 
2416         netif_device_detach(netdev);
2417         if (netif_running(netdev))
2418                 pch_gbe_down(adapter);
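             /* With wake-up events armed, keep the receiver configured so the
              * MAC can recognise wake packets while suspended; otherwise power
              * the PHY down before disabling the device.
              */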
2419         if (wufc) {
2420                 pch_gbe_set_multi(netdev);
2421                 pch_gbe_setup_rctl(adapter);
2422                 pch_gbe_configure_rx(adapter);
2423                 pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
2424                                         hw->mac.link_duplex);
2425                 pch_gbe_set_mode(adapter, hw->mac.link_speed,
2426                                         hw->mac.link_duplex);
2427                 pch_gbe_mac_set_wol_event(hw, wufc);
2428                 pci_disable_device(pdev);
2429         } else {
2430                 pch_gbe_phy_power_down(hw);
2431                 pch_gbe_mac_set_wol_event(hw, wufc);
2432                 pci_disable_device(pdev);
2433         }
2434         return retval;
2435 }
2436 
2437 #ifdef CONFIG_PM
2438 static int pch_gbe_suspend(struct device *device)
2439 {
2440         struct pci_dev *pdev = to_pci_dev(device);
2441 
2442         return __pch_gbe_suspend(pdev);
2443 }
2444 
2445 static int pch_gbe_resume(struct device *device)
2446 {
2447         struct pci_dev *pdev = to_pci_dev(device);
2448         struct net_device *netdev = pci_get_drvdata(pdev);
2449         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2450         struct pch_gbe_hw *hw = &adapter->hw;
2451         int err;
2452 
2453         err = pci_enable_device(pdev);
2454         if (err) {
2455                 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
2456                 return err;
2457         }
2458         pci_set_master(pdev);
2459         pch_gbe_phy_power_up(hw);
2460         pch_gbe_reset(adapter);
2461         /* Clear wake on lan control and status */
2462         pch_gbe_mac_set_wol_event(hw, 0);
2463 
2464         if (netif_running(netdev))
2465                 pch_gbe_up(adapter);
2466         netif_device_attach(netdev);
2467 
2468         return 0;
2469 }
2470 #endif /* CONFIG_PM */
2471 
2472 static void pch_gbe_shutdown(struct pci_dev *pdev)
2473 {
2474         __pch_gbe_suspend(pdev);
2475         if (system_state == SYSTEM_POWER_OFF) {
2476                 pci_wake_from_d3(pdev, true);
2477                 pci_set_power_state(pdev, PCI_D3hot);
2478         }
2479 }
2480 
2481 static void pch_gbe_remove(struct pci_dev *pdev)
2482 {
2483         struct net_device *netdev = pci_get_drvdata(pdev);
2484         struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2485 
2486         cancel_work_sync(&adapter->reset_task);
2487         unregister_netdev(netdev);
2488 
2489         pch_gbe_phy_hw_reset(&adapter->hw);
2490 
2491         free_netdev(netdev);
2492 }
2493 
2494 static int pch_gbe_probe(struct pci_dev *pdev,
2495                           const struct pci_device_id *pci_id)
2496 {
2497         struct net_device *netdev;
2498         struct pch_gbe_adapter *adapter;
2499         int ret;
2500 
2501         ret = pcim_enable_device(pdev);
2502         if (ret)
2503                 return ret;
2504 
2505         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2506         if (ret) {
2507                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2508                 if (ret) {
2509                         dev_err(&pdev->dev,
2510                                 "ERR: No usable DMA configuration, aborting\n");
2511                         return ret;
2512                 }
2513         }
2518 
2519         ret = pcim_iomap_regions(pdev, 1 << PCH_GBE_PCI_BAR, pci_name(pdev));
2520         if (ret) {
2521                 dev_err(&pdev->dev,
2522                         "ERR: Can't reserve PCI I/O and memory resources\n");
2523                 return ret;
2524         }
2525         pci_set_master(pdev);
2526 
2527         netdev = alloc_etherdev((int)sizeof(struct pch_gbe_adapter));
2528         if (!netdev)
2529                 return -ENOMEM;
2530         SET_NETDEV_DEV(netdev, &pdev->dev);
2531 
2532         pci_set_drvdata(pdev, netdev);
2533         adapter = netdev_priv(netdev);
2534         adapter->netdev = netdev;
2535         adapter->pdev = pdev;
2536         adapter->hw.back = adapter;
2537         adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
2538         adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
2539         if (adapter->pdata && adapter->pdata->platform_init)
2540                 adapter->pdata->platform_init(pdev);
2541 
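             /* The PCH IEEE 1588 timestamp block sits at device 12, function 4
              * on the same bus; cache its pci_dev for the Rx/Tx timestamp
              * helpers.
              */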
2542         adapter->ptp_pdev =
2543                 pci_get_domain_bus_and_slot(pci_domain_nr(adapter->pdev->bus),
2544                                             adapter->pdev->bus->number,
2545                                             PCI_DEVFN(12, 4));
2546 
2547         netdev->netdev_ops = &pch_gbe_netdev_ops;
2548         netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2549         netif_napi_add(netdev, &adapter->napi,
2550                        pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2551         netdev->hw_features = NETIF_F_RXCSUM |
2552                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2553         netdev->features = netdev->hw_features;
2554         pch_gbe_set_ethtool_ops(netdev);
2555 
2556         /* MTU range: 46 - 10300 */
2557         netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
2558         netdev->max_mtu = PCH_GBE_MAX_JUMBO_FRAME_SIZE -
2559                           (ETH_HLEN + ETH_FCS_LEN);
2560 
2561         pch_gbe_mac_load_mac_addr(&adapter->hw);
2562         pch_gbe_mac_reset_hw(&adapter->hw);
2563 
2564         /* setup the private structure */
2565         ret = pch_gbe_sw_init(adapter);
2566         if (ret)
2567                 goto err_free_netdev;
2568 
2569         /* Initialize PHY */
2570         ret = pch_gbe_init_phy(adapter);
2571         if (ret) {
2572                 dev_err(&pdev->dev, "PHY initialize error\n");
2573                 goto err_free_adapter;
2574         }
2575 
2576         /* Read the MAC address and store it in the private data */
2577         ret = pch_gbe_mac_read_mac_addr(&adapter->hw);
2578         if (ret) {
2579                 dev_err(&pdev->dev, "MAC address Read Error\n");
2580                 goto err_free_adapter;
2581         }
2582 
2583         memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2584         if (!is_valid_ether_addr(netdev->dev_addr)) {
2585                 /*
2586                  * If the MAC is invalid (or just missing), display a warning
2587                  * but do not abort setting up the device. pch_gbe_up will
2588                  * prevent the interface from being brought up until a valid MAC
2589                  * is set.
2590                  */
2591                 dev_err(&pdev->dev,
2592                         "Invalid MAC address, interface disabled.\n");
2593         }
2594         timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
2595 
2596         INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
2597 
2598         pch_gbe_check_options(adapter);
2599 
2600         /* initialize the wol settings based on the eeprom settings */
2601         adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
2602         dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
2603 
2604         /* reset the hardware with the new settings */
2605         pch_gbe_reset(adapter);
2606 
2607         ret = register_netdev(netdev);
2608         if (ret)
2609                 goto err_free_adapter;
2610         /* tell the stack to leave us alone until pch_gbe_open() is called */
2611         netif_carrier_off(netdev);
2612         netif_stop_queue(netdev);
2613 
2614         dev_dbg(&pdev->dev, "PCH Network Connection\n");
2615 
2616         /* Disable hibernation on certain platforms */
2617         if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
2618                 pch_gbe_phy_disable_hibernate(&adapter->hw);
2619 
2620         device_set_wakeup_enable(&pdev->dev, 1);
2621         return 0;
2622 
2623 err_free_adapter:
2624         pch_gbe_phy_hw_reset(&adapter->hw);
2625 err_free_netdev:
2626         free_netdev(netdev);
2627         return ret;
2628 }
2629 
2630 /* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
2631  * ensure it is awake for probe and init. Request the line and reset the PHY.
2632  */
2633 static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
2634 {
2635         unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
2636         unsigned gpio = MINNOW_PHY_RESET_GPIO;
2637         int ret;
2638 
2639         ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
2640                                     "minnow_phy_reset");
2641         if (ret) {
2642                 dev_err(&pdev->dev,
2643                         "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
2644                 return ret;
2645         }
2646 
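             /* Pulse the reset line low then high, holding each level for at
              * least 1.25 ms, so the AR803X PHY sees a clean hardware reset.
              */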
2647         gpio_set_value(gpio, 0);
2648         usleep_range(1250, 1500);
2649         gpio_set_value(gpio, 1);
2650         usleep_range(1250, 1500);
2651 
2652         return ret;
2653 }
2654 
2655 static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
2656         .phy_tx_clk_delay = true,
2657         .phy_disable_hibernate = true,
2658         .platform_init = pch_gbe_minnow_platform_init,
2659 };
2660 
2661 static const struct pci_device_id pch_gbe_pcidev_id[] = {
2662         {.vendor = PCI_VENDOR_ID_INTEL,
2663          .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2664          .subvendor = PCI_VENDOR_ID_CIRCUITCO,
2665          .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
2666          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2667          .class_mask = (0xFFFF00),
2668          .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
2669          },
2670         {.vendor = PCI_VENDOR_ID_INTEL,
2671          .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
2672          .subvendor = PCI_ANY_ID,
2673          .subdevice = PCI_ANY_ID,
2674          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2675          .class_mask = (0xFFFF00)
2676          },
2677         {.vendor = PCI_VENDOR_ID_ROHM,
2678          .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
2679          .subvendor = PCI_ANY_ID,
2680          .subdevice = PCI_ANY_ID,
2681          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2682          .class_mask = (0xFFFF00)
2683          },
2684         {.vendor = PCI_VENDOR_ID_ROHM,
2685          .device = PCI_DEVICE_ID_ROHM_ML7831_GBE,
2686          .subvendor = PCI_ANY_ID,
2687          .subdevice = PCI_ANY_ID,
2688          .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
2689          .class_mask = (0xFFFF00)
2690          },
2691         /* required last entry */
2692         {0}
2693 };
2694 
2695 #ifdef CONFIG_PM
2696 static const struct dev_pm_ops pch_gbe_pm_ops = {
2697         .suspend = pch_gbe_suspend,
2698         .resume = pch_gbe_resume,
2699         .freeze = pch_gbe_suspend,
2700         .thaw = pch_gbe_resume,
2701         .poweroff = pch_gbe_suspend,
2702         .restore = pch_gbe_resume,
2703 };
2704 #endif
2705 
2706 static const struct pci_error_handlers pch_gbe_err_handler = {
2707         .error_detected = pch_gbe_io_error_detected,
2708         .slot_reset = pch_gbe_io_slot_reset,
2709         .resume = pch_gbe_io_resume
2710 };
2711 
2712 static struct pci_driver pch_gbe_driver = {
2713         .name = KBUILD_MODNAME,
2714         .id_table = pch_gbe_pcidev_id,
2715         .probe = pch_gbe_probe,
2716         .remove = pch_gbe_remove,
2717 #ifdef CONFIG_PM
2718         .driver.pm = &pch_gbe_pm_ops,
2719 #endif
2720         .shutdown = pch_gbe_shutdown,
2721         .err_handler = &pch_gbe_err_handler
2722 };
2723 module_pci_driver(pch_gbe_driver);
2724 
2725 MODULE_DESCRIPTION("EG20T PCH Gigabit Ethernet Driver");
2726 MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
2727 MODULE_LICENSE("GPL");
2728 MODULE_VERSION(DRV_VERSION);
2729 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
2730 
2731 /* pch_gbe_main.c */
