drivers/net/ethernet/intel/e1000/e1000_main.c

DEFINITIONS

This source file includes the following definitions:
  1. e1000_alloc_dummy_rx_buffers
  2. e1000_get_hw_dev
  3. e1000_init_module
  4. e1000_exit_module
  5. e1000_request_irq
  6. e1000_free_irq
  7. e1000_irq_disable
  8. e1000_irq_enable
  9. e1000_update_mng_vlan
  10. e1000_init_manageability
  11. e1000_release_manageability
  12. e1000_configure
  13. e1000_up
  14. e1000_power_up_phy
  15. e1000_power_down_phy
  16. e1000_down_and_stop
  17. e1000_down
  18. e1000_reinit_locked
  19. e1000_reset
  20. e1000_dump_eeprom
  21. e1000_is_need_ioport
  22. e1000_fix_features
  23. e1000_set_features
  24. e1000_init_hw_struct
  25. e1000_probe
  26. e1000_remove
  27. e1000_sw_init
  28. e1000_alloc_queues
  29. e1000_open
  30. e1000_close
  31. e1000_check_64k_bound
  32. e1000_setup_tx_resources
  33. e1000_setup_all_tx_resources
  34. e1000_configure_tx
  35. e1000_setup_rx_resources
  36. e1000_setup_all_rx_resources
  37. e1000_setup_rctl
  38. e1000_configure_rx
  39. e1000_free_tx_resources
  40. e1000_free_all_tx_resources
  41. e1000_unmap_and_free_tx_resource
  42. e1000_clean_tx_ring
  43. e1000_clean_all_tx_rings
  44. e1000_free_rx_resources
  45. e1000_free_all_rx_resources
  46. e1000_frag_len
  47. e1000_alloc_frag
  48. e1000_clean_rx_ring
  49. e1000_clean_all_rx_rings
  50. e1000_enter_82542_rst
  51. e1000_leave_82542_rst
  52. e1000_set_mac
  53. e1000_set_rx_mode
  54. e1000_update_phy_info_task
  55. e1000_82547_tx_fifo_stall_task
  56. e1000_has_link
  57. e1000_watchdog
  58. e1000_update_itr
  59. e1000_set_itr
  60. e1000_tso
  61. e1000_tx_csum
  62. e1000_tx_map
  63. e1000_tx_queue
  64. e1000_82547_fifo_workaround
  65. __e1000_maybe_stop_tx
  66. e1000_maybe_stop_tx
  67. e1000_xmit_frame
  68. e1000_regdump
  69. e1000_dump
  70. e1000_tx_timeout
  71. e1000_reset_task
  72. e1000_change_mtu
  73. e1000_update_stats
  74. e1000_intr
  75. e1000_clean
  76. e1000_clean_tx_irq
  77. e1000_rx_checksum
  78. e1000_consume_page
  79. e1000_receive_skb
  80. e1000_tbi_adjust_stats
  81. e1000_tbi_should_accept
  82. e1000_alloc_rx_skb
  83. e1000_clean_jumbo_rx_irq
  84. e1000_copybreak
  85. e1000_clean_rx_irq
  86. e1000_alloc_jumbo_rx_buffers
  87. e1000_alloc_rx_buffers
  88. e1000_smartspeed
  89. e1000_ioctl
  90. e1000_mii_ioctl
  91. e1000_pci_set_mwi
  92. e1000_pci_clear_mwi
  93. e1000_pcix_get_mmrbc
  94. e1000_pcix_set_mmrbc
  95. e1000_io_write
  96. e1000_vlan_used
  97. __e1000_vlan_mode
  98. e1000_vlan_filter_on_off
  99. e1000_vlan_mode
  100. e1000_vlan_rx_add_vid
  101. e1000_vlan_rx_kill_vid
  102. e1000_restore_vlan
  103. e1000_set_spd_dplx
  104. __e1000_shutdown
  105. e1000_suspend
  106. e1000_resume
  107. e1000_shutdown
  108. e1000_netpoll
  109. e1000_io_error_detected
  110. e1000_io_slot_reset
  111. e1000_io_resume

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
   3 
   4 #include "e1000.h"
   5 #include <net/ip6_checksum.h>
   6 #include <linux/io.h>
   7 #include <linux/prefetch.h>
   8 #include <linux/bitops.h>
   9 #include <linux/if_vlan.h>
  10 
  11 char e1000_driver_name[] = "e1000";
  12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
  13 #define DRV_VERSION "7.3.21-k8-NAPI"
  14 const char e1000_driver_version[] = DRV_VERSION;
  15 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  16 
  17 /* e1000_pci_tbl - PCI Device ID Table
  18  *
  19  * Last entry must be all 0s
  20  *
  21  * Macro expands to...
  22  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  23  */
  24 static const struct pci_device_id e1000_pci_tbl[] = {
  25         INTEL_E1000_ETHERNET_DEVICE(0x1000),
  26         INTEL_E1000_ETHERNET_DEVICE(0x1001),
  27         INTEL_E1000_ETHERNET_DEVICE(0x1004),
  28         INTEL_E1000_ETHERNET_DEVICE(0x1008),
  29         INTEL_E1000_ETHERNET_DEVICE(0x1009),
  30         INTEL_E1000_ETHERNET_DEVICE(0x100C),
  31         INTEL_E1000_ETHERNET_DEVICE(0x100D),
  32         INTEL_E1000_ETHERNET_DEVICE(0x100E),
  33         INTEL_E1000_ETHERNET_DEVICE(0x100F),
  34         INTEL_E1000_ETHERNET_DEVICE(0x1010),
  35         INTEL_E1000_ETHERNET_DEVICE(0x1011),
  36         INTEL_E1000_ETHERNET_DEVICE(0x1012),
  37         INTEL_E1000_ETHERNET_DEVICE(0x1013),
  38         INTEL_E1000_ETHERNET_DEVICE(0x1014),
  39         INTEL_E1000_ETHERNET_DEVICE(0x1015),
  40         INTEL_E1000_ETHERNET_DEVICE(0x1016),
  41         INTEL_E1000_ETHERNET_DEVICE(0x1017),
  42         INTEL_E1000_ETHERNET_DEVICE(0x1018),
  43         INTEL_E1000_ETHERNET_DEVICE(0x1019),
  44         INTEL_E1000_ETHERNET_DEVICE(0x101A),
  45         INTEL_E1000_ETHERNET_DEVICE(0x101D),
  46         INTEL_E1000_ETHERNET_DEVICE(0x101E),
  47         INTEL_E1000_ETHERNET_DEVICE(0x1026),
  48         INTEL_E1000_ETHERNET_DEVICE(0x1027),
  49         INTEL_E1000_ETHERNET_DEVICE(0x1028),
  50         INTEL_E1000_ETHERNET_DEVICE(0x1075),
  51         INTEL_E1000_ETHERNET_DEVICE(0x1076),
  52         INTEL_E1000_ETHERNET_DEVICE(0x1077),
  53         INTEL_E1000_ETHERNET_DEVICE(0x1078),
  54         INTEL_E1000_ETHERNET_DEVICE(0x1079),
  55         INTEL_E1000_ETHERNET_DEVICE(0x107A),
  56         INTEL_E1000_ETHERNET_DEVICE(0x107B),
  57         INTEL_E1000_ETHERNET_DEVICE(0x107C),
  58         INTEL_E1000_ETHERNET_DEVICE(0x108A),
  59         INTEL_E1000_ETHERNET_DEVICE(0x1099),
  60         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
  61         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
  62         /* required last entry */
  63         {0,}
  64 };
  65 
  66 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  67 
  68 int e1000_up(struct e1000_adapter *adapter);
  69 void e1000_down(struct e1000_adapter *adapter);
  70 void e1000_reinit_locked(struct e1000_adapter *adapter);
  71 void e1000_reset(struct e1000_adapter *adapter);
  72 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
  73 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
  74 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
  75 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
  76 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
  77                                     struct e1000_tx_ring *txdr);
  78 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
  79                                     struct e1000_rx_ring *rxdr);
  80 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
  81                                     struct e1000_tx_ring *tx_ring);
  82 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
  83                                     struct e1000_rx_ring *rx_ring);
  84 void e1000_update_stats(struct e1000_adapter *adapter);
  85 
  86 static int e1000_init_module(void);
  87 static void e1000_exit_module(void);
  88 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  89 static void e1000_remove(struct pci_dev *pdev);
  90 static int e1000_alloc_queues(struct e1000_adapter *adapter);
  91 static int e1000_sw_init(struct e1000_adapter *adapter);
  92 int e1000_open(struct net_device *netdev);
  93 int e1000_close(struct net_device *netdev);
  94 static void e1000_configure_tx(struct e1000_adapter *adapter);
  95 static void e1000_configure_rx(struct e1000_adapter *adapter);
  96 static void e1000_setup_rctl(struct e1000_adapter *adapter);
  97 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
  98 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
  99 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
 100                                 struct e1000_tx_ring *tx_ring);
 101 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
 102                                 struct e1000_rx_ring *rx_ring);
 103 static void e1000_set_rx_mode(struct net_device *netdev);
 104 static void e1000_update_phy_info_task(struct work_struct *work);
 105 static void e1000_watchdog(struct work_struct *work);
 106 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 107 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 108                                     struct net_device *netdev);
 109 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 110 static int e1000_set_mac(struct net_device *netdev, void *p);
 111 static irqreturn_t e1000_intr(int irq, void *data);
 112 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 113                                struct e1000_tx_ring *tx_ring);
 114 static int e1000_clean(struct napi_struct *napi, int budget);
 115 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 116                                struct e1000_rx_ring *rx_ring,
 117                                int *work_done, int work_to_do);
 118 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 119                                      struct e1000_rx_ring *rx_ring,
 120                                      int *work_done, int work_to_do);
 121 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
 122                                          struct e1000_rx_ring *rx_ring,
 123                                          int cleaned_count)
 124 {
 125 }
 126 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 127                                    struct e1000_rx_ring *rx_ring,
 128                                    int cleaned_count);
 129 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 130                                          struct e1000_rx_ring *rx_ring,
 131                                          int cleaned_count);
 132 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 133 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
 134                            int cmd);
 135 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
 136 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
 137 static void e1000_tx_timeout(struct net_device *dev);
 138 static void e1000_reset_task(struct work_struct *work);
 139 static void e1000_smartspeed(struct e1000_adapter *adapter);
 140 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
 141                                        struct sk_buff *skb);
 142 
 143 static bool e1000_vlan_used(struct e1000_adapter *adapter);
 144 static void e1000_vlan_mode(struct net_device *netdev,
 145                             netdev_features_t features);
 146 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
 147                                      bool filter_on);
 148 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
 149                                  __be16 proto, u16 vid);
 150 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
 151                                   __be16 proto, u16 vid);
 152 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 153 
 154 #ifdef CONFIG_PM
 155 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 156 static int e1000_resume(struct pci_dev *pdev);
 157 #endif
 158 static void e1000_shutdown(struct pci_dev *pdev);
 159 
 160 #ifdef CONFIG_NET_POLL_CONTROLLER
 161 /* for netdump / net console */
 162 static void e1000_netpoll (struct net_device *netdev);
 163 #endif
 164 
 165 #define COPYBREAK_DEFAULT 256
 166 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
 167 module_param(copybreak, uint, 0644);
 168 MODULE_PARM_DESC(copybreak,
 169         "Maximum size of packet that is copied to a new buffer on receive");
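
/* Illustrative usage note (added, not from the original source): because
 * copybreak is declared with module_param(..., 0644), it can be set at load
 * time, e.g. "modprobe e1000 copybreak=128", or adjusted at runtime through
 * /sys/module/e1000/parameters/copybreak.
 */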
 170 
 171 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 172                                                 pci_channel_state_t state);
 173 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 174 static void e1000_io_resume(struct pci_dev *pdev);
 175 
 176 static const struct pci_error_handlers e1000_err_handler = {
 177         .error_detected = e1000_io_error_detected,
 178         .slot_reset = e1000_io_slot_reset,
 179         .resume = e1000_io_resume,
 180 };
 181 
 182 static struct pci_driver e1000_driver = {
 183         .name     = e1000_driver_name,
 184         .id_table = e1000_pci_tbl,
 185         .probe    = e1000_probe,
 186         .remove   = e1000_remove,
 187 #ifdef CONFIG_PM
 188         /* Power Management Hooks */
 189         .suspend  = e1000_suspend,
 190         .resume   = e1000_resume,
 191 #endif
 192         .shutdown = e1000_shutdown,
 193         .err_handler = &e1000_err_handler
 194 };
 195 
 196 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 197 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 198 MODULE_LICENSE("GPL v2");
 199 MODULE_VERSION(DRV_VERSION);
 200 
 201 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 202 static int debug = -1;
 203 module_param(debug, int, 0);
 204 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
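
/* Note (added for clarity): debug defaults to -1, which is outside the 0..31
 * range netif_msg_init() accepts, so e1000_probe() falls back to
 * DEFAULT_MSG_ENABLE above (driver, probe and link messages).
 */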
 205 
  206 /**
  207  * e1000_get_hw_dev - return the net_device associated with an e1000_hw
  208  * @hw: pointer to the HW struct
  209  * used by the hardware layer to print debugging information
  210  **/
 211 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 212 {
 213         struct e1000_adapter *adapter = hw->back;
 214         return adapter->netdev;
 215 }
 216 
 217 /**
 218  * e1000_init_module - Driver Registration Routine
 219  *
 220  * e1000_init_module is the first routine called when the driver is
 221  * loaded. All it does is register with the PCI subsystem.
 222  **/
 223 static int __init e1000_init_module(void)
 224 {
 225         int ret;
 226         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
 227 
 228         pr_info("%s\n", e1000_copyright);
 229 
 230         ret = pci_register_driver(&e1000_driver);
 231         if (copybreak != COPYBREAK_DEFAULT) {
 232                 if (copybreak == 0)
 233                         pr_info("copybreak disabled\n");
 234                 else
 235                         pr_info("copybreak enabled for "
 236                                    "packets <= %u bytes\n", copybreak);
 237         }
 238         return ret;
 239 }
 240 
 241 module_init(e1000_init_module);
 242 
 243 /**
 244  * e1000_exit_module - Driver Exit Cleanup Routine
 245  *
 246  * e1000_exit_module is called just before the driver is removed
 247  * from memory.
 248  **/
 249 static void __exit e1000_exit_module(void)
 250 {
 251         pci_unregister_driver(&e1000_driver);
 252 }
 253 
 254 module_exit(e1000_exit_module);
 255 
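/* Note (added for clarity): the adapters handled by this driver use a single
 * legacy (INTx) interrupt line; the handler is registered with IRQF_SHARED
 * and no MSI setup is attempted.
 */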
 256 static int e1000_request_irq(struct e1000_adapter *adapter)
 257 {
 258         struct net_device *netdev = adapter->netdev;
 259         irq_handler_t handler = e1000_intr;
 260         int irq_flags = IRQF_SHARED;
 261         int err;
 262 
 263         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
 264                           netdev);
 265         if (err) {
 266                 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 267         }
 268 
 269         return err;
 270 }
 271 
 272 static void e1000_free_irq(struct e1000_adapter *adapter)
 273 {
 274         struct net_device *netdev = adapter->netdev;
 275 
 276         free_irq(adapter->pdev->irq, netdev);
 277 }
 278 
 279 /**
 280  * e1000_irq_disable - Mask off interrupt generation on the NIC
 281  * @adapter: board private structure
 282  **/
 283 static void e1000_irq_disable(struct e1000_adapter *adapter)
 284 {
 285         struct e1000_hw *hw = &adapter->hw;
 286 
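        /* Writing all 1s to the Interrupt Mask Clear register masks every
         * interrupt cause; the flush and synchronize_irq() below ensure a
         * handler that is already running has finished before we return.
         */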
 287         ew32(IMC, ~0);
 288         E1000_WRITE_FLUSH();
 289         synchronize_irq(adapter->pdev->irq);
 290 }
 291 
 292 /**
 293  * e1000_irq_enable - Enable default interrupt generation settings
 294  * @adapter: board private structure
 295  **/
 296 static void e1000_irq_enable(struct e1000_adapter *adapter)
 297 {
 298         struct e1000_hw *hw = &adapter->hw;
 299 
 300         ew32(IMS, IMS_ENABLE_MASK);
 301         E1000_WRITE_FLUSH();
 302 }
 303 
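/**
 * e1000_update_mng_vlan - track the manageability (DHCP cookie) VLAN id
 * @adapter: board private structure
 *
 * Keeps the VLAN id carried in the management cookie registered while VLANs
 * are in use, and drops the previously tracked id once it is no longer
 * needed. (Descriptive comment added; not in the original source.)
 */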
 304 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
 305 {
 306         struct e1000_hw *hw = &adapter->hw;
 307         struct net_device *netdev = adapter->netdev;
 308         u16 vid = hw->mng_cookie.vlan_id;
 309         u16 old_vid = adapter->mng_vlan_id;
 310 
 311         if (!e1000_vlan_used(adapter))
 312                 return;
 313 
 314         if (!test_bit(vid, adapter->active_vlans)) {
 315                 if (hw->mng_cookie.status &
 316                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
 317                         e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
 318                         adapter->mng_vlan_id = vid;
 319                 } else {
 320                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
 321                 }
 322                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
 323                     (vid != old_vid) &&
 324                     !test_bit(old_vid, adapter->active_vlans))
 325                         e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
 326                                                old_vid);
 327         } else {
 328                 adapter->mng_vlan_id = vid;
 329         }
 330 }
 331 
 332 static void e1000_init_manageability(struct e1000_adapter *adapter)
 333 {
 334         struct e1000_hw *hw = &adapter->hw;
 335 
 336         if (adapter->en_mng_pt) {
 337                 u32 manc = er32(MANC);
 338 
 339                 /* disable hardware interception of ARP */
 340                 manc &= ~(E1000_MANC_ARP_EN);
 341 
 342                 ew32(MANC, manc);
 343         }
 344 }
 345 
 346 static void e1000_release_manageability(struct e1000_adapter *adapter)
 347 {
 348         struct e1000_hw *hw = &adapter->hw;
 349 
 350         if (adapter->en_mng_pt) {
 351                 u32 manc = er32(MANC);
 352 
 353                 /* re-enable hardware interception of ARP */
 354                 manc |= E1000_MANC_ARP_EN;
 355 
 356                 ew32(MANC, manc);
 357         }
 358 }
 359 
 360 /**
 361  * e1000_configure - configure the hardware for RX and TX
  362  * @adapter: board private structure
 363  **/
 364 static void e1000_configure(struct e1000_adapter *adapter)
 365 {
 366         struct net_device *netdev = adapter->netdev;
 367         int i;
 368 
 369         e1000_set_rx_mode(netdev);
 370 
 371         e1000_restore_vlan(adapter);
 372         e1000_init_manageability(adapter);
 373 
 374         e1000_configure_tx(adapter);
 375         e1000_setup_rctl(adapter);
 376         e1000_configure_rx(adapter);
 377         /* call E1000_DESC_UNUSED which always leaves
 378          * at least 1 descriptor unused to make sure
 379          * next_to_use != next_to_clean
 380          */
 381         for (i = 0; i < adapter->num_rx_queues; i++) {
 382                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
 383                 adapter->alloc_rx_buf(adapter, ring,
 384                                       E1000_DESC_UNUSED(ring));
 385         }
 386 }
 387 
 388 int e1000_up(struct e1000_adapter *adapter)
 389 {
 390         struct e1000_hw *hw = &adapter->hw;
 391 
 392         /* hardware has been reset, we need to reload some things */
 393         e1000_configure(adapter);
 394 
 395         clear_bit(__E1000_DOWN, &adapter->flags);
 396 
 397         napi_enable(&adapter->napi);
 398 
 399         e1000_irq_enable(adapter);
 400 
 401         netif_wake_queue(adapter->netdev);
 402 
 403         /* fire a link change interrupt to start the watchdog */
 404         ew32(ICS, E1000_ICS_LSC);
 405         return 0;
 406 }
 407 
 408 /**
 409  * e1000_power_up_phy - restore link in case the phy was powered down
 410  * @adapter: address of board private structure
 411  *
 412  * The phy may be powered down to save power and turn off link when the
 413  * driver is unloaded and wake on lan is not enabled (among others)
 414  * *** this routine MUST be followed by a call to e1000_reset ***
 415  **/
 416 void e1000_power_up_phy(struct e1000_adapter *adapter)
 417 {
 418         struct e1000_hw *hw = &adapter->hw;
 419         u16 mii_reg = 0;
 420 
 421         /* Just clear the power down bit to wake the phy back up */
 422         if (hw->media_type == e1000_media_type_copper) {
 423                 /* according to the manual, the phy will retain its
 424                  * settings across a power-down/up cycle
 425                  */
 426                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 427                 mii_reg &= ~MII_CR_POWER_DOWN;
 428                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 429         }
 430 }
 431 
 432 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 433 {
 434         struct e1000_hw *hw = &adapter->hw;
 435 
  436         /* Power down the PHY so no link is implied when the interface is
  437          * down. The PHY cannot be powered down if any of the following is true:
  438          * (a) WoL is enabled
  439          * (b) AMT is active
  440          * (c) SoL/IDER session is active
  441          */
 442         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
 443            hw->media_type == e1000_media_type_copper) {
 444                 u16 mii_reg = 0;
 445 
 446                 switch (hw->mac_type) {
 447                 case e1000_82540:
 448                 case e1000_82545:
 449                 case e1000_82545_rev_3:
 450                 case e1000_82546:
 451                 case e1000_ce4100:
 452                 case e1000_82546_rev_3:
 453                 case e1000_82541:
 454                 case e1000_82541_rev_2:
 455                 case e1000_82547:
 456                 case e1000_82547_rev_2:
 457                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
 458                                 goto out;
 459                         break;
 460                 default:
 461                         goto out;
 462                 }
 463                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
 464                 mii_reg |= MII_CR_POWER_DOWN;
 465                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
 466                 msleep(1);
 467         }
 468 out:
 469         return;
 470 }
 471 
 472 static void e1000_down_and_stop(struct e1000_adapter *adapter)
 473 {
 474         set_bit(__E1000_DOWN, &adapter->flags);
 475 
 476         cancel_delayed_work_sync(&adapter->watchdog_task);
 477 
 478         /*
 479          * Since the watchdog task can reschedule other tasks, we should cancel
 480          * it first, otherwise we can run into the situation when a work is
 481          * still running after the adapter has been turned down.
 482          */
 483 
 484         cancel_delayed_work_sync(&adapter->phy_info_task);
 485         cancel_delayed_work_sync(&adapter->fifo_stall_task);
 486 
 487         /* Only kill reset task if adapter is not resetting */
 488         if (!test_bit(__E1000_RESETTING, &adapter->flags))
 489                 cancel_work_sync(&adapter->reset_task);
 490 }
 491 
 492 void e1000_down(struct e1000_adapter *adapter)
 493 {
 494         struct e1000_hw *hw = &adapter->hw;
 495         struct net_device *netdev = adapter->netdev;
 496         u32 rctl, tctl;
 497 
 498         /* disable receives in the hardware */
 499         rctl = er32(RCTL);
 500         ew32(RCTL, rctl & ~E1000_RCTL_EN);
 501         /* flush and sleep below */
 502 
 503         netif_tx_disable(netdev);
 504 
 505         /* disable transmits in the hardware */
 506         tctl = er32(TCTL);
 507         tctl &= ~E1000_TCTL_EN;
 508         ew32(TCTL, tctl);
 509         /* flush both disables and wait for them to finish */
 510         E1000_WRITE_FLUSH();
 511         msleep(10);
 512 
 513         /* Set the carrier off after transmits have been disabled in the
 514          * hardware, to avoid race conditions with e1000_watchdog() (which
 515          * may be running concurrently to us, checking for the carrier
 516          * bit to decide whether it should enable transmits again). Such
  517          * a race condition would result in transmission being disabled
 518          * in the hardware until the next IFF_DOWN+IFF_UP cycle.
 519          */
 520         netif_carrier_off(netdev);
 521 
 522         napi_disable(&adapter->napi);
 523 
 524         e1000_irq_disable(adapter);
 525 
 526         /* Setting DOWN must be after irq_disable to prevent
 527          * a screaming interrupt.  Setting DOWN also prevents
 528          * tasks from rescheduling.
 529          */
 530         e1000_down_and_stop(adapter);
 531 
 532         adapter->link_speed = 0;
 533         adapter->link_duplex = 0;
 534 
 535         e1000_reset(adapter);
 536         e1000_clean_all_tx_rings(adapter);
 537         e1000_clean_all_rx_rings(adapter);
 538 }
 539 
 540 void e1000_reinit_locked(struct e1000_adapter *adapter)
 541 {
 542         WARN_ON(in_interrupt());
 543         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
 544                 msleep(1);
 545         e1000_down(adapter);
 546         e1000_up(adapter);
 547         clear_bit(__E1000_RESETTING, &adapter->flags);
 548 }
 549 
 550 void e1000_reset(struct e1000_adapter *adapter)
 551 {
 552         struct e1000_hw *hw = &adapter->hw;
 553         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
 554         bool legacy_pba_adjust = false;
 555         u16 hwm;
 556 
 557         /* Repartition Pba for greater than 9k mtu
 558          * To take effect CTRL.RST is required.
 559          */
 560 
 561         switch (hw->mac_type) {
 562         case e1000_82542_rev2_0:
 563         case e1000_82542_rev2_1:
 564         case e1000_82543:
 565         case e1000_82544:
 566         case e1000_82540:
 567         case e1000_82541:
 568         case e1000_82541_rev_2:
 569                 legacy_pba_adjust = true;
 570                 pba = E1000_PBA_48K;
 571                 break;
 572         case e1000_82545:
 573         case e1000_82545_rev_3:
 574         case e1000_82546:
 575         case e1000_ce4100:
 576         case e1000_82546_rev_3:
 577                 pba = E1000_PBA_48K;
 578                 break;
 579         case e1000_82547:
 580         case e1000_82547_rev_2:
 581                 legacy_pba_adjust = true;
 582                 pba = E1000_PBA_30K;
 583                 break;
 584         case e1000_undefined:
 585         case e1000_num_macs:
 586                 break;
 587         }
 588 
 589         if (legacy_pba_adjust) {
 590                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
 591                         pba -= 8; /* allocate more FIFO for Tx */
 592 
 593                 if (hw->mac_type == e1000_82547) {
 594                         adapter->tx_fifo_head = 0;
 595                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
 596                         adapter->tx_fifo_size =
 597                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
 598                         atomic_set(&adapter->tx_fifo_stall, 0);
 599                 }
 600         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
 601                 /* adjust PBA for jumbo frames */
 602                 ew32(PBA, pba);
 603 
 604                 /* To maintain wire speed transmits, the Tx FIFO should be
 605                  * large enough to accommodate two full transmit packets,
 606                  * rounded up to the next 1KB and expressed in KB.  Likewise,
 607                  * the Rx FIFO should be large enough to accommodate at least
 608                  * one full receive packet and is similarly rounded up and
 609                  * expressed in KB.
 610                  */
 611                 pba = er32(PBA);
 612                 /* upper 16 bits has Tx packet buffer allocation size in KB */
 613                 tx_space = pba >> 16;
 614                 /* lower 16 bits has Rx packet buffer allocation size in KB */
 615                 pba &= 0xffff;
  616                 /* the Tx FIFO also stores 16 bytes of information about the Tx
  617                  * packet, but the Ethernet FCS isn't counted because hardware appends it
  618                  */
 619                 min_tx_space = (hw->max_frame_size +
 620                                 sizeof(struct e1000_tx_desc) -
 621                                 ETH_FCS_LEN) * 2;
 622                 min_tx_space = ALIGN(min_tx_space, 1024);
 623                 min_tx_space >>= 10;
 624                 /* software strips receive CRC, so leave room for it */
 625                 min_rx_space = hw->max_frame_size;
 626                 min_rx_space = ALIGN(min_rx_space, 1024);
 627                 min_rx_space >>= 10;
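                /* Illustrative example (assumed values, not from this file):
                 * with a 9000-byte MTU, max_frame_size is 9018, so
                 * min_tx_space = (9018 + 16 - 4) * 2 = 18060, rounded up to
                 * 18KB, and min_rx_space = 9018, rounded up to 9KB.
                 */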
 628 
 629                 /* If current Tx allocation is less than the min Tx FIFO size,
 630                  * and the min Tx FIFO size is less than the current Rx FIFO
 631                  * allocation, take space away from current Rx allocation
 632                  */
 633                 if (tx_space < min_tx_space &&
 634                     ((min_tx_space - tx_space) < pba)) {
 635                         pba = pba - (min_tx_space - tx_space);
 636 
 637                         /* PCI/PCIx hardware has PBA alignment constraints */
 638                         switch (hw->mac_type) {
 639                         case e1000_82545 ... e1000_82546_rev_3:
 640                                 pba &= ~(E1000_PBA_8K - 1);
 641                                 break;
 642                         default:
 643                                 break;
 644                         }
 645 
 646                         /* if short on Rx space, Rx wins and must trump Tx
 647                          * adjustment or use Early Receive if available
 648                          */
 649                         if (pba < min_rx_space)
 650                                 pba = min_rx_space;
 651                 }
 652         }
 653 
 654         ew32(PBA, pba);
 655 
 656         /* flow control settings:
 657          * The high water mark must be low enough to fit one full frame
 658          * (or the size used for early receive) above it in the Rx FIFO.
 659          * Set it to the lower of:
 660          * - 90% of the Rx FIFO size, and
 661          * - the full Rx FIFO size minus the early receive size (for parts
 662          *   with ERT support assuming ERT set to E1000_ERT_2048), or
 663          * - the full Rx FIFO size minus one full frame
 664          */
 665         hwm = min(((pba << 10) * 9 / 10),
 666                   ((pba << 10) - hw->max_frame_size));
 667 
 668         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
 669         hw->fc_low_water = hw->fc_high_water - 8;
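        /* Illustrative example (assumed values, not from this file): with a
         * 48KB Rx PBA and a 1518-byte max frame, hwm = min(49152 * 9 / 10,
         * 49152 - 1518) = 44236, giving fc_high_water = 44232 (8-byte
         * granularity) and fc_low_water = 44224.
         */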
 670         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
 671         hw->fc_send_xon = 1;
 672         hw->fc = hw->original_fc;
 673 
 674         /* Allow time for pending master requests to run */
 675         e1000_reset_hw(hw);
 676         if (hw->mac_type >= e1000_82544)
 677                 ew32(WUC, 0);
 678 
 679         if (e1000_init_hw(hw))
 680                 e_dev_err("Hardware Error\n");
 681         e1000_update_mng_vlan(adapter);
 682 
 683         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
 684         if (hw->mac_type >= e1000_82544 &&
 685             hw->autoneg == 1 &&
 686             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
 687                 u32 ctrl = er32(CTRL);
 688                 /* clear phy power management bit if we are in gig only mode,
 689                  * which if enabled will attempt negotiation to 100Mb, which
 690                  * can cause a loss of link at power off or driver unload
 691                  */
 692                 ctrl &= ~E1000_CTRL_SWDPIN3;
 693                 ew32(CTRL, ctrl);
 694         }
 695 
 696         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
 697         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
 698 
 699         e1000_reset_adaptive(hw);
 700         e1000_phy_get_info(hw, &adapter->phy_info);
 701 
 702         e1000_release_manageability(adapter);
 703 }
 704 
 705 /* Dump the eeprom for users having checksum issues */
 706 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
 707 {
 708         struct net_device *netdev = adapter->netdev;
 709         struct ethtool_eeprom eeprom;
 710         const struct ethtool_ops *ops = netdev->ethtool_ops;
 711         u8 *data;
 712         int i;
 713         u16 csum_old, csum_new = 0;
 714 
 715         eeprom.len = ops->get_eeprom_len(netdev);
 716         eeprom.offset = 0;
 717 
 718         data = kmalloc(eeprom.len, GFP_KERNEL);
 719         if (!data)
 720                 return;
 721 
 722         ops->get_eeprom(netdev, &eeprom, data);
 723 
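        /* The EEPROM words up to and including the checksum word at
         * EEPROM_CHECKSUM_REG must sum to EEPROM_SUM; csum_new below is the
         * checksum value that would make the image valid.
         */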
 724         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
 725                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
 726         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
 727                 csum_new += data[i] + (data[i + 1] << 8);
 728         csum_new = EEPROM_SUM - csum_new;
 729 
 730         pr_err("/*********************/\n");
 731         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
 732         pr_err("Calculated              : 0x%04x\n", csum_new);
 733 
 734         pr_err("Offset    Values\n");
 735         pr_err("========  ======\n");
 736         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
 737 
 738         pr_err("Include this output when contacting your support provider.\n");
 739         pr_err("This is not a software error! Something bad happened to\n");
 740         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
 741         pr_err("result in further problems, possibly loss of data,\n");
 742         pr_err("corruption or system hangs!\n");
 743         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
 744         pr_err("which is invalid and requires you to set the proper MAC\n");
 745         pr_err("address manually before continuing to enable this network\n");
 746         pr_err("device. Please inspect the EEPROM dump and report the\n");
 747         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
 748         pr_err("/*********************/\n");
 749 
 750         kfree(data);
 751 }
 752 
 753 /**
 754  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 755  * @pdev: PCI device information struct
 756  *
 757  * Return true if an adapter needs ioport resources
 758  **/
 759 static int e1000_is_need_ioport(struct pci_dev *pdev)
 760 {
 761         switch (pdev->device) {
 762         case E1000_DEV_ID_82540EM:
 763         case E1000_DEV_ID_82540EM_LOM:
 764         case E1000_DEV_ID_82540EP:
 765         case E1000_DEV_ID_82540EP_LOM:
 766         case E1000_DEV_ID_82540EP_LP:
 767         case E1000_DEV_ID_82541EI:
 768         case E1000_DEV_ID_82541EI_MOBILE:
 769         case E1000_DEV_ID_82541ER:
 770         case E1000_DEV_ID_82541ER_LOM:
 771         case E1000_DEV_ID_82541GI:
 772         case E1000_DEV_ID_82541GI_LF:
 773         case E1000_DEV_ID_82541GI_MOBILE:
 774         case E1000_DEV_ID_82544EI_COPPER:
 775         case E1000_DEV_ID_82544EI_FIBER:
 776         case E1000_DEV_ID_82544GC_COPPER:
 777         case E1000_DEV_ID_82544GC_LOM:
 778         case E1000_DEV_ID_82545EM_COPPER:
 779         case E1000_DEV_ID_82545EM_FIBER:
 780         case E1000_DEV_ID_82546EB_COPPER:
 781         case E1000_DEV_ID_82546EB_FIBER:
 782         case E1000_DEV_ID_82546EB_QUAD_COPPER:
 783                 return true;
 784         default:
 785                 return false;
 786         }
 787 }
 788 
 789 static netdev_features_t e1000_fix_features(struct net_device *netdev,
 790         netdev_features_t features)
 791 {
 792         /* Since there is no support for separate Rx/Tx vlan accel
 793          * enable/disable make sure Tx flag is always in same state as Rx.
 794          */
 795         if (features & NETIF_F_HW_VLAN_CTAG_RX)
 796                 features |= NETIF_F_HW_VLAN_CTAG_TX;
 797         else
 798                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 799 
 800         return features;
 801 }
 802 
 803 static int e1000_set_features(struct net_device *netdev,
 804         netdev_features_t features)
 805 {
 806         struct e1000_adapter *adapter = netdev_priv(netdev);
 807         netdev_features_t changed = features ^ netdev->features;
 808 
 809         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 810                 e1000_vlan_mode(netdev, features);
 811 
 812         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
 813                 return 0;
 814 
 815         netdev->features = features;
 816         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
 817 
 818         if (netif_running(netdev))
 819                 e1000_reinit_locked(adapter);
 820         else
 821                 e1000_reset(adapter);
 822 
 823         return 1;
 824 }
 825 
 826 static const struct net_device_ops e1000_netdev_ops = {
 827         .ndo_open               = e1000_open,
 828         .ndo_stop               = e1000_close,
 829         .ndo_start_xmit         = e1000_xmit_frame,
 830         .ndo_set_rx_mode        = e1000_set_rx_mode,
 831         .ndo_set_mac_address    = e1000_set_mac,
 832         .ndo_tx_timeout         = e1000_tx_timeout,
 833         .ndo_change_mtu         = e1000_change_mtu,
 834         .ndo_do_ioctl           = e1000_ioctl,
 835         .ndo_validate_addr      = eth_validate_addr,
 836         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
 837         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
 838 #ifdef CONFIG_NET_POLL_CONTROLLER
 839         .ndo_poll_controller    = e1000_netpoll,
 840 #endif
 841         .ndo_fix_features       = e1000_fix_features,
 842         .ndo_set_features       = e1000_set_features,
 843 };
 844 
 845 /**
 846  * e1000_init_hw_struct - initialize members of hw struct
 847  * @adapter: board private struct
 848  * @hw: structure used by e1000_hw.c
 849  *
 850  * Factors out initialization of the e1000_hw struct to its own function
 851  * that can be called very early at init (just after struct allocation).
 852  * Fields are initialized based on PCI device information and
 853  * OS network device settings (MTU size).
 854  * Returns negative error codes if MAC type setup fails.
 855  */
 856 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
 857                                 struct e1000_hw *hw)
 858 {
 859         struct pci_dev *pdev = adapter->pdev;
 860 
 861         /* PCI config space info */
 862         hw->vendor_id = pdev->vendor;
 863         hw->device_id = pdev->device;
 864         hw->subsystem_vendor_id = pdev->subsystem_vendor;
 865         hw->subsystem_id = pdev->subsystem_device;
 866         hw->revision_id = pdev->revision;
 867 
 868         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 869 
 870         hw->max_frame_size = adapter->netdev->mtu +
 871                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 872         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
 873 
 874         /* identify the MAC */
 875         if (e1000_set_mac_type(hw)) {
 876                 e_err(probe, "Unknown MAC Type\n");
 877                 return -EIO;
 878         }
 879 
 880         switch (hw->mac_type) {
 881         default:
 882                 break;
 883         case e1000_82541:
 884         case e1000_82547:
 885         case e1000_82541_rev_2:
 886         case e1000_82547_rev_2:
 887                 hw->phy_init_script = 1;
 888                 break;
 889         }
 890 
 891         e1000_set_media_type(hw);
 892         e1000_get_bus_info(hw);
 893 
 894         hw->wait_autoneg_complete = false;
 895         hw->tbi_compatibility_en = true;
 896         hw->adaptive_ifs = true;
 897 
 898         /* Copper options */
 899 
 900         if (hw->media_type == e1000_media_type_copper) {
 901                 hw->mdix = AUTO_ALL_MODES;
 902                 hw->disable_polarity_correction = false;
 903                 hw->master_slave = E1000_MASTER_SLAVE;
 904         }
 905 
 906         return 0;
 907 }
 908 
 909 /**
 910  * e1000_probe - Device Initialization Routine
 911  * @pdev: PCI device information struct
 912  * @ent: entry in e1000_pci_tbl
 913  *
 914  * Returns 0 on success, negative on failure
 915  *
 916  * e1000_probe initializes an adapter identified by a pci_dev structure.
 917  * The OS initialization, configuring of the adapter private structure,
 918  * and a hardware reset occur.
 919  **/
 920 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 921 {
 922         struct net_device *netdev;
 923         struct e1000_adapter *adapter = NULL;
 924         struct e1000_hw *hw;
 925 
 926         static int cards_found;
 927         static int global_quad_port_a; /* global ksp3 port a indication */
 928         int i, err, pci_using_dac;
 929         u16 eeprom_data = 0;
 930         u16 tmp = 0;
 931         u16 eeprom_apme_mask = E1000_EEPROM_APME;
 932         int bars, need_ioport;
 933         bool disable_dev = false;
 934 
 935         /* do not allocate ioport bars when not needed */
 936         need_ioport = e1000_is_need_ioport(pdev);
 937         if (need_ioport) {
 938                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
 939                 err = pci_enable_device(pdev);
 940         } else {
 941                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
 942                 err = pci_enable_device_mem(pdev);
 943         }
 944         if (err)
 945                 return err;
 946 
 947         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
 948         if (err)
 949                 goto err_pci_reg;
 950 
 951         pci_set_master(pdev);
 952         err = pci_save_state(pdev);
 953         if (err)
 954                 goto err_alloc_etherdev;
 955 
 956         err = -ENOMEM;
 957         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
 958         if (!netdev)
 959                 goto err_alloc_etherdev;
 960 
 961         SET_NETDEV_DEV(netdev, &pdev->dev);
 962 
 963         pci_set_drvdata(pdev, netdev);
 964         adapter = netdev_priv(netdev);
 965         adapter->netdev = netdev;
 966         adapter->pdev = pdev;
 967         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 968         adapter->bars = bars;
 969         adapter->need_ioport = need_ioport;
 970 
 971         hw = &adapter->hw;
 972         hw->back = adapter;
 973 
 974         err = -EIO;
 975         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
 976         if (!hw->hw_addr)
 977                 goto err_ioremap;
 978 
 979         if (adapter->need_ioport) {
 980                 for (i = BAR_1; i <= BAR_5; i++) {
 981                         if (pci_resource_len(pdev, i) == 0)
 982                                 continue;
 983                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 984                                 hw->io_base = pci_resource_start(pdev, i);
 985                                 break;
 986                         }
 987                 }
 988         }
 989 
  990         /* initialize hw now so that the if (hw->...) checks below are valid */
 991         err = e1000_init_hw_struct(adapter, hw);
 992         if (err)
 993                 goto err_sw_init;
 994 
 995         /* there is a workaround being applied below that limits
  996          * 64-bit DMA addresses to 64-bit hardware.  Some 32-bit
  997          * adapters hang on Tx when given 64-bit DMA addresses
 998          */
 999         pci_using_dac = 0;
1000         if ((hw->bus_type == e1000_bus_type_pcix) &&
1001             !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1002                 pci_using_dac = 1;
1003         } else {
1004                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1005                 if (err) {
1006                         pr_err("No usable DMA config, aborting\n");
1007                         goto err_dma;
1008                 }
1009         }
1010 
1011         netdev->netdev_ops = &e1000_netdev_ops;
1012         e1000_set_ethtool_ops(netdev);
1013         netdev->watchdog_timeo = 5 * HZ;
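        /* register the NAPI poll handler; 64 is the per-poll weight, i.e.
         * the maximum number of packets e1000_clean() may process per call
         */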
1014         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1015 
1016         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1017 
1018         adapter->bd_number = cards_found;
1019 
1020         /* setup the private structure */
1021 
1022         err = e1000_sw_init(adapter);
1023         if (err)
1024                 goto err_sw_init;
1025 
1026         err = -EIO;
1027         if (hw->mac_type == e1000_ce4100) {
1028                 hw->ce4100_gbe_mdio_base_virt =
1029                                         ioremap(pci_resource_start(pdev, BAR_1),
1030                                                 pci_resource_len(pdev, BAR_1));
1031 
1032                 if (!hw->ce4100_gbe_mdio_base_virt)
1033                         goto err_mdio_ioremap;
1034         }
1035 
1036         if (hw->mac_type >= e1000_82543) {
1037                 netdev->hw_features = NETIF_F_SG |
1038                                    NETIF_F_HW_CSUM |
1039                                    NETIF_F_HW_VLAN_CTAG_RX;
1040                 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1041                                    NETIF_F_HW_VLAN_CTAG_FILTER;
1042         }
1043 
1044         if ((hw->mac_type >= e1000_82544) &&
1045            (hw->mac_type != e1000_82547))
1046                 netdev->hw_features |= NETIF_F_TSO;
1047 
1048         netdev->priv_flags |= IFF_SUPP_NOFCS;
1049 
1050         netdev->features |= netdev->hw_features;
1051         netdev->hw_features |= (NETIF_F_RXCSUM |
1052                                 NETIF_F_RXALL |
1053                                 NETIF_F_RXFCS);
1054 
1055         if (pci_using_dac) {
1056                 netdev->features |= NETIF_F_HIGHDMA;
1057                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1058         }
1059 
1060         netdev->vlan_features |= (NETIF_F_TSO |
1061                                   NETIF_F_HW_CSUM |
1062                                   NETIF_F_SG);
1063 
1064         /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1065         if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1066             hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1067                 netdev->priv_flags |= IFF_UNICAST_FLT;
1068 
1069         /* MTU range: 46 - 16110 */
1070         netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1071         netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
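        /* i.e. ETH_ZLEN (60) - ETH_HLEN (14) = 46, and - assuming
         * MAX_JUMBO_FRAME_SIZE is 0x3F00 (16128) as defined in e1000.h -
         * 16128 - (14 + 4) = 16110, matching the range noted above
         */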
1072 
1073         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1074 
1075         /* initialize eeprom parameters */
1076         if (e1000_init_eeprom_params(hw)) {
1077                 e_err(probe, "EEPROM initialization failed\n");
1078                 goto err_eeprom;
1079         }
1080 
1081         /* before reading the EEPROM, reset the controller to
1082          * put the device in a known good starting state
1083          */
1084 
1085         e1000_reset_hw(hw);
1086 
1087         /* make sure the EEPROM is good */
1088         if (e1000_validate_eeprom_checksum(hw) < 0) {
1089                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1090                 e1000_dump_eeprom(adapter);
 1091                 /* set MAC address to all zeroes to invalidate it and temporarily
1092                  * disable this device for the user. This blocks regular
1093                  * traffic while still permitting ethtool ioctls from reaching
1094                  * the hardware as well as allowing the user to run the
1095                  * interface after manually setting a hw addr using
 1096                  * `ip link set address`
1097                  */
1098                 memset(hw->mac_addr, 0, netdev->addr_len);
1099         } else {
1100                 /* copy the MAC address out of the EEPROM */
1101                 if (e1000_read_mac_addr(hw))
1102                         e_err(probe, "EEPROM Read Error\n");
1103         }
1104         /* don't block initialization here due to bad MAC address */
1105         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1106 
1107         if (!is_valid_ether_addr(netdev->dev_addr))
1108                 e_err(probe, "Invalid MAC Address\n");
1109 
1110 
1111         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1112         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1113                           e1000_82547_tx_fifo_stall_task);
1114         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1115         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1116 
1117         e1000_check_options(adapter);
1118 
1119         /* Initial Wake on LAN setting
1120          * If APM wake is enabled in the EEPROM,
1121          * enable the ACPI Magic Packet filter
1122          */
1123 
1124         switch (hw->mac_type) {
1125         case e1000_82542_rev2_0:
1126         case e1000_82542_rev2_1:
1127         case e1000_82543:
1128                 break;
1129         case e1000_82544:
1130                 e1000_read_eeprom(hw,
1131                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1132                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1133                 break;
1134         case e1000_82546:
1135         case e1000_82546_rev_3:
1136                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1137                         e1000_read_eeprom(hw,
1138                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1139                         break;
1140                 }
1141                 /* Fall Through */
1142         default:
1143                 e1000_read_eeprom(hw,
1144                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1145                 break;
1146         }
1147         if (eeprom_data & eeprom_apme_mask)
1148                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1149 
1150         /* now that we have the eeprom settings, apply the special cases
1151          * where the eeprom may be wrong or the board simply won't support
1152          * wake on lan on a particular port
1153          */
1154         switch (pdev->device) {
1155         case E1000_DEV_ID_82546GB_PCIE:
1156                 adapter->eeprom_wol = 0;
1157                 break;
1158         case E1000_DEV_ID_82546EB_FIBER:
1159         case E1000_DEV_ID_82546GB_FIBER:
1160                 /* Wake events only supported on port A for dual fiber
1161                  * regardless of eeprom setting
1162                  */
1163                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1164                         adapter->eeprom_wol = 0;
1165                 break;
1166         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1167                 /* if quad port adapter, disable WoL on all but port A */
1168                 if (global_quad_port_a != 0)
1169                         adapter->eeprom_wol = 0;
1170                 else
1171                         adapter->quad_port_a = true;
1172                 /* Reset for multiple quad port adapters */
1173                 if (++global_quad_port_a == 4)
1174                         global_quad_port_a = 0;
1175                 break;
1176         }
1177 
1178         /* initialize the wol settings based on the eeprom settings */
1179         adapter->wol = adapter->eeprom_wol;
1180         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1181 
1182         /* Auto detect PHY address */
1183         if (hw->mac_type == e1000_ce4100) {
1184                 for (i = 0; i < 32; i++) {
1185                         hw->phy_addr = i;
1186                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1187 
1188                         if (tmp != 0 && tmp != 0xFF)
1189                                 break;
1190                 }
1191 
1192                 if (i >= 32)
1193                         goto err_eeprom;
1194         }
1195 
1196         /* reset the hardware with the new settings */
1197         e1000_reset(adapter);
1198 
1199         strcpy(netdev->name, "eth%d");
1200         err = register_netdev(netdev);
1201         if (err)
1202                 goto err_register;
1203 
1204         e1000_vlan_filter_on_off(adapter, false);
1205 
1206         /* print bus type/speed/width info */
1207         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1208                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1209                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1210                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1211                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1212                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1213                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1214                netdev->dev_addr);
1215 
1216         /* carrier off reporting is important to ethtool even BEFORE open */
1217         netif_carrier_off(netdev);
1218 
1219         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1220 
1221         cards_found++;
1222         return 0;
1223 
1224 err_register:
1225 err_eeprom:
1226         e1000_phy_hw_reset(hw);
1227 
1228         if (hw->flash_address)
1229                 iounmap(hw->flash_address);
1230         kfree(adapter->tx_ring);
1231         kfree(adapter->rx_ring);
1232 err_dma:
1233 err_sw_init:
1234 err_mdio_ioremap:
1235         iounmap(hw->ce4100_gbe_mdio_base_virt);
1236         iounmap(hw->hw_addr);
1237 err_ioremap:
1238         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1239         free_netdev(netdev);
1240 err_alloc_etherdev:
1241         pci_release_selected_regions(pdev, bars);
1242 err_pci_reg:
1243         if (!adapter || disable_dev)
1244                 pci_disable_device(pdev);
1245         return err;
1246 }
1247 
1248 /**
1249  * e1000_remove - Device Removal Routine
1250  * @pdev: PCI device information struct
1251  *
1252  * e1000_remove is called by the PCI subsystem to alert the driver
1253  * that it should release a PCI device. That could be caused by a
1254  * Hot-Plug event, or because the driver is going to be removed from
1255  * memory.
1256  **/
1257 static void e1000_remove(struct pci_dev *pdev)
1258 {
1259         struct net_device *netdev = pci_get_drvdata(pdev);
1260         struct e1000_adapter *adapter = netdev_priv(netdev);
1261         struct e1000_hw *hw = &adapter->hw;
1262         bool disable_dev;
1263 
1264         e1000_down_and_stop(adapter);
1265         e1000_release_manageability(adapter);
1266 
1267         unregister_netdev(netdev);
1268 
1269         e1000_phy_hw_reset(hw);
1270 
1271         kfree(adapter->tx_ring);
1272         kfree(adapter->rx_ring);
1273 
1274         if (hw->mac_type == e1000_ce4100)
1275                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1276         iounmap(hw->hw_addr);
1277         if (hw->flash_address)
1278                 iounmap(hw->flash_address);
1279         pci_release_selected_regions(pdev, adapter->bars);
1280 
1281         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1282         free_netdev(netdev);
1283 
1284         if (disable_dev)
1285                 pci_disable_device(pdev);
1286 }
1287 
1288 /**
1289  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1290  * @adapter: board private structure to initialize
1291  *
1292  * e1000_sw_init initializes the Adapter private data structure.
1293  * e1000_init_hw_struct MUST be called before this function
1294  **/
1295 static int e1000_sw_init(struct e1000_adapter *adapter)
1296 {
1297         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1298 
1299         adapter->num_tx_queues = 1;
1300         adapter->num_rx_queues = 1;
1301 
1302         if (e1000_alloc_queues(adapter)) {
1303                 e_err(probe, "Unable to allocate memory for queues\n");
1304                 return -ENOMEM;
1305         }
1306 
1307         /* Explicitly disable IRQ since the NIC can be in any state. */
1308         e1000_irq_disable(adapter);
1309 
1310         spin_lock_init(&adapter->stats_lock);
1311 
1312         set_bit(__E1000_DOWN, &adapter->flags);
1313 
1314         return 0;
1315 }
1316 
1317 /**
1318  * e1000_alloc_queues - Allocate memory for all rings
1319  * @adapter: board private structure to initialize
1320  *
1321  * We allocate one ring per queue at run-time since we don't know the
1322  * number of queues at compile-time.
1323  **/
1324 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1325 {
1326         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1327                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1328         if (!adapter->tx_ring)
1329                 return -ENOMEM;
1330 
1331         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1332                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1333         if (!adapter->rx_ring) {
1334                 kfree(adapter->tx_ring);
1335                 return -ENOMEM;
1336         }
1337 
1338         return E1000_SUCCESS;
1339 }
1340 
1341 /**
1342  * e1000_open - Called when a network interface is made active
1343  * @netdev: network interface device structure
1344  *
1345  * Returns 0 on success, negative value on failure
1346  *
1347  * The open entry point is called when a network interface is made
1348  * active by the system (IFF_UP).  At this point all resources needed
1349  * for transmit and receive operations are allocated, the interrupt
1350  * handler is registered with the OS, the watchdog task is started,
1351  * and the stack is notified that the interface is ready.
1352  **/
1353 int e1000_open(struct net_device *netdev)
1354 {
1355         struct e1000_adapter *adapter = netdev_priv(netdev);
1356         struct e1000_hw *hw = &adapter->hw;
1357         int err;
1358 
1359         /* disallow open during test */
1360         if (test_bit(__E1000_TESTING, &adapter->flags))
1361                 return -EBUSY;
1362 
1363         netif_carrier_off(netdev);
1364 
1365         /* allocate transmit descriptors */
1366         err = e1000_setup_all_tx_resources(adapter);
1367         if (err)
1368                 goto err_setup_tx;
1369 
1370         /* allocate receive descriptors */
1371         err = e1000_setup_all_rx_resources(adapter);
1372         if (err)
1373                 goto err_setup_rx;
1374 
1375         e1000_power_up_phy(adapter);
1376 
1377         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1378         if ((hw->mng_cookie.status &
1379                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1380                 e1000_update_mng_vlan(adapter);
1381         }
1382 
1383         /* Before we allocate an interrupt, we must be ready to handle it.
1384          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1385          * as soon as we call request_irq, so we have to set up our
1386          * clean_rx handler before we do so.
1387          */
1388         e1000_configure(adapter);
1389 
1390         err = e1000_request_irq(adapter);
1391         if (err)
1392                 goto err_req_irq;
1393 
1394         /* From here on the code is the same as e1000_up() */
1395         clear_bit(__E1000_DOWN, &adapter->flags);
1396 
1397         napi_enable(&adapter->napi);
1398 
1399         e1000_irq_enable(adapter);
1400 
1401         netif_start_queue(netdev);
1402 
1403         /* fire a link status change interrupt to start the watchdog */
1404         ew32(ICS, E1000_ICS_LSC);
1405 
1406         return E1000_SUCCESS;
1407 
1408 err_req_irq:
1409         e1000_power_down_phy(adapter);
1410         e1000_free_all_rx_resources(adapter);
1411 err_setup_rx:
1412         e1000_free_all_tx_resources(adapter);
1413 err_setup_tx:
1414         e1000_reset(adapter);
1415 
1416         return err;
1417 }
1418 
1419 /**
1420  * e1000_close - Disables a network interface
1421  * @netdev: network interface device structure
1422  *
1423  * Returns 0, this is not allowed to fail
1424  *
1425  * The close entry point is called when an interface is deactivated
1426  * by the OS.  The hardware is still under the driver's control, but
1427  * needs to be disabled.  A global MAC reset is issued to stop the
1428  * hardware, and all transmit and receive resources are freed.
1429  **/
1430 int e1000_close(struct net_device *netdev)
1431 {
1432         struct e1000_adapter *adapter = netdev_priv(netdev);
1433         struct e1000_hw *hw = &adapter->hw;
1434         int count = E1000_CHECK_RESET_COUNT;
1435 
1436         while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1437                 usleep_range(10000, 20000);
1438 
1439         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1440         e1000_down(adapter);
1441         e1000_power_down_phy(adapter);
1442         e1000_free_irq(adapter);
1443 
1444         e1000_free_all_tx_resources(adapter);
1445         e1000_free_all_rx_resources(adapter);
1446 
1447         /* kill manageability vlan ID if supported, but not if a vlan with
1448          * the same ID is registered on the host OS (let 8021q kill it)
1449          */
1450         if ((hw->mng_cookie.status &
1451              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1452             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1453                 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1454                                        adapter->mng_vlan_id);
1455         }
1456 
1457         return 0;
1458 }
1459 
1460 /**
1461  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1462  * @adapter: address of board private structure
1463  * @start: address of beginning of memory
1464  * @len: length of memory
1465  **/
1466 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1467                                   unsigned long len)
1468 {
1469         struct e1000_hw *hw = &adapter->hw;
1470         unsigned long begin = (unsigned long)start;
1471         unsigned long end = begin + len;
1472 
1473         /* Errata 23: first-revision 82545 and 82546 parts must not let
1474          * any memory write location cross a 64 KiB boundary.
1475          */
1476         if (hw->mac_type == e1000_82545 ||
1477             hw->mac_type == e1000_ce4100 ||
1478             hw->mac_type == e1000_82546) {
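                      /* begin and (end - 1) land in the same 64 KiB region
                       * exactly when their addresses agree in all bits above
                       * bit 15, i.e. when the XOR shifted right by 16 is zero.
                       */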
1479                 return ((begin ^ (end - 1)) >> 16) == 0;
1480         }
1481 
1482         return true;
1483 }
1484 
1485 /**
1486  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1487  * @adapter: board private structure
1488  * @txdr:    tx descriptor ring (for a specific queue) to setup
1489  *
1490  * Return 0 on success, negative on failure
1491  **/
1492 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1493                                     struct e1000_tx_ring *txdr)
1494 {
1495         struct pci_dev *pdev = adapter->pdev;
1496         int size;
1497 
1498         size = sizeof(struct e1000_tx_buffer) * txdr->count;
1499         txdr->buffer_info = vzalloc(size);
1500         if (!txdr->buffer_info)
1501                 return -ENOMEM;
1502 
1503         /* round up to nearest 4K */
1504 
1505         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1506         txdr->size = ALIGN(txdr->size, 4096);
1507 
1508         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1509                                         GFP_KERNEL);
1510         if (!txdr->desc) {
1511 setup_tx_desc_die:
1512                 vfree(txdr->buffer_info);
1513                 return -ENOMEM;
1514         }
1515 
1516         /* Fix for errata 23, can't cross 64kB boundary */
1517         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1518                 void *olddesc = txdr->desc;
1519                 dma_addr_t olddma = txdr->dma;
1520                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1521                       txdr->size, txdr->desc);
1522                 /* Try again, without freeing the previous */
1523                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1524                                                 &txdr->dma, GFP_KERNEL);
1525                 /* Failed allocation, critical failure */
1526                 if (!txdr->desc) {
1527                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1528                                           olddma);
1529                         goto setup_tx_desc_die;
1530                 }
1531 
1532                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1533                         /* give up */
1534                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1535                                           txdr->dma);
1536                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1537                                           olddma);
1538                         e_err(probe, "Unable to allocate aligned memory "
1539                               "for the transmit descriptor ring\n");
1540                         vfree(txdr->buffer_info);
1541                         return -ENOMEM;
1542                 } else {
1543                         /* Free old allocation, new allocation was successful */
1544                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1545                                           olddma);
1546                 }
1547         }
1548         memset(txdr->desc, 0, txdr->size);
1549 
1550         txdr->next_to_use = 0;
1551         txdr->next_to_clean = 0;
1552 
1553         return 0;
1554 }
1555 
1556 /**
1557  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1558  *                                (Descriptors) for all queues
1559  * @adapter: board private structure
1560  *
1561  * Return 0 on success, negative on failure
1562  **/
1563 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1564 {
1565         int i, err = 0;
1566 
1567         for (i = 0; i < adapter->num_tx_queues; i++) {
1568                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1569                 if (err) {
1570                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1571                         for (i--; i >= 0; i--)
1572                                 e1000_free_tx_resources(adapter,
1573                                                         &adapter->tx_ring[i]);
1574                         break;
1575                 }
1576         }
1577 
1578         return err;
1579 }
1580 
1581 /**
1582  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1583  * @adapter: board private structure
1584  *
1585  * Configure the Tx unit of the MAC after a reset.
1586  **/
1587 static void e1000_configure_tx(struct e1000_adapter *adapter)
1588 {
1589         u64 tdba;
1590         struct e1000_hw *hw = &adapter->hw;
1591         u32 tdlen, tctl, tipg;
1592         u32 ipgr1, ipgr2;
1593 
1594         /* Setup the HW Tx Head and Tail descriptor pointers */
1595 
1596         switch (adapter->num_tx_queues) {
1597         case 1:
1598         default:
1599                 tdba = adapter->tx_ring[0].dma;
1600                 tdlen = adapter->tx_ring[0].count *
1601                         sizeof(struct e1000_tx_desc);
1602                 ew32(TDLEN, tdlen);
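                      /* The 64-bit ring base address is split across two
                       * registers: TDBAH takes the upper 32 bits and TDBAL
                       * the lower 32 bits.
                       */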
1603                 ew32(TDBAH, (tdba >> 32));
1604                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1605                 ew32(TDT, 0);
1606                 ew32(TDH, 0);
1607                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1608                                            E1000_TDH : E1000_82542_TDH);
1609                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1610                                            E1000_TDT : E1000_82542_TDT);
1611                 break;
1612         }
1613 
1614         /* Set the default values for the Tx Inter Packet Gap timer */
1615         if ((hw->media_type == e1000_media_type_fiber ||
1616              hw->media_type == e1000_media_type_internal_serdes))
1617                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1618         else
1619                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1620 
1621         switch (hw->mac_type) {
1622         case e1000_82542_rev2_0:
1623         case e1000_82542_rev2_1:
1624                 tipg = DEFAULT_82542_TIPG_IPGT;
1625                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1626                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1627                 break;
1628         default:
1629                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1630                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1631                 break;
1632         }
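              /* Assemble TIPG: the IPGT value chosen above sits in the low
               * bits, with IPGR1 and IPGR2 OR'd in at their shifted field
               * positions.
               */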
1633         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1634         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1635         ew32(TIPG, tipg);
1636 
1637         /* Set the Tx Interrupt Delay register */
1638 
1639         ew32(TIDV, adapter->tx_int_delay);
1640         if (hw->mac_type >= e1000_82540)
1641                 ew32(TADV, adapter->tx_abs_int_delay);
1642 
1643         /* Program the Transmit Control Register */
1644 
1645         tctl = er32(TCTL);
1646         tctl &= ~E1000_TCTL_CT;
1647         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1648                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1649 
1650         e1000_config_collision_dist(hw);
1651 
1652         /* Setup Transmit Descriptor Settings for eop descriptor */
1653         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1654 
1655         /* only set IDE if we are delaying interrupts using the timers */
1656         if (adapter->tx_int_delay)
1657                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1658 
1659         if (hw->mac_type < e1000_82543)
1660                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1661         else
1662                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1663 
1664         /* Cache if we're 82544 running in PCI-X because we'll
1665          * need this to apply a workaround later in the send path.
1666          */
1667         if (hw->mac_type == e1000_82544 &&
1668             hw->bus_type == e1000_bus_type_pcix)
1669                 adapter->pcix_82544 = true;
1670 
1671         ew32(TCTL, tctl);
1672 
1673 }
1674 
1675 /**
1676  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1677  * @adapter: board private structure
1678  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1679  *
1680  * Returns 0 on success, negative on failure
1681  **/
1682 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1683                                     struct e1000_rx_ring *rxdr)
1684 {
1685         struct pci_dev *pdev = adapter->pdev;
1686         int size, desc_len;
1687 
1688         size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1689         rxdr->buffer_info = vzalloc(size);
1690         if (!rxdr->buffer_info)
1691                 return -ENOMEM;
1692 
1693         desc_len = sizeof(struct e1000_rx_desc);
1694 
1695         /* Round up to nearest 4K */
1696 
1697         rxdr->size = rxdr->count * desc_len;
1698         rxdr->size = ALIGN(rxdr->size, 4096);
1699 
1700         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1701                                         GFP_KERNEL);
1702         if (!rxdr->desc) {
1703 setup_rx_desc_die:
1704                 vfree(rxdr->buffer_info);
1705                 return -ENOMEM;
1706         }
1707 
1708         /* Fix for errata 23, can't cross 64kB boundary */
1709         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1710                 void *olddesc = rxdr->desc;
1711                 dma_addr_t olddma = rxdr->dma;
1712                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1713                       rxdr->size, rxdr->desc);
1714                 /* Try again, without freeing the previous */
1715                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1716                                                 &rxdr->dma, GFP_KERNEL);
1717                 /* Failed allocation, critical failure */
1718                 if (!rxdr->desc) {
1719                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1720                                           olddma);
1721                         goto setup_rx_desc_die;
1722                 }
1723 
1724                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1725                         /* give up */
1726                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1727                                           rxdr->dma);
1728                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1729                                           olddma);
1730                         e_err(probe, "Unable to allocate aligned memory for "
1731                               "the Rx descriptor ring\n");
1732                         goto setup_rx_desc_die;
1733                 } else {
1734                         /* Free old allocation, new allocation was successful */
1735                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1736                                           olddma);
1737                 }
1738         }
1739         memset(rxdr->desc, 0, rxdr->size);
1740 
1741         rxdr->next_to_clean = 0;
1742         rxdr->next_to_use = 0;
1743         rxdr->rx_skb_top = NULL;
1744 
1745         return 0;
1746 }
1747 
1748 /**
1749  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1750  *                                (Descriptors) for all queues
1751  * @adapter: board private structure
1752  *
1753  * Return 0 on success, negative on failure
1754  **/
1755 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1756 {
1757         int i, err = 0;
1758 
1759         for (i = 0; i < adapter->num_rx_queues; i++) {
1760                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1761                 if (err) {
1762                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1763                         for (i--; i >= 0; i--)
1764                                 e1000_free_rx_resources(adapter,
1765                                                         &adapter->rx_ring[i]);
1766                         break;
1767                 }
1768         }
1769 
1770         return err;
1771 }
1772 
1773 /**
1774  * e1000_setup_rctl - configure the receive control registers
1775  * @adapter: Board private structure
1776  **/
1777 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1778 {
1779         struct e1000_hw *hw = &adapter->hw;
1780         u32 rctl;
1781 
1782         rctl = er32(RCTL);
1783 
1784         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1785 
1786         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1787                 E1000_RCTL_RDMTS_HALF |
1788                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1789 
1790         if (hw->tbi_compatibility_on == 1)
1791                 rctl |= E1000_RCTL_SBP;
1792         else
1793                 rctl &= ~E1000_RCTL_SBP;
1794 
1795         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1796                 rctl &= ~E1000_RCTL_LPE;
1797         else
1798                 rctl |= E1000_RCTL_LPE;
1799 
1800         /* Setup buffer sizes */
1801         rctl &= ~E1000_RCTL_SZ_4096;
1802         rctl |= E1000_RCTL_BSEX;
1803         switch (adapter->rx_buffer_len) {
1804         case E1000_RXBUFFER_2048:
1805         default:
1806                 rctl |= E1000_RCTL_SZ_2048;
1807                 rctl &= ~E1000_RCTL_BSEX;
1808                 break;
1809         case E1000_RXBUFFER_4096:
1810                 rctl |= E1000_RCTL_SZ_4096;
1811                 break;
1812         case E1000_RXBUFFER_8192:
1813                 rctl |= E1000_RCTL_SZ_8192;
1814                 break;
1815         case E1000_RXBUFFER_16384:
1816                 rctl |= E1000_RCTL_SZ_16384;
1817                 break;
1818         }
1819 
1820         /* This is useful for sniffing bad packets. */
1821         if (adapter->netdev->features & NETIF_F_RXALL) {
1822                 /* UPE and MPE will be handled by normal PROMISC logic
1823                  * in e1000_set_rx_mode
1824                  */
1825                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1826                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1827                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1828 
1829                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1830                           E1000_RCTL_DPF | /* Allow filtered pause */
1831                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1832                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1833                  * and that breaks VLANs.
1834                  */
1835         }
1836 
1837         ew32(RCTL, rctl);
1838 }
1839 
1840 /**
1841  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1842  * @adapter: board private structure
1843  *
1844  * Configure the Rx unit of the MAC after a reset.
1845  **/
1846 static void e1000_configure_rx(struct e1000_adapter *adapter)
1847 {
1848         u64 rdba;
1849         struct e1000_hw *hw = &adapter->hw;
1850         u32 rdlen, rctl, rxcsum;
1851 
1852         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1853                 rdlen = adapter->rx_ring[0].count *
1854                         sizeof(struct e1000_rx_desc);
1855                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1856                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1857         } else {
1858                 rdlen = adapter->rx_ring[0].count *
1859                         sizeof(struct e1000_rx_desc);
1860                 adapter->clean_rx = e1000_clean_rx_irq;
1861                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1862         }
1863 
1864         /* disable receives while setting up the descriptors */
1865         rctl = er32(RCTL);
1866         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1867 
1868         /* set the Receive Delay Timer Register */
1869         ew32(RDTR, adapter->rx_int_delay);
1870 
1871         if (hw->mac_type >= e1000_82540) {
1872                 ew32(RADV, adapter->rx_abs_int_delay);
1873                 if (adapter->itr_setting != 0)
1874                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1875         }
1876 
1877         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1878          * the Base and Length of the Rx Descriptor Ring
1879          */
1880         switch (adapter->num_rx_queues) {
1881         case 1:
1882         default:
1883                 rdba = adapter->rx_ring[0].dma;
1884                 ew32(RDLEN, rdlen);
1885                 ew32(RDBAH, (rdba >> 32));
1886                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1887                 ew32(RDT, 0);
1888                 ew32(RDH, 0);
1889                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1890                                            E1000_RDH : E1000_82542_RDH);
1891                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1892                                            E1000_RDT : E1000_82542_RDT);
1893                 break;
1894         }
1895 
1896         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1897         if (hw->mac_type >= e1000_82543) {
1898                 rxcsum = er32(RXCSUM);
1899                 if (adapter->rx_csum)
1900                         rxcsum |= E1000_RXCSUM_TUOFL;
1901                 else
1902                         /* don't need to clear IPPCSE as it defaults to 0 */
1903                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1904                 ew32(RXCSUM, rxcsum);
1905         }
1906 
1907         /* Enable Receives */
1908         ew32(RCTL, rctl | E1000_RCTL_EN);
1909 }
1910 
1911 /**
1912  * e1000_free_tx_resources - Free Tx Resources per Queue
1913  * @adapter: board private structure
1914  * @tx_ring: Tx descriptor ring for a specific queue
1915  *
1916  * Free all transmit software resources
1917  **/
1918 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1919                                     struct e1000_tx_ring *tx_ring)
1920 {
1921         struct pci_dev *pdev = adapter->pdev;
1922 
1923         e1000_clean_tx_ring(adapter, tx_ring);
1924 
1925         vfree(tx_ring->buffer_info);
1926         tx_ring->buffer_info = NULL;
1927 
1928         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1929                           tx_ring->dma);
1930 
1931         tx_ring->desc = NULL;
1932 }
1933 
1934 /**
1935  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1936  * @adapter: board private structure
1937  *
1938  * Free all transmit software resources
1939  **/
1940 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1941 {
1942         int i;
1943 
1944         for (i = 0; i < adapter->num_tx_queues; i++)
1945                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1946 }
1947 
1948 static void
1949 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1950                                  struct e1000_tx_buffer *buffer_info)
1951 {
1952         if (buffer_info->dma) {
1953                 if (buffer_info->mapped_as_page)
1954                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1955                                        buffer_info->length, DMA_TO_DEVICE);
1956                 else
1957                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1958                                          buffer_info->length,
1959                                          DMA_TO_DEVICE);
1960                 buffer_info->dma = 0;
1961         }
1962         if (buffer_info->skb) {
1963                 dev_kfree_skb_any(buffer_info->skb);
1964                 buffer_info->skb = NULL;
1965         }
1966         buffer_info->time_stamp = 0;
1967         /* buffer_info must be completely set up in the transmit path */
1968 }
1969 
1970 /**
1971  * e1000_clean_tx_ring - Free Tx Buffers
1972  * @adapter: board private structure
1973  * @tx_ring: ring to be cleaned
1974  **/
1975 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1976                                 struct e1000_tx_ring *tx_ring)
1977 {
1978         struct e1000_hw *hw = &adapter->hw;
1979         struct e1000_tx_buffer *buffer_info;
1980         unsigned long size;
1981         unsigned int i;
1982 
1983         /* Free all the Tx ring sk_buffs */
1984 
1985         for (i = 0; i < tx_ring->count; i++) {
1986                 buffer_info = &tx_ring->buffer_info[i];
1987                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1988         }
1989 
1990         netdev_reset_queue(adapter->netdev);
1991         size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1992         memset(tx_ring->buffer_info, 0, size);
1993 
1994         /* Zero out the descriptor ring */
1995 
1996         memset(tx_ring->desc, 0, tx_ring->size);
1997 
1998         tx_ring->next_to_use = 0;
1999         tx_ring->next_to_clean = 0;
2000         tx_ring->last_tx_tso = false;
2001 
2002         writel(0, hw->hw_addr + tx_ring->tdh);
2003         writel(0, hw->hw_addr + tx_ring->tdt);
2004 }
2005 
2006 /**
2007  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2008  * @adapter: board private structure
2009  **/
2010 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2011 {
2012         int i;
2013 
2014         for (i = 0; i < adapter->num_tx_queues; i++)
2015                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2016 }
2017 
2018 /**
2019  * e1000_free_rx_resources - Free Rx Resources
2020  * @adapter: board private structure
2021  * @rx_ring: ring to clean the resources from
2022  *
2023  * Free all receive software resources
2024  **/
2025 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2026                                     struct e1000_rx_ring *rx_ring)
2027 {
2028         struct pci_dev *pdev = adapter->pdev;
2029 
2030         e1000_clean_rx_ring(adapter, rx_ring);
2031 
2032         vfree(rx_ring->buffer_info);
2033         rx_ring->buffer_info = NULL;
2034 
2035         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2036                           rx_ring->dma);
2037 
2038         rx_ring->desc = NULL;
2039 }
2040 
2041 /**
2042  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2043  * @adapter: board private structure
2044  *
2045  * Free all receive software resources
2046  **/
2047 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2048 {
2049         int i;
2050 
2051         for (i = 0; i < adapter->num_rx_queues; i++)
2052                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2053 }
2054 
2055 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2056 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2057 {
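              /* A receive frag must hold the driver headroom plus the DMA
               * buffer (rounded up for alignment), followed by an aligned
               * struct skb_shared_info.
               */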
2058         return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2059                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2060 }
2061 
2062 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2063 {
2064         unsigned int len = e1000_frag_len(a);
2065         u8 *data = netdev_alloc_frag(len);
2066 
2067         if (likely(data))
2068                 data += E1000_HEADROOM;
2069         return data;
2070 }
2071 
2072 /**
2073  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2074  * @adapter: board private structure
2075  * @rx_ring: ring to free buffers from
2076  **/
2077 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2078                                 struct e1000_rx_ring *rx_ring)
2079 {
2080         struct e1000_hw *hw = &adapter->hw;
2081         struct e1000_rx_buffer *buffer_info;
2082         struct pci_dev *pdev = adapter->pdev;
2083         unsigned long size;
2084         unsigned int i;
2085 
2086         /* Free all the Rx netfrags */
2087         for (i = 0; i < rx_ring->count; i++) {
2088                 buffer_info = &rx_ring->buffer_info[i];
2089                 if (adapter->clean_rx == e1000_clean_rx_irq) {
2090                         if (buffer_info->dma)
2091                                 dma_unmap_single(&pdev->dev, buffer_info->dma,
2092                                                  adapter->rx_buffer_len,
2093                                                  DMA_FROM_DEVICE);
2094                         if (buffer_info->rxbuf.data) {
2095                                 skb_free_frag(buffer_info->rxbuf.data);
2096                                 buffer_info->rxbuf.data = NULL;
2097                         }
2098                 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2099                         if (buffer_info->dma)
2100                                 dma_unmap_page(&pdev->dev, buffer_info->dma,
2101                                                adapter->rx_buffer_len,
2102                                                DMA_FROM_DEVICE);
2103                         if (buffer_info->rxbuf.page) {
2104                                 put_page(buffer_info->rxbuf.page);
2105                                 buffer_info->rxbuf.page = NULL;
2106                         }
2107                 }
2108 
2109                 buffer_info->dma = 0;
2110         }
2111 
2112         /* there also may be some cached data from a chained receive */
2113         napi_free_frags(&adapter->napi);
2114         rx_ring->rx_skb_top = NULL;
2115 
2116         size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2117         memset(rx_ring->buffer_info, 0, size);
2118 
2119         /* Zero out the descriptor ring */
2120         memset(rx_ring->desc, 0, rx_ring->size);
2121 
2122         rx_ring->next_to_clean = 0;
2123         rx_ring->next_to_use = 0;
2124 
2125         writel(0, hw->hw_addr + rx_ring->rdh);
2126         writel(0, hw->hw_addr + rx_ring->rdt);
2127 }
2128 
2129 /**
2130  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2131  * @adapter: board private structure
2132  **/
2133 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2134 {
2135         int i;
2136 
2137         for (i = 0; i < adapter->num_rx_queues; i++)
2138                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2139 }
2140 
2141 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2142  * and memory write and invalidate disabled for certain operations
2143  */
2144 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2145 {
2146         struct e1000_hw *hw = &adapter->hw;
2147         struct net_device *netdev = adapter->netdev;
2148         u32 rctl;
2149 
2150         e1000_pci_clear_mwi(hw);
2151 
2152         rctl = er32(RCTL);
2153         rctl |= E1000_RCTL_RST;
2154         ew32(RCTL, rctl);
2155         E1000_WRITE_FLUSH();
2156         mdelay(5);
2157 
2158         if (netif_running(netdev))
2159                 e1000_clean_all_rx_rings(adapter);
2160 }
2161 
2162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2163 {
2164         struct e1000_hw *hw = &adapter->hw;
2165         struct net_device *netdev = adapter->netdev;
2166         u32 rctl;
2167 
2168         rctl = er32(RCTL);
2169         rctl &= ~E1000_RCTL_RST;
2170         ew32(RCTL, rctl);
2171         E1000_WRITE_FLUSH();
2172         mdelay(5);
2173 
2174         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2175                 e1000_pci_set_mwi(hw);
2176 
2177         if (netif_running(netdev)) {
2178                 /* No need to loop, because 82542 supports only 1 queue */
2179                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2180                 e1000_configure_rx(adapter);
2181                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2182         }
2183 }
2184 
2185 /**
2186  * e1000_set_mac - Change the Ethernet Address of the NIC
2187  * @netdev: network interface device structure
2188  * @p: pointer to an address structure
2189  *
2190  * Returns 0 on success, negative on failure
2191  **/
2192 static int e1000_set_mac(struct net_device *netdev, void *p)
2193 {
2194         struct e1000_adapter *adapter = netdev_priv(netdev);
2195         struct e1000_hw *hw = &adapter->hw;
2196         struct sockaddr *addr = p;
2197 
2198         if (!is_valid_ether_addr(addr->sa_data))
2199                 return -EADDRNOTAVAIL;
2200 
2201         /* 82542 2.0 needs to be in reset to write receive address registers */
2202 
2203         if (hw->mac_type == e1000_82542_rev2_0)
2204                 e1000_enter_82542_rst(adapter);
2205 
2206         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2207         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2208 
2209         e1000_rar_set(hw, hw->mac_addr, 0);
2210 
2211         if (hw->mac_type == e1000_82542_rev2_0)
2212                 e1000_leave_82542_rst(adapter);
2213 
2214         return 0;
2215 }
2216 
2217 /**
2218  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2219  * @netdev: network interface device structure
2220  *
2221  * The set_rx_mode entry point is called whenever the unicast or multicast
2222  * address lists or the network interface flags are updated. This routine is
2223  * responsible for configuring the hardware for proper unicast, multicast,
2224  * promiscuous mode, and all-multi behavior.
2225  **/
2226 static void e1000_set_rx_mode(struct net_device *netdev)
2227 {
2228         struct e1000_adapter *adapter = netdev_priv(netdev);
2229         struct e1000_hw *hw = &adapter->hw;
2230         struct netdev_hw_addr *ha;
2231         bool use_uc = false;
2232         u32 rctl;
2233         u32 hash_value;
2234         int i, rar_entries = E1000_RAR_ENTRIES;
2235         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2236         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2237 
2238         if (!mcarray)
2239                 return;
2240 
2241         /* Check for Promiscuous and All Multicast modes */
2242 
2243         rctl = er32(RCTL);
2244 
2245         if (netdev->flags & IFF_PROMISC) {
2246                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2247                 rctl &= ~E1000_RCTL_VFE;
2248         } else {
2249                 if (netdev->flags & IFF_ALLMULTI)
2250                         rctl |= E1000_RCTL_MPE;
2251                 else
2252                         rctl &= ~E1000_RCTL_MPE;
2253                 /* Enable VLAN filter if there is a VLAN */
2254                 if (e1000_vlan_used(adapter))
2255                         rctl |= E1000_RCTL_VFE;
2256         }
2257 
2258         if (netdev_uc_count(netdev) > rar_entries - 1) {
2259                 rctl |= E1000_RCTL_UPE;
2260         } else if (!(netdev->flags & IFF_PROMISC)) {
2261                 rctl &= ~E1000_RCTL_UPE;
2262                 use_uc = true;
2263         }
2264 
2265         ew32(RCTL, rctl);
2266 
2267         /* 82542 2.0 needs to be in reset to write receive address registers */
2268 
2269         if (hw->mac_type == e1000_82542_rev2_0)
2270                 e1000_enter_82542_rst(adapter);
2271 
2272         /* Load the first 14 addresses into the exact filters 1-14. Unicast
2273          * addresses take precedence to avoid disabling unicast filtering
2274          * when possible.
2275          *
2276          * RAR 0 is reserved for the station MAC address; if there are fewer
2277          * than 14 addresses, the remaining exact filters are cleared below.
2278          */
2279         i = 1;
2280         if (use_uc)
2281                 netdev_for_each_uc_addr(ha, netdev) {
2282                         if (i == rar_entries)
2283                                 break;
2284                         e1000_rar_set(hw, ha->addr, i++);
2285                 }
2286 
2287         netdev_for_each_mc_addr(ha, netdev) {
2288                 if (i == rar_entries) {
2289                         /* load any remaining addresses into the hash table */
2290                         u32 hash_reg, hash_bit, mta;
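                              /* Bits above the low five of the multicast hash
                               * select one of the mta_reg_count hash-table
                               * registers; the low five select the bit within
                               * that 32-bit register.
                               */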
2291                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2292                         hash_reg = (hash_value >> 5) & 0x7F;
2293                         hash_bit = hash_value & 0x1F;
2294                         mta = (1 << hash_bit);
2295                         mcarray[hash_reg] |= mta;
2296                 } else {
2297                         e1000_rar_set(hw, ha->addr, i++);
2298                 }
2299         }
2300 
2301         for (; i < rar_entries; i++) {
2302                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2303                 E1000_WRITE_FLUSH();
2304                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2305                 E1000_WRITE_FLUSH();
2306         }
2307 
2308         /* Write the hash table completely, from the bottom up, to avoid
2309          * write-combining chipset issues and flushing each individual write.
2310          */
2311         for (i = mta_reg_count - 1; i >= 0; i--) {
2312                 /* The 82544 has an errata where writing an odd offset
2313                  * overwrites the previous even offset; writing backwards
2314                  * over the range avoids this by always writing the odd
2315                  * offset first.
2316                  */
2317                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2318         }
2319         E1000_WRITE_FLUSH();
2320 
2321         if (hw->mac_type == e1000_82542_rev2_0)
2322                 e1000_leave_82542_rst(adapter);
2323 
2324         kfree(mcarray);
2325 }
2326 
2327 /**
2328  * e1000_update_phy_info_task - get phy info
2329  * @work: work struct contained inside adapter struct
2330  *
2331  * Need to wait a few seconds after link up to get diagnostic information from
2332  * the phy
2333  */
2334 static void e1000_update_phy_info_task(struct work_struct *work)
2335 {
2336         struct e1000_adapter *adapter = container_of(work,
2337                                                      struct e1000_adapter,
2338                                                      phy_info_task.work);
2339 
2340         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2341 }
2342 
2343 /**
2344  * e1000_82547_tx_fifo_stall_task - task to complete work
2345  * @work: work struct contained inside adapter struct
2346  **/
2347 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2348 {
2349         struct e1000_adapter *adapter = container_of(work,
2350                                                      struct e1000_adapter,
2351                                                      fifo_stall_task.work);
2352         struct e1000_hw *hw = &adapter->hw;
2353         struct net_device *netdev = adapter->netdev;
2354         u32 tctl;
2355 
2356         if (atomic_read(&adapter->tx_fifo_stall)) {
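                      /* Only repoint the FIFO once transmission has drained:
                       * the descriptor ring must be empty (TDT == TDH) and the
                       * internal FIFO head/tail registers, along with their
                       * saved copies, must agree.
                       */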
2357                 if ((er32(TDT) == er32(TDH)) &&
2358                    (er32(TDFT) == er32(TDFH)) &&
2359                    (er32(TDFTS) == er32(TDFHS))) {
2360                         tctl = er32(TCTL);
2361                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2362                         ew32(TDFT, adapter->tx_head_addr);
2363                         ew32(TDFH, adapter->tx_head_addr);
2364                         ew32(TDFTS, adapter->tx_head_addr);
2365                         ew32(TDFHS, adapter->tx_head_addr);
2366                         ew32(TCTL, tctl);
2367                         E1000_WRITE_FLUSH();
2368 
2369                         adapter->tx_fifo_head = 0;
2370                         atomic_set(&adapter->tx_fifo_stall, 0);
2371                         netif_wake_queue(netdev);
2372                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2373                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2374                 }
2375         }
2376 }
2377 
2378 bool e1000_has_link(struct e1000_adapter *adapter)
2379 {
2380         struct e1000_hw *hw = &adapter->hw;
2381         bool link_active = false;
2382 
2383         /* get_link_status is set on an LSC (link status) interrupt or an
2384          * rx sequence error interrupt; on the Intel ce4100 it is instead
2385          * forced on every pass through this function.
2386          * e1000_check_for_link clears it once link is established, and
2387          * that applies to copper adapters ONLY.
2388          */
2389         switch (hw->media_type) {
2390         case e1000_media_type_copper:
2391                 if (hw->mac_type == e1000_ce4100)
2392                         hw->get_link_status = 1;
2393                 if (hw->get_link_status) {
2394                         e1000_check_for_link(hw);
2395                         link_active = !hw->get_link_status;
2396                 } else {
2397                         link_active = true;
2398                 }
2399                 break;
2400         case e1000_media_type_fiber:
2401                 e1000_check_for_link(hw);
2402                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2403                 break;
2404         case e1000_media_type_internal_serdes:
2405                 e1000_check_for_link(hw);
2406                 link_active = hw->serdes_has_link;
2407                 break;
2408         default:
2409                 break;
2410         }
2411 
2412         return link_active;
2413 }
2414 
2415 /**
2416  * e1000_watchdog - work function
2417  * @work: work struct contained inside adapter struct
2418  **/
2419 static void e1000_watchdog(struct work_struct *work)
2420 {
2421         struct e1000_adapter *adapter = container_of(work,
2422                                                      struct e1000_adapter,
2423                                                      watchdog_task.work);
2424         struct e1000_hw *hw = &adapter->hw;
2425         struct net_device *netdev = adapter->netdev;
2426         struct e1000_tx_ring *txdr = adapter->tx_ring;
2427         u32 link, tctl;
2428 
2429         link = e1000_has_link(adapter);
2430         if (netif_carrier_ok(netdev) && link)
2431                 goto link_up;
2432 
2433         if (link) {
2434                 if (!netif_carrier_ok(netdev)) {
2435                         u32 ctrl;
2436                         /* update snapshot of PHY registers on LSC */
2437                         e1000_get_speed_and_duplex(hw,
2438                                                    &adapter->link_speed,
2439                                                    &adapter->link_duplex);
2440 
2441                         ctrl = er32(CTRL);
2442                         pr_info("%s NIC Link is Up %d Mbps %s, "
2443                                 "Flow Control: %s\n",
2444                                 netdev->name,
2445                                 adapter->link_speed,
2446                                 adapter->link_duplex == FULL_DUPLEX ?
2447                                 "Full Duplex" : "Half Duplex",
2448                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2449                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2450                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2451                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2452 
2453                         /* adjust timeout factor according to speed/duplex */
2454                         adapter->tx_timeout_factor = 1;
2455                         switch (adapter->link_speed) {
2456                         case SPEED_10:
2457                                 adapter->tx_timeout_factor = 16;
2458                                 break;
2459                         case SPEED_100:
2460                                 /* maybe add some timeout factor ? */
2461                                 break;
2462                         }
2463 
2464                         /* enable transmits in the hardware */
2465                         tctl = er32(TCTL);
2466                         tctl |= E1000_TCTL_EN;
2467                         ew32(TCTL, tctl);
2468 
2469                         netif_carrier_on(netdev);
2470                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2471                                 schedule_delayed_work(&adapter->phy_info_task,
2472                                                       2 * HZ);
2473                         adapter->smartspeed = 0;
2474                 }
2475         } else {
2476                 if (netif_carrier_ok(netdev)) {
2477                         adapter->link_speed = 0;
2478                         adapter->link_duplex = 0;
2479                         pr_info("%s NIC Link is Down\n",
2480                                 netdev->name);
2481                         netif_carrier_off(netdev);
2482 
2483                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2484                                 schedule_delayed_work(&adapter->phy_info_task,
2485                                                       2 * HZ);
2486                 }
2487 
2488                 e1000_smartspeed(adapter);
2489         }
2490 
2491 link_up:
2492         e1000_update_stats(adapter);
2493 
2494         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2495         adapter->tpt_old = adapter->stats.tpt;
2496         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2497         adapter->colc_old = adapter->stats.colc;
2498 
2499         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2500         adapter->gorcl_old = adapter->stats.gorcl;
2501         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2502         adapter->gotcl_old = adapter->stats.gotcl;
2503 
2504         e1000_update_adaptive(hw);
2505 
2506         if (!netif_carrier_ok(netdev)) {
2507                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2508                         /* We've lost link, so the controller stops DMA,
2509                          * but we've got queued Tx work that's never going
2510                          * to get done, so reset controller to flush Tx.
2511                          * (Do the reset outside of interrupt context).
2512                          */
2513                         adapter->tx_timeout_count++;
2514                         schedule_work(&adapter->reset_task);
2515                         /* exit immediately since reset is imminent */
2516                         return;
2517                 }
2518         }
2519 
2520         /* Simple mode for Interrupt Throttle Rate (ITR) */
2521         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2522                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2523                  * Total asymmetrical Tx or Rx gets ITR=8000;
2524                  * everyone else is between 2000-8000.
2525                  */
2526                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2527                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2528                             adapter->gotcl - adapter->gorcl :
2529                             adapter->gorcl - adapter->gotcl) / 10000;
2530                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2531 
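                      /* ITR holds the minimum inter-interrupt interval in
                       * 256 ns units, so an interrupts-per-second target is
                       * written as 10^9 / (itr * 256).
                       */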
2532                 ew32(ITR, 1000000000 / (itr * 256));
2533         }
2534 
2535         /* Cause software interrupt to ensure rx ring is cleaned */
2536         ew32(ICS, E1000_ICS_RXDMT0);
2537 
2538         /* Force detection of hung controller every watchdog period */
2539         adapter->detect_tx_hung = true;
2540 
2541         /* Reschedule the task */
2542         if (!test_bit(__E1000_DOWN, &adapter->flags))
2543                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2544 }
2545 
2546 enum latency_range {
2547         lowest_latency = 0,
2548         low_latency = 1,
2549         bulk_latency = 2,
2550         latency_invalid = 255
2551 };
2552 
2553 /**
2554  * e1000_update_itr - update the dynamic ITR value based on statistics
2555  * @adapter: pointer to adapter
2556  * @itr_setting: current adapter->itr
2557  * @packets: the number of packets during this measurement interval
2558  * @bytes: the number of bytes during this measurement interval
2559  *
2560  *      Stores a new ITR value based on the packet and byte counts seen
2561  *      during the last interrupt.  The advantage of per-interrupt
2562  *      computation is faster updates and a more accurate ITR for the
2563  *      current traffic pattern.  Constants in this function were computed
2564  *      from the theoretical maximum wire speed, and thresholds were set
2565  *      from testing data, aiming to minimize response time while
2566  *      increasing bulk throughput.
2567  *      This functionality is controlled by the InterruptThrottleRate
2568  *      module parameter (see e1000_param.c).
2569  **/
2570 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2571                                      u16 itr_setting, int packets, int bytes)
2572 {
2573         unsigned int retval = itr_setting;
2574         struct e1000_hw *hw = &adapter->hw;
2575 
2576         if (unlikely(hw->mac_type < e1000_82540))
2577                 goto update_itr_done;
2578 
2579         if (packets == 0)
2580                 goto update_itr_done;
2581 
2582         switch (itr_setting) {
2583         case lowest_latency:
2584                 /* jumbo frames get bulk treatment */
2585                 if (bytes/packets > 8000)
2586                         retval = bulk_latency;
2587                 else if ((packets < 5) && (bytes > 512))
2588                         retval = low_latency;
2589                 break;
2590         case low_latency:  /* 50 usec aka 20000 ints/s */
2591                 if (bytes > 10000) {
2592                         /* jumbo frames need bulk latency setting */
2593                         if (bytes/packets > 8000)
2594                                 retval = bulk_latency;
2595                         else if ((packets < 10) || ((bytes/packets) > 1200))
2596                                 retval = bulk_latency;
2597                         else if (packets > 35)
2598                                 retval = lowest_latency;
2599                 } else if (bytes/packets > 2000)
2600                         retval = bulk_latency;
2601                 else if (packets <= 2 && bytes < 512)
2602                         retval = lowest_latency;
2603                 break;
2604         case bulk_latency: /* 250 usec aka 4000 ints/s */
2605                 if (bytes > 25000) {
2606                         if (packets > 35)
2607                                 retval = low_latency;
2608                 } else if (bytes < 6000) {
2609                         retval = low_latency;
2610                 }
2611                 break;
2612         }
2613 
2614 update_itr_done:
2615         return retval;
2616 }
2617 
2618 static void e1000_set_itr(struct e1000_adapter *adapter)
2619 {
2620         struct e1000_hw *hw = &adapter->hw;
2621         u16 current_itr;
2622         u32 new_itr = adapter->itr;
2623 
2624         if (unlikely(hw->mac_type < e1000_82540))
2625                 return;
2626 
2627         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2628         if (unlikely(adapter->link_speed != SPEED_1000)) {
2629                 current_itr = 0;
2630                 new_itr = 4000;
2631                 goto set_itr_now;
2632         }
2633 
2634         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2635                                            adapter->total_tx_packets,
2636                                            adapter->total_tx_bytes);
2637         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2638         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2639                 adapter->tx_itr = low_latency;
2640 
2641         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2642                                            adapter->total_rx_packets,
2643                                            adapter->total_rx_bytes);
2644         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2645         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2646                 adapter->rx_itr = low_latency;
2647 
2648         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2649 
2650         switch (current_itr) {
2651         /* counts and packets in update_itr are dependent on these numbers */
2652         case lowest_latency:
2653                 new_itr = 70000;
2654                 break;
2655         case low_latency:
2656                 new_itr = 20000; /* aka hwitr = ~200 */
2657                 break;
2658         case bulk_latency:
2659                 new_itr = 4000;
2660                 break;
2661         default:
2662                 break;
2663         }
2664 
2665 set_itr_now:
2666         if (new_itr != adapter->itr) {
2667                 /* this attempts to bias the interrupt rate towards Bulk
2668                  * by adding intermediate steps when interrupt rate is
2669                  * increasing
2670                  */
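                      /* When ramping up, step by at most a quarter of the
                       * target rate per adjustment (capped at the target);
                       * decreases take effect immediately.
                       */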
2671                 new_itr = new_itr > adapter->itr ?
2672                           min(adapter->itr + (new_itr >> 2), new_itr) :
2673                           new_itr;
2674                 adapter->itr = new_itr;
2675                 ew32(ITR, 1000000000 / (new_itr * 256));
2676         }
2677 }
2678 
2679 #define E1000_TX_FLAGS_CSUM             0x00000001
2680 #define E1000_TX_FLAGS_VLAN             0x00000002
2681 #define E1000_TX_FLAGS_TSO              0x00000004
2682 #define E1000_TX_FLAGS_IPV4             0x00000008
2683 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2684 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2685 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2686 
2687 static int e1000_tso(struct e1000_adapter *adapter,
2688                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2689                      __be16 protocol)
2690 {
2691         struct e1000_context_desc *context_desc;
2692         struct e1000_tx_buffer *buffer_info;
2693         unsigned int i;
2694         u32 cmd_length = 0;
2695         u16 ipcse = 0, tucse, mss;
2696         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2697 
2698         if (skb_is_gso(skb)) {
2699                 int err;
2700 
2701                 err = skb_cow_head(skb, 0);
2702                 if (err < 0)
2703                         return err;
2704 
2705                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2706                 mss = skb_shinfo(skb)->gso_size;
2707                 if (protocol == htons(ETH_P_IP)) {
2708                         struct iphdr *iph = ip_hdr(skb);
2709                         iph->tot_len = 0;
2710                         iph->check = 0;
2711                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2712                                                                  iph->daddr, 0,
2713                                                                  IPPROTO_TCP,
2714                                                                  0);
2715                         cmd_length = E1000_TXD_CMD_IP;
2716                         ipcse = skb_transport_offset(skb) - 1;
2717                 } else if (skb_is_gso_v6(skb)) {
2718                         ipv6_hdr(skb)->payload_len = 0;
2719                         tcp_hdr(skb)->check =
2720                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2721                                                  &ipv6_hdr(skb)->daddr,
2722                                                  0, IPPROTO_TCP, 0);
2723                         ipcse = 0;
2724                 }
2725                 ipcss = skb_network_offset(skb);
2726                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2727                 tucss = skb_transport_offset(skb);
2728                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2729                 tucse = 0;
2730 
2731                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2732                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2733 
2734                 i = tx_ring->next_to_use;
2735                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2736                 buffer_info = &tx_ring->buffer_info[i];
2737 
2738                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2739                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2740                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2741                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2742                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2743                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2744                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2745                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2746                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2747 
2748                 buffer_info->time_stamp = jiffies;
2749                 buffer_info->next_to_watch = i;
2750 
2751                 if (++i == tx_ring->count)
2752                         i = 0;
2753 
2754                 tx_ring->next_to_use = i;
2755 
2756                 return true;
2757         }
2758         return false;
2759 }
2760 
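/* A minimal sketch of the packet geometry e1000_tso() programs into the
 * context descriptor (helper is illustrative only): HDRLEN covers everything
 * up to and including the TCP header, PAYLEN is the remainder that the
 * hardware will slice into MSS-sized segments.
 */
static u32 e1000_tso_paylen(const struct sk_buff *skb)
{
        /* e.g. a 14 + 20 + 20 = 54 byte Ethernet/IPv4/TCP header stack on a
         * 64 KiB GSO skb leaves 65536 - 54 bytes of TCP payload to segment
         */
        u8 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

        return skb->len - hdr_len;
}
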
2761 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2762                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2763                           __be16 protocol)
2764 {
2765         struct e1000_context_desc *context_desc;
2766         struct e1000_tx_buffer *buffer_info;
2767         unsigned int i;
2768         u8 css;
2769         u32 cmd_len = E1000_TXD_CMD_DEXT;
2770 
2771         if (skb->ip_summed != CHECKSUM_PARTIAL)
2772                 return false;
2773 
2774         switch (protocol) {
2775         case cpu_to_be16(ETH_P_IP):
2776                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2777                         cmd_len |= E1000_TXD_CMD_TCP;
2778                 break;
2779         case cpu_to_be16(ETH_P_IPV6):
2780                 /* XXX not handling all IPV6 headers */
2781                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2782                         cmd_len |= E1000_TXD_CMD_TCP;
2783                 break;
2784         default:
2785                 if (unlikely(net_ratelimit()))
2786                         e_warn(drv, "checksum_partial proto=%x!\n",
2787                                skb->protocol);
2788                 break;
2789         }
2790 
2791         css = skb_checksum_start_offset(skb);
2792 
2793         i = tx_ring->next_to_use;
2794         buffer_info = &tx_ring->buffer_info[i];
2795         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2796 
2797         context_desc->lower_setup.ip_config = 0;
2798         context_desc->upper_setup.tcp_fields.tucss = css;
2799         context_desc->upper_setup.tcp_fields.tucso =
2800                 css + skb->csum_offset;
2801         context_desc->upper_setup.tcp_fields.tucse = 0;
2802         context_desc->tcp_seg_setup.data = 0;
2803         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2804 
2805         buffer_info->time_stamp = jiffies;
2806         buffer_info->next_to_watch = i;
2807 
2808         if (unlikely(++i == tx_ring->count))
2809                 i = 0;
2810 
2811         tx_ring->next_to_use = i;
2812 
2813         return true;
2814 }
2815 
2816 #define E1000_MAX_TXD_PWR       12
2817 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2818 
2819 static int e1000_tx_map(struct e1000_adapter *adapter,
2820                         struct e1000_tx_ring *tx_ring,
2821                         struct sk_buff *skb, unsigned int first,
2822                         unsigned int max_per_txd, unsigned int nr_frags,
2823                         unsigned int mss)
2824 {
2825         struct e1000_hw *hw = &adapter->hw;
2826         struct pci_dev *pdev = adapter->pdev;
2827         struct e1000_tx_buffer *buffer_info;
2828         unsigned int len = skb_headlen(skb);
2829         unsigned int offset = 0, size, count = 0, i;
2830         unsigned int f, bytecount, segs;
2831 
2832         i = tx_ring->next_to_use;
2833 
2834         while (len) {
2835                 buffer_info = &tx_ring->buffer_info[i];
2836                 size = min(len, max_per_txd);
2837                 /* Workaround for Controller erratum --
2838                  * descriptor for non-tso packet in a linear SKB that follows a
2839                  * tso gets written back prematurely before the data is fully
2840                  * DMA'd to the controller
2841                  */
2842                 if (!skb->data_len && tx_ring->last_tx_tso &&
2843                     !skb_is_gso(skb)) {
2844                         tx_ring->last_tx_tso = false;
2845                         size -= 4;
2846                 }
2847 
2848                 /* Workaround for premature desc write-backs
2849                  * in TSO mode.  Append 4-byte sentinel desc
2850                  */
2851                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2852                         size -= 4;
2853                 /* Workaround for errata 10; it applies to all
2854                  * controllers in PCI-X mode.
2855                  * The fix is to make sure that the first descriptor of a
2856                  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2857                  */
2858                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2859                              (size > 2015) && count == 0))
2860                         size = 2015;
2861 
2862                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2863                  * terminating buffers within evenly-aligned dwords.
2864                  */
2865                 if (unlikely(adapter->pcix_82544 &&
2866                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2867                    size > 4))
2868                         size -= 4;
2869 
2870                 buffer_info->length = size;
2871                 /* set time_stamp *before* dma to help avoid a possible race */
2872                 buffer_info->time_stamp = jiffies;
2873                 buffer_info->mapped_as_page = false;
2874                 buffer_info->dma = dma_map_single(&pdev->dev,
2875                                                   skb->data + offset,
2876                                                   size, DMA_TO_DEVICE);
2877                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2878                         goto dma_error;
2879                 buffer_info->next_to_watch = i;
2880 
2881                 len -= size;
2882                 offset += size;
2883                 count++;
2884                 if (len) {
2885                         i++;
2886                         if (unlikely(i == tx_ring->count))
2887                                 i = 0;
2888                 }
2889         }
2890 
2891         for (f = 0; f < nr_frags; f++) {
2892                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2893 
2894                 len = skb_frag_size(frag);
2895                 offset = 0;
2896 
2897                 while (len) {
2898                         unsigned long bufend;
2899                         i++;
2900                         if (unlikely(i == tx_ring->count))
2901                                 i = 0;
2902 
2903                         buffer_info = &tx_ring->buffer_info[i];
2904                         size = min(len, max_per_txd);
2905                         /* Workaround for premature desc write-backs
2906                          * in TSO mode.  Append 4-byte sentinel desc
2907                          */
2908                         if (unlikely(mss && f == (nr_frags-1) &&
2909                             size == len && size > 8))
2910                                 size -= 4;
2911                         /* Workaround for potential 82544 hang in PCI-X.
2912                          * Avoid terminating buffers within evenly-aligned
2913                          * dwords.
2914                          */
2915                         bufend = (unsigned long)
2916                                 page_to_phys(skb_frag_page(frag));
2917                         bufend += offset + size - 1;
2918                         if (unlikely(adapter->pcix_82544 &&
2919                                      !(bufend & 4) &&
2920                                      size > 4))
2921                                 size -= 4;
2922 
2923                         buffer_info->length = size;
2924                         buffer_info->time_stamp = jiffies;
2925                         buffer_info->mapped_as_page = true;
2926                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2927                                                 offset, size, DMA_TO_DEVICE);
2928                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2929                                 goto dma_error;
2930                         buffer_info->next_to_watch = i;
2931 
2932                         len -= size;
2933                         offset += size;
2934                         count++;
2935                 }
2936         }
2937 
2938         segs = skb_shinfo(skb)->gso_segs ?: 1;
2939         /* multiply data chunks by size of headers */
2940         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2941 
2942         tx_ring->buffer_info[i].skb = skb;
2943         tx_ring->buffer_info[i].segs = segs;
2944         tx_ring->buffer_info[i].bytecount = bytecount;
2945         tx_ring->buffer_info[first].next_to_watch = i;
2946 
2947         return count;
2948 
2949 dma_error:
2950         dev_err(&pdev->dev, "TX DMA map failed\n");
2951         buffer_info->dma = 0;
2952         if (count)
2953                 count--;
2954 
2955         while (count--) {
2956                 if (i == 0)
2957                         i += tx_ring->count;
2958                 i--;
2959                 buffer_info = &tx_ring->buffer_info[i];
2960                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2961         }
2962 
2963         return 0;
2964 }
2965 
2966 static void e1000_tx_queue(struct e1000_adapter *adapter,
2967                            struct e1000_tx_ring *tx_ring, int tx_flags,
2968                            int count)
2969 {
2970         struct e1000_tx_desc *tx_desc = NULL;
2971         struct e1000_tx_buffer *buffer_info;
2972         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2973         unsigned int i;
2974 
2975         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2976                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2977                              E1000_TXD_CMD_TSE;
2978                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2979 
2980                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2981                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2982         }
2983 
2984         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2985                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2986                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2987         }
2988 
2989         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2990                 txd_lower |= E1000_TXD_CMD_VLE;
2991                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2992         }
2993 
2994         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2995                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2996 
2997         i = tx_ring->next_to_use;
2998 
2999         while (count--) {
3000                 buffer_info = &tx_ring->buffer_info[i];
3001                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3002                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3003                 tx_desc->lower.data =
3004                         cpu_to_le32(txd_lower | buffer_info->length);
3005                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3006                 if (unlikely(++i == tx_ring->count))
3007                         i = 0;
3008         }
3009 
3010         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3011 
3012         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3013         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3014                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3015 
3016         /* Force memory writes to complete before letting h/w
3017          * know there are new descriptors to fetch.  (Only
3018          * applicable for weak-ordered memory model archs,
3019          * such as IA-64).
3020          */
3021         dma_wmb();
3022 
3023         tx_ring->next_to_use = i;
3024 }
3025 
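/* A minimal sketch (illustrative only) of the producer ordering used above
 * and in e1000_xmit_frame(): descriptors are written in host memory, then
 * dma_wmb() makes those writes visible to the device before the tail (TDT)
 * doorbell write that tells the NIC new descriptors are ready to fetch.
 */
static void e1000_tx_doorbell(struct e1000_adapter *adapter,
                              struct e1000_tx_ring *tx_ring)
{
        dma_wmb();                              /* order descriptor stores */
        writel(tx_ring->next_to_use,            /* bump hardware tail */
               adapter->hw.hw_addr + tx_ring->tdt);
}
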
3026 /* 82547 workaround to avoid controller hang in half-duplex environment.
3027  * The workaround is to avoid queuing a large packet that would span
3028  * the internal Tx FIFO ring boundary by notifying the stack to resend
3029  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3030  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3031  * to the beginning of the Tx FIFO.
3032  */
3033 
3034 #define E1000_FIFO_HDR                  0x10
3035 #define E1000_82547_PAD_LEN             0x3E0
3036 
3037 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3038                                        struct sk_buff *skb)
3039 {
3040         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3041         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3042 
3043         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3044 
3045         if (adapter->link_duplex != HALF_DUPLEX)
3046                 goto no_fifo_stall_required;
3047 
3048         if (atomic_read(&adapter->tx_fifo_stall))
3049                 return 1;
3050 
3051         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3052                 atomic_set(&adapter->tx_fifo_stall, 1);
3053                 return 1;
3054         }
3055 
3056 no_fifo_stall_required:
3057         adapter->tx_fifo_head += skb_fifo_len;
3058         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3059                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3060         return 0;
3061 }
3062 
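/* Worked example of the FIFO accounting above, assuming a full-size
 * 1514-byte frame:
 *
 *   skb_fifo_len = ALIGN(1514 + E1000_FIFO_HDR, E1000_FIFO_HDR)
 *                = ALIGN(1530, 16) = 1536 bytes of FIFO consumed.
 *
 * In half duplex the stall is requested once skb_fifo_len reaches
 * fifo_space + E1000_82547_PAD_LEN (0x3E0 = 992 bytes), where fifo_space is
 * what remains before the FIFO wraps.
 */
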
3063 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3064 {
3065         struct e1000_adapter *adapter = netdev_priv(netdev);
3066         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3067 
3068         netif_stop_queue(netdev);
3069         /* Herbert's original patch had:
3070          *  smp_mb__after_netif_stop_queue();
3071          * but since that doesn't exist yet, just open code it.
3072          */
3073         smp_mb();
3074 
3075         /* We need to check again in case another CPU has just
3076          * made room available.
3077          */
3078         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3079                 return -EBUSY;
3080 
3081         /* A reprieve! */
3082         netif_start_queue(netdev);
3083         ++adapter->restart_queue;
3084         return 0;
3085 }
3086 
3087 static int e1000_maybe_stop_tx(struct net_device *netdev,
3088                                struct e1000_tx_ring *tx_ring, int size)
3089 {
3090         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3091                 return 0;
3092         return __e1000_maybe_stop_tx(netdev, size);
3093 }
3094 
3095 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
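
/* Worked example of TXD_USE_COUNT (illustrative helper only): with
 * E1000_MAX_TXD_PWR == 12 each descriptor carries at most 4096 bytes, so the
 * macro is a power-of-two ceiling division.
 */
static unsigned int e1000_txd_count_example(void)
{
        /* a 9000-byte linear buffer: (9000 + 4095) >> 12 == 3 descriptors */
        return TXD_USE_COUNT(9000, E1000_MAX_TXD_PWR);
}
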
3096 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3097                                     struct net_device *netdev)
3098 {
3099         struct e1000_adapter *adapter = netdev_priv(netdev);
3100         struct e1000_hw *hw = &adapter->hw;
3101         struct e1000_tx_ring *tx_ring;
3102         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3103         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3104         unsigned int tx_flags = 0;
3105         unsigned int len = skb_headlen(skb);
3106         unsigned int nr_frags;
3107         unsigned int mss;
3108         int count = 0;
3109         int tso;
3110         unsigned int f;
3111         __be16 protocol = vlan_get_protocol(skb);
3112 
3113         /* This goes back to the question of how to logically map a Tx queue
3114          * to a flow.  Right now, performance is impacted slightly negatively
3115          * if using multiple Tx queues.  If the stack breaks away from a
3116          * single qdisc implementation, we can look at this again.
3117          */
3118         tx_ring = adapter->tx_ring;
3119 
3120         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3121          * packets may get corrupted during padding by HW.
3122          * To work around this issue, pad all small packets manually.
3123          */
3124         if (eth_skb_pad(skb))
3125                 return NETDEV_TX_OK;
3126 
3127         mss = skb_shinfo(skb)->gso_size;
3128         /* The controller does a simple calculation to
3129          * make sure there is enough room in the FIFO before
3130          * initiating the DMA for each buffer.  The calc assumes
3131          * ceil(buffer len/mss) <= 4.  To make sure we don't
3132          * overrun the FIFO, adjust the max buffer len if mss
3133          * drops.
3134          */
3135         if (mss) {
3136                 u8 hdr_len;
3137                 max_per_txd = min(mss << 2, max_per_txd);
3138                 max_txd_pwr = fls(max_per_txd) - 1;
3139 
3140                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3141                 if (skb->data_len && hdr_len == len) {
3142                         switch (hw->mac_type) {
3143                                 unsigned int pull_size;
3144                         case e1000_82544:
3145                                 /* Make sure we have room to chop off 4 bytes,
3146                                  * and that the end alignment will work out to
3147                                  * this hardware's requirements
3148                                  * NOTE: this is a TSO only workaround
3149                                  * if end byte alignment not correct move us
3150                                  * into the next dword
3151                                  */
3152                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3153                                     & 4)
3154                                         break;
3155                                 /* pull 4 bytes of paged data into the head */
3156                                 pull_size = min((unsigned int)4, skb->data_len);
3157                                 if (!__pskb_pull_tail(skb, pull_size)) {
3158                                         e_err(drv, "__pskb_pull_tail "
3159                                               "failed.\n");
3160                                         dev_kfree_skb_any(skb);
3161                                         return NETDEV_TX_OK;
3162                                 }
3163                                 len = skb_headlen(skb);
3164                                 break;
3165                         default:
3166                                 /* do nothing */
3167                                 break;
3168                         }
3169                 }
3170         }
3171 
3172         /* reserve a descriptor for the offload context */
3173         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3174                 count++;
3175         count++;
3176 
3177         /* Controller Erratum workaround */
3178         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3179                 count++;
3180 
3181         count += TXD_USE_COUNT(len, max_txd_pwr);
3182 
3183         if (adapter->pcix_82544)
3184                 count++;
3185 
3186         /* Workaround for errata 10; it applies to all controllers
3187          * in PCI-X mode, so add one more descriptor to the count.
3188          */
3189         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3190                         (len > 2015)))
3191                 count++;
3192 
3193         nr_frags = skb_shinfo(skb)->nr_frags;
3194         for (f = 0; f < nr_frags; f++)
3195                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3196                                        max_txd_pwr);
3197         if (adapter->pcix_82544)
3198                 count += nr_frags;
3199 
3200         /* need: count + 2 desc gap to keep tail from touching
3201          * head, otherwise try next time
3202          */
3203         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3204                 return NETDEV_TX_BUSY;
3205 
3206         if (unlikely((hw->mac_type == e1000_82547) &&
3207                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3208                 netif_stop_queue(netdev);
3209                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3210                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3211                 return NETDEV_TX_BUSY;
3212         }
3213 
3214         if (skb_vlan_tag_present(skb)) {
3215                 tx_flags |= E1000_TX_FLAGS_VLAN;
3216                 tx_flags |= (skb_vlan_tag_get(skb) <<
3217                              E1000_TX_FLAGS_VLAN_SHIFT);
3218         }
3219 
3220         first = tx_ring->next_to_use;
3221 
3222         tso = e1000_tso(adapter, tx_ring, skb, protocol);
3223         if (tso < 0) {
3224                 dev_kfree_skb_any(skb);
3225                 return NETDEV_TX_OK;
3226         }
3227 
3228         if (likely(tso)) {
3229                 if (likely(hw->mac_type != e1000_82544))
3230                         tx_ring->last_tx_tso = true;
3231                 tx_flags |= E1000_TX_FLAGS_TSO;
3232         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3233                 tx_flags |= E1000_TX_FLAGS_CSUM;
3234 
3235         if (protocol == htons(ETH_P_IP))
3236                 tx_flags |= E1000_TX_FLAGS_IPV4;
3237 
3238         if (unlikely(skb->no_fcs))
3239                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3240 
3241         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3242                              nr_frags, mss);
3243 
3244         if (count) {
3245                 /* The number of descriptors needed is higher than in other
3246                  * Intel drivers due to a number of workarounds.  The breakdown is below:
3247                  * Data descriptors: MAX_SKB_FRAGS + 1
3248                  * Context Descriptor: 1
3249                  * Keep head from touching tail: 2
3250                  * Workarounds: 3
3251                  */
3252                 int desc_needed = MAX_SKB_FRAGS + 7;
3253 
3254                 netdev_sent_queue(netdev, skb->len);
3255                 skb_tx_timestamp(skb);
3256 
3257                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3258 
3259                 /* 82544 potentially requires twice as many data descriptors
3260                  * in order to guarantee buffers don't end on evenly-aligned
3261                  * dwords
3262                  */
3263                 if (adapter->pcix_82544)
3264                         desc_needed += MAX_SKB_FRAGS + 1;
3265 
3266                 /* Make sure there is space in the ring for the next send. */
3267                 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3268 
3269                 if (!netdev_xmit_more() ||
3270                     netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3271                         writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3272                 }
3273         } else {
3274                 dev_kfree_skb_any(skb);
3275                 tx_ring->buffer_info[first].time_stamp = 0;
3276                 tx_ring->next_to_use = first;
3277         }
3278 
3279         return NETDEV_TX_OK;
3280 }
3281 
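/* A minimal sketch (illustrative only) of the descriptor budget used above:
 * desc_needed is MAX_SKB_FRAGS + 7 in the common case, with the data
 * descriptor share doubled on an 82544 in PCI-X mode because buffers may be
 * split to avoid ending on evenly-aligned dwords.
 */
static int e1000_tx_desc_budget(const struct e1000_adapter *adapter)
{
        int desc_needed = MAX_SKB_FRAGS + 1    /* data descriptors */
                          + 1                   /* context descriptor */
                          + 2                   /* head/tail gap */
                          + 3;                  /* workaround slack */

        if (adapter->pcix_82544)                /* 82544 PCI-X split penalty */
                desc_needed += MAX_SKB_FRAGS + 1;

        return desc_needed;
}
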
3282 #define NUM_REGS 38 /* 1 based count */
3283 static void e1000_regdump(struct e1000_adapter *adapter)
3284 {
3285         struct e1000_hw *hw = &adapter->hw;
3286         u32 regs[NUM_REGS];
3287         u32 *regs_buff = regs;
3288         int i = 0;
3289 
3290         static const char * const reg_name[] = {
3291                 "CTRL",  "STATUS",
3292                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3293                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3294                 "TIDV", "TXDCTL", "TADV", "TARC0",
3295                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3296                 "TXDCTL1", "TARC1",
3297                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3298                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3299                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3300         };
3301 
3302         regs_buff[0]  = er32(CTRL);
3303         regs_buff[1]  = er32(STATUS);
3304 
3305         regs_buff[2]  = er32(RCTL);
3306         regs_buff[3]  = er32(RDLEN);
3307         regs_buff[4]  = er32(RDH);
3308         regs_buff[5]  = er32(RDT);
3309         regs_buff[6]  = er32(RDTR);
3310 
3311         regs_buff[7]  = er32(TCTL);
3312         regs_buff[8]  = er32(TDBAL);
3313         regs_buff[9]  = er32(TDBAH);
3314         regs_buff[10] = er32(TDLEN);
3315         regs_buff[11] = er32(TDH);
3316         regs_buff[12] = er32(TDT);
3317         regs_buff[13] = er32(TIDV);
3318         regs_buff[14] = er32(TXDCTL);
3319         regs_buff[15] = er32(TADV);
3320         regs_buff[16] = er32(TARC0);
3321 
3322         regs_buff[17] = er32(TDBAL1);
3323         regs_buff[18] = er32(TDBAH1);
3324         regs_buff[19] = er32(TDLEN1);
3325         regs_buff[20] = er32(TDH1);
3326         regs_buff[21] = er32(TDT1);
3327         regs_buff[22] = er32(TXDCTL1);
3328         regs_buff[23] = er32(TARC1);
3329         regs_buff[24] = er32(CTRL_EXT);
3330         regs_buff[25] = er32(ERT);
3331         regs_buff[26] = er32(RDBAL0);
3332         regs_buff[27] = er32(RDBAH0);
3333         regs_buff[28] = er32(TDFH);
3334         regs_buff[29] = er32(TDFT);
3335         regs_buff[30] = er32(TDFHS);
3336         regs_buff[31] = er32(TDFTS);
3337         regs_buff[32] = er32(TDFPC);
3338         regs_buff[33] = er32(RDFH);
3339         regs_buff[34] = er32(RDFT);
3340         regs_buff[35] = er32(RDFHS);
3341         regs_buff[36] = er32(RDFTS);
3342         regs_buff[37] = er32(RDFPC);
3343 
3344         pr_info("Register dump\n");
3345         for (i = 0; i < NUM_REGS; i++)
3346                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3347 }
3348 
3349 /*
3350  * e1000_dump: Print registers, tx ring and rx ring
3351  */
3352 static void e1000_dump(struct e1000_adapter *adapter)
3353 {
3354         /* this code doesn't handle multiple rings */
3355         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3356         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3357         int i;
3358 
3359         if (!netif_msg_hw(adapter))
3360                 return;
3361 
3362         /* Print Registers */
3363         e1000_regdump(adapter);
3364 
3365         /* transmit dump */
3366         pr_info("TX Desc ring0 dump\n");
3367 
3368         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3369          *
3370          * Legacy Transmit Descriptor
3371          *   +--------------------------------------------------------------+
3372          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3373          *   +--------------------------------------------------------------+
3374          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3375          *   +--------------------------------------------------------------+
3376          *   63       48 47        36 35    32 31     24 23    16 15        0
3377          *
3378          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3379          *   63      48 47    40 39       32 31             16 15    8 7      0
3380          *   +----------------------------------------------------------------+
3381          * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3382          *   +----------------------------------------------------------------+
3383          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3384          *   +----------------------------------------------------------------+
3385          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3386          *
3387          * Extended Data Descriptor (DTYP=0x1)
3388          *   +----------------------------------------------------------------+
3389          * 0 |                     Buffer Address [63:0]                      |
3390          *   +----------------------------------------------------------------+
3391          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3392          *   +----------------------------------------------------------------+
3393          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3394          */
3395         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3396         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3397 
3398         if (!netif_msg_tx_done(adapter))
3399                 goto rx_ring_summary;
3400 
3401         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3402                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3403                 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3404                 struct my_u { __le64 a; __le64 b; };
3405                 struct my_u *u = (struct my_u *)tx_desc;
3406                 const char *type;
3407 
3408                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3409                         type = "NTC/U";
3410                 else if (i == tx_ring->next_to_use)
3411                         type = "NTU";
3412                 else if (i == tx_ring->next_to_clean)
3413                         type = "NTC";
3414                 else
3415                         type = "";
3416 
3417                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3418                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3419                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3420                         (u64)buffer_info->dma, buffer_info->length,
3421                         buffer_info->next_to_watch,
3422                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3423         }
3424 
3425 rx_ring_summary:
3426         /* receive dump */
3427         pr_info("\nRX Desc ring dump\n");
3428 
3429         /* Legacy Receive Descriptor Format
3430          *
3431          * +-----------------------------------------------------+
3432          * |                Buffer Address [63:0]                |
3433          * +-----------------------------------------------------+
3434          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3435          * +-----------------------------------------------------+
3436          * 63       48 47    40 39      32 31         16 15      0
3437          */
3438         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3439 
3440         if (!netif_msg_rx_status(adapter))
3441                 goto exit;
3442 
3443         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3444                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3445                 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3446                 struct my_u { __le64 a; __le64 b; };
3447                 struct my_u *u = (struct my_u *)rx_desc;
3448                 const char *type;
3449 
3450                 if (i == rx_ring->next_to_use)
3451                         type = "NTU";
3452                 else if (i == rx_ring->next_to_clean)
3453                         type = "NTC";
3454                 else
3455                         type = "";
3456 
3457                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3458                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3459                         (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3460         } /* for */
3461 
3462         /* dump the descriptor caches */
3463         /* rx */
3464         pr_info("Rx descriptor cache in 64bit format\n");
3465         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3466                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3467                         i,
3468                         readl(adapter->hw.hw_addr + i+4),
3469                         readl(adapter->hw.hw_addr + i),
3470                         readl(adapter->hw.hw_addr + i+12),
3471                         readl(adapter->hw.hw_addr + i+8));
3472         }
3473         /* tx */
3474         pr_info("Tx descriptor cache in 64bit format\n");
3475         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3476                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3477                         i,
3478                         readl(adapter->hw.hw_addr + i+4),
3479                         readl(adapter->hw.hw_addr + i),
3480                         readl(adapter->hw.hw_addr + i+12),
3481                         readl(adapter->hw.hw_addr + i+8));
3482         }
3483 exit:
3484         return;
3485 }
3486 
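/* A minimal sketch (illustrative only) of how the raw qwords printed by
 * e1000_dump() map back onto the legacy transmit descriptor layout shown in
 * the comment above: word 'a' is the buffer address; in word 'b' the length
 * sits in bits 15:0, the command byte in bits 31:24 and the status field
 * (where the DD "done" bit lives) in bits 35:32.
 */
static void e1000_decode_legacy_txd(__le64 a, __le64 b)
{
        u64 qword1 = le64_to_cpu(b);

        pr_info("addr=%016llx len=%llu cmd=%#llx sta=%#llx\n",
                le64_to_cpu(a),
                qword1 & 0xffff,        /* Length [15:0]  */
                (qword1 >> 24) & 0xff,  /* CMD    [31:24] */
                (qword1 >> 32) & 0xf);  /* Status [35:32] */
}
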
3487 /**
3488  * e1000_tx_timeout - Respond to a Tx Hang
3489  * @netdev: network interface device structure
3490  **/
3491 static void e1000_tx_timeout(struct net_device *netdev)
3492 {
3493         struct e1000_adapter *adapter = netdev_priv(netdev);
3494 
3495         /* Do the reset outside of interrupt context */
3496         adapter->tx_timeout_count++;
3497         schedule_work(&adapter->reset_task);
3498 }
3499 
3500 static void e1000_reset_task(struct work_struct *work)
3501 {
3502         struct e1000_adapter *adapter =
3503                 container_of(work, struct e1000_adapter, reset_task);
3504 
3505         e_err(drv, "Reset adapter\n");
3506         e1000_reinit_locked(adapter);
3507 }
3508 
3509 /**
3510  * e1000_change_mtu - Change the Maximum Transfer Unit
3511  * @netdev: network interface device structure
3512  * @new_mtu: new value for maximum frame size
3513  *
3514  * Returns 0 on success, negative on failure
3515  **/
3516 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3517 {
3518         struct e1000_adapter *adapter = netdev_priv(netdev);
3519         struct e1000_hw *hw = &adapter->hw;
3520         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3521 
3522         /* Adapter-specific max frame size limits. */
3523         switch (hw->mac_type) {
3524         case e1000_undefined ... e1000_82542_rev2_1:
3525                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3526                         e_err(probe, "Jumbo Frames not supported.\n");
3527                         return -EINVAL;
3528                 }
3529                 break;
3530         default:
3531                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3532                 break;
3533         }
3534 
3535         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3536                 msleep(1);
3537         /* e1000_down has a dependency on max_frame_size */
3538         hw->max_frame_size = max_frame;
3539         if (netif_running(netdev)) {
3540                 /* prevent buffers from being reallocated */
3541                 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3542                 e1000_down(adapter);
3543         }
3544 
3545         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3546          * means we reserve 2 more; this pushes us to allocate from the next
3547          * larger slab size,
3548          * i.e. RXBUFFER_2048 --> size-4096 slab.
3549          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3550          * fragmented skbs.
3551          */
3552 
3553         if (max_frame <= E1000_RXBUFFER_2048)
3554                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3555         else
3556 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3557                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3558 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3559                 adapter->rx_buffer_len = PAGE_SIZE;
3560 #endif
3561 
3562         /* adjust allocation if LPE protects us, and we aren't using SBP */
3563         if (!hw->tbi_compatibility_on &&
3564             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3565              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3566                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3567 
3568         pr_info("%s changing MTU from %d to %d\n",
3569                 netdev->name, netdev->mtu, new_mtu);
3570         netdev->mtu = new_mtu;
3571 
3572         if (netif_running(netdev))
3573                 e1000_up(adapter);
3574         else
3575                 e1000_reset(adapter);
3576 
3577         clear_bit(__E1000_RESETTING, &adapter->flags);
3578 
3579         return 0;
3580 }
3581 
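/* Worked example of the buffer sizing above: a standard 1500-byte MTU gives
 * max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN = 1518 and therefore a 2048-byte
 * receive buffer, while a 9000-byte jumbo MTU gives max_frame = 9018 and a
 * 16384-byte buffer (or PAGE_SIZE on architectures with smaller pages).
 */
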
3582 /**
3583  * e1000_update_stats - Update the board statistics counters
3584  * @adapter: board private structure
3585  **/
3586 void e1000_update_stats(struct e1000_adapter *adapter)
3587 {
3588         struct net_device *netdev = adapter->netdev;
3589         struct e1000_hw *hw = &adapter->hw;
3590         struct pci_dev *pdev = adapter->pdev;
3591         unsigned long flags;
3592         u16 phy_tmp;
3593 
3594 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3595 
3596         /* Prevent stats update while adapter is being reset, or if the pci
3597          * connection is down.
3598          */
3599         if (adapter->link_speed == 0)
3600                 return;
3601         if (pci_channel_offline(pdev))
3602                 return;
3603 
3604         spin_lock_irqsave(&adapter->stats_lock, flags);
3605 
3606         /* these counters are modified from e1000_tbi_adjust_stats,
3607          * called from the interrupt context, so they must only
3608          * be written while holding adapter->stats_lock
3609          */
3610 
3611         adapter->stats.crcerrs += er32(CRCERRS);
3612         adapter->stats.gprc += er32(GPRC);
3613         adapter->stats.gorcl += er32(GORCL);
3614         adapter->stats.gorch += er32(GORCH);
3615         adapter->stats.bprc += er32(BPRC);
3616         adapter->stats.mprc += er32(MPRC);
3617         adapter->stats.roc += er32(ROC);
3618 
3619         adapter->stats.prc64 += er32(PRC64);
3620         adapter->stats.prc127 += er32(PRC127);
3621         adapter->stats.prc255 += er32(PRC255);
3622         adapter->stats.prc511 += er32(PRC511);
3623         adapter->stats.prc1023 += er32(PRC1023);
3624         adapter->stats.prc1522 += er32(PRC1522);
3625 
3626         adapter->stats.symerrs += er32(SYMERRS);
3627         adapter->stats.mpc += er32(MPC);
3628         adapter->stats.scc += er32(SCC);
3629         adapter->stats.ecol += er32(ECOL);
3630         adapter->stats.mcc += er32(MCC);
3631         adapter->stats.latecol += er32(LATECOL);
3632         adapter->stats.dc += er32(DC);
3633         adapter->stats.sec += er32(SEC);
3634         adapter->stats.rlec += er32(RLEC);
3635         adapter->stats.xonrxc += er32(XONRXC);
3636         adapter->stats.xontxc += er32(XONTXC);
3637         adapter->stats.xoffrxc += er32(XOFFRXC);
3638         adapter->stats.xofftxc += er32(XOFFTXC);
3639         adapter->stats.fcruc += er32(FCRUC);
3640         adapter->stats.gptc += er32(GPTC);
3641         adapter->stats.gotcl += er32(GOTCL);
3642         adapter->stats.gotch += er32(GOTCH);
3643         adapter->stats.rnbc += er32(RNBC);
3644         adapter->stats.ruc += er32(RUC);
3645         adapter->stats.rfc += er32(RFC);
3646         adapter->stats.rjc += er32(RJC);
3647         adapter->stats.torl += er32(TORL);
3648         adapter->stats.torh += er32(TORH);
3649         adapter->stats.totl += er32(TOTL);
3650         adapter->stats.toth += er32(TOTH);
3651         adapter->stats.tpr += er32(TPR);
3652 
3653         adapter->stats.ptc64 += er32(PTC64);
3654         adapter->stats.ptc127 += er32(PTC127);
3655         adapter->stats.ptc255 += er32(PTC255);
3656         adapter->stats.ptc511 += er32(PTC511);
3657         adapter->stats.ptc1023 += er32(PTC1023);
3658         adapter->stats.ptc1522 += er32(PTC1522);
3659 
3660         adapter->stats.mptc += er32(MPTC);
3661         adapter->stats.bptc += er32(BPTC);
3662 
3663         /* used for adaptive IFS */
3664 
3665         hw->tx_packet_delta = er32(TPT);
3666         adapter->stats.tpt += hw->tx_packet_delta;
3667         hw->collision_delta = er32(COLC);
3668         adapter->stats.colc += hw->collision_delta;
3669 
3670         if (hw->mac_type >= e1000_82543) {
3671                 adapter->stats.algnerrc += er32(ALGNERRC);
3672                 adapter->stats.rxerrc += er32(RXERRC);
3673                 adapter->stats.tncrs += er32(TNCRS);
3674                 adapter->stats.cexterr += er32(CEXTERR);
3675                 adapter->stats.tsctc += er32(TSCTC);
3676                 adapter->stats.tsctfc += er32(TSCTFC);
3677         }
3678 
3679         /* Fill out the OS statistics structure */
3680         netdev->stats.multicast = adapter->stats.mprc;
3681         netdev->stats.collisions = adapter->stats.colc;
3682 
3683         /* Rx Errors */
3684 
3685         /* RLEC on some newer hardware can be incorrect so build
3686          * our own version based on RUC and ROC
3687          */
3688         netdev->stats.rx_errors = adapter->stats.rxerrc +
3689                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3690                 adapter->stats.ruc + adapter->stats.roc +
3691                 adapter->stats.cexterr;
3692         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3693         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3694         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3695         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3696         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3697 
3698         /* Tx Errors */
3699         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3700         netdev->stats.tx_errors = adapter->stats.txerrc;
3701         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3702         netdev->stats.tx_window_errors = adapter->stats.latecol;
3703         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3704         if (hw->bad_tx_carr_stats_fd &&
3705             adapter->link_duplex == FULL_DUPLEX) {
3706                 netdev->stats.tx_carrier_errors = 0;
3707                 adapter->stats.tncrs = 0;
3708         }
3709 
3710         /* Tx Dropped needs to be maintained elsewhere */
3711 
3712         /* Phy Stats */
3713         if (hw->media_type == e1000_media_type_copper) {
3714                 if ((adapter->link_speed == SPEED_1000) &&
3715                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3716                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3717                         adapter->phy_stats.idle_errors += phy_tmp;
3718                 }
3719 
3720                 if ((hw->mac_type <= e1000_82546) &&
3721                    (hw->phy_type == e1000_phy_m88) &&
3722                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3723                         adapter->phy_stats.receive_errors += phy_tmp;
3724         }
3725 
3726         /* Management Stats */
3727         if (hw->has_smbus) {
3728                 adapter->stats.mgptc += er32(MGTPTC);
3729                 adapter->stats.mgprc += er32(MGTPRC);
3730                 adapter->stats.mgpdc += er32(MGTPDC);
3731         }
3732 
3733         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3734 }
3735 
3736 /**
3737  * e1000_intr - Interrupt Handler
3738  * @irq: interrupt number
3739  * @data: pointer to a network interface device structure
3740  **/
3741 static irqreturn_t e1000_intr(int irq, void *data)
3742 {
3743         struct net_device *netdev = data;
3744         struct e1000_adapter *adapter = netdev_priv(netdev);
3745         struct e1000_hw *hw = &adapter->hw;
3746         u32 icr = er32(ICR);
3747 
3748         if (unlikely((!icr)))
3749                 return IRQ_NONE;  /* Not our interrupt */
3750 
3751         /* we might have caused the interrupt, but the above
3752          * read cleared it, and just in case the driver is
3753          * down there is nothing to do so return handled
3754          */
3755         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3756                 return IRQ_HANDLED;
3757 
3758         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3759                 hw->get_link_status = 1;
3760                 /* guard against interrupt when we're going down */
3761                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3762                         schedule_delayed_work(&adapter->watchdog_task, 1);
3763         }
3764 
3765         /* disable interrupts, without the synchronize_irq bit */
3766         ew32(IMC, ~0);
3767         E1000_WRITE_FLUSH();
3768 
3769         if (likely(napi_schedule_prep(&adapter->napi))) {
3770                 adapter->total_tx_bytes = 0;
3771                 adapter->total_tx_packets = 0;
3772                 adapter->total_rx_bytes = 0;
3773                 adapter->total_rx_packets = 0;
3774                 __napi_schedule(&adapter->napi);
3775         } else {
3776                 /* this really should not happen! if it does it is basically a
3777                  * bug, but not a hard error, so enable ints and continue
3778                  */
3779                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3780                         e1000_irq_enable(adapter);
3781         }
3782 
3783         return IRQ_HANDLED;
3784 }
3785 
3786 /**
3787  * e1000_clean - NAPI Rx polling callback
3788  * @napi: napi struct containing our adapter
3789  **/
3790 static int e1000_clean(struct napi_struct *napi, int budget)
3791 {
3792         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3793                                                      napi);
3794         int tx_clean_complete = 0, work_done = 0;
3795 
3796         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3797 
3798         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3799 
3800         if (!tx_clean_complete || work_done == budget)
3801                 return budget;
3802 
3803         /* Exit the polling mode, but don't re-enable interrupts if stack might
3804          * poll us due to busy-polling
3805          */
3806         if (likely(napi_complete_done(napi, work_done))) {
3807                 if (likely(adapter->itr_setting & 3))
3808                         e1000_set_itr(adapter);
3809                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3810                         e1000_irq_enable(adapter);
3811         }
3812 
3813         return work_done;
3814 }
3815 
3816 /**
3817  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3818  * @adapter: board private structure
3819  **/
3820 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3821                                struct e1000_tx_ring *tx_ring)
3822 {
3823         struct e1000_hw *hw = &adapter->hw;
3824         struct net_device *netdev = adapter->netdev;
3825         struct e1000_tx_desc *tx_desc, *eop_desc;
3826         struct e1000_tx_buffer *buffer_info;
3827         unsigned int i, eop;
3828         unsigned int count = 0;
3829         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3830         unsigned int bytes_compl = 0, pkts_compl = 0;
3831 
3832         i = tx_ring->next_to_clean;
3833         eop = tx_ring->buffer_info[i].next_to_watch;
3834         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3835 
3836         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3837                (count < tx_ring->count)) {
3838                 bool cleaned = false;
3839                 dma_rmb();      /* read buffer_info after eop_desc */
3840                 for ( ; !cleaned; count++) {
3841                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3842                         buffer_info = &tx_ring->buffer_info[i];
3843                         cleaned = (i == eop);
3844 
3845                         if (cleaned) {
3846                                 total_tx_packets += buffer_info->segs;
3847                                 total_tx_bytes += buffer_info->bytecount;
3848                                 if (buffer_info->skb) {
3849                                         bytes_compl += buffer_info->skb->len;
3850                                         pkts_compl++;
3851                                 }
3852 
3853                         }
3854                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3855                         tx_desc->upper.data = 0;
3856 
3857                         if (unlikely(++i == tx_ring->count))
3858                                 i = 0;
3859                 }
3860 
3861                 eop = tx_ring->buffer_info[i].next_to_watch;
3862                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3863         }
3864 
3865         /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3866          * which will reuse the cleaned buffers.
3867          */
3868         smp_store_release(&tx_ring->next_to_clean, i);
3869 
3870         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3871 
3872 #define TX_WAKE_THRESHOLD 32
3873         if (unlikely(count && netif_carrier_ok(netdev) &&
3874                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3875                 /* Make sure that anybody stopping the queue after this
3876                  * sees the new next_to_clean.
3877                  */
3878                 smp_mb();
3879 
3880                 if (netif_queue_stopped(netdev) &&
3881                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3882                         netif_wake_queue(netdev);
3883                         ++adapter->restart_queue;
3884                 }
3885         }
3886 
3887         if (adapter->detect_tx_hung) {
3888                 /* Detect a transmit hang in hardware; this serializes the
3889                  * check with the clearing of time_stamp and movement of i
3890                  */
3891                 adapter->detect_tx_hung = false;
3892                 if (tx_ring->buffer_info[eop].time_stamp &&
3893                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3894                                (adapter->tx_timeout_factor * HZ)) &&
3895                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3896 
3897                         /* detected Tx unit hang */
3898                         e_err(drv, "Detected Tx Unit Hang\n"
3899                               "  Tx Queue             <%lu>\n"
3900                               "  TDH                  <%x>\n"
3901                               "  TDT                  <%x>\n"
3902                               "  next_to_use          <%x>\n"
3903                               "  next_to_clean        <%x>\n"
3904                               "buffer_info[next_to_clean]\n"
3905                               "  time_stamp           <%lx>\n"
3906                               "  next_to_watch        <%x>\n"
3907                               "  jiffies              <%lx>\n"
3908                               "  next_to_watch.status <%x>\n",
3909                                 (unsigned long)(tx_ring - adapter->tx_ring),
3910                                 readl(hw->hw_addr + tx_ring->tdh),
3911                                 readl(hw->hw_addr + tx_ring->tdt),
3912                                 tx_ring->next_to_use,
3913                                 tx_ring->next_to_clean,
3914                                 tx_ring->buffer_info[eop].time_stamp,
3915                                 eop,
3916                                 jiffies,
3917                                 eop_desc->upper.fields.status);
3918                         e1000_dump(adapter);
3919                         netif_stop_queue(netdev);
3920                 }
3921         }
3922         adapter->total_tx_bytes += total_tx_bytes;
3923         adapter->total_tx_packets += total_tx_packets;
3924         netdev->stats.tx_bytes += total_tx_bytes;
3925         netdev->stats.tx_packets += total_tx_packets;
3926         return count < tx_ring->count;
3927 }
3928 
3929 /**
3930  * e1000_rx_checksum - Receive Checksum Offload for 82543
3931  * @adapter:     board private structure
3932  * @status_err:  receive descriptor status and error fields
3933  * @csum:        receive descriptor csum field
3934  * @skb:         socket buffer with received data
3935  **/
3936 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3937                               u32 csum, struct sk_buff *skb)
3938 {
3939         struct e1000_hw *hw = &adapter->hw;
3940         u16 status = (u16)status_err;
3941         u8 errors = (u8)(status_err >> 24);
3942 
3943         skb_checksum_none_assert(skb);
3944 
3945         /* 82543 or newer only */
3946         if (unlikely(hw->mac_type < e1000_82543))
3947                 return;
3948         /* Ignore Checksum bit is set */
3949         if (unlikely(status & E1000_RXD_STAT_IXSM))
3950                 return;
3951         /* TCP/UDP checksum error bit is set */
3952         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3953                 /* let the stack verify checksum errors */
3954                 adapter->hw_csum_err++;
3955                 return;
3956         }
3957         /* TCP/UDP Checksum has not been calculated */
3958         if (!(status & E1000_RXD_STAT_TCPCS))
3959                 return;
3960 
3961         /* It must be a TCP or UDP packet with a valid checksum */
3962         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3963                 /* TCP checksum is good */
3964                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3965         }
3966         adapter->hw_csum_good++;
3967 }
3968 
3969 /**
3970  * e1000_consume_page - helper function for jumbo Rx path
3971  **/
3972 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3973                                u16 length)
3974 {
3975         bi->rxbuf.page = NULL;
3976         skb->len += length;
3977         skb->data_len += length;
3978         skb->truesize += PAGE_SIZE;
3979 }
3980 
3981 /**
3982  * e1000_receive_skb - helper function to handle rx indications
3983  * @adapter: board private structure
3984  * @status: descriptor status field as written by hardware
3985  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3986  * @skb: pointer to sk_buff to be indicated to stack
3987  */
3988 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3989                               __le16 vlan, struct sk_buff *skb)
3990 {
3991         skb->protocol = eth_type_trans(skb, adapter->netdev);
3992 
3993         if (status & E1000_RXD_STAT_VP) {
3994                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3995 
3996                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3997         }
3998         napi_gro_receive(&adapter->napi, skb);
3999 }
4000 
4001 /**
4002  * e1000_tbi_adjust_stats
4003  * @hw: Struct containing variables accessed by shared code
4004  * @frame_len: The length of the frame in question
4005  * @mac_addr: The Ethernet destination address of the frame in question
4006  *
4007  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4008  */
4009 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4010                                    struct e1000_hw_stats *stats,
4011                                    u32 frame_len, const u8 *mac_addr)
4012 {
4013         u64 carry_bit;
4014 
4015         /* First adjust the frame length. */
4016         frame_len--;
4017         /* We need to adjust the statistics counters, since the hardware
4018          * counters overcount this packet as a CRC error and undercount
4019          * the packet as a good packet
4020          */
4021         /* This packet should not be counted as a CRC error. */
4022         stats->crcerrs--;
4023         /* This packet does count as a Good Packet Received. */
4024         stats->gprc++;
4025 
4026         /* Adjust the Good Octets received counters */
4027         carry_bit = 0x80000000 & stats->gorcl;
4028         stats->gorcl += frame_len;
4029         /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4030          * Received Count) was one before the addition,
4031          * AND it is zero after, then we lost the carry out,
4032          * need to add one to Gorch (Good Octets Received Count High).
4033          * This could be simplified if all environments supported
4034          * 64-bit integers.
4035          */
4036         if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4037                 stats->gorch++;
4038         /* Is this a broadcast or multicast?  Check broadcast first,
4039          * since the test for a multicast frame will test positive on
4040          * a broadcast frame.
4041          */
4042         if (is_broadcast_ether_addr(mac_addr))
4043                 stats->bprc++;
4044         else if (is_multicast_ether_addr(mac_addr))
4045                 stats->mprc++;
4046 
4047         if (frame_len == hw->max_frame_size) {
4048                 /* In this case, the hardware has overcounted the number of
4049                  * oversize frames.
4050                  */
4051                 if (stats->roc > 0)
4052                         stats->roc--;
4053         }
4054 
4055         /* Adjust the bin counters when the extra byte put the frame in the
4056          * wrong bin. Remember that the frame_len was adjusted above.
4057          */
4058         if (frame_len == 64) {
4059                 stats->prc64++;
4060                 stats->prc127--;
4061         } else if (frame_len == 127) {
4062                 stats->prc127++;
4063                 stats->prc255--;
4064         } else if (frame_len == 255) {
4065                 stats->prc255++;
4066                 stats->prc511--;
4067         } else if (frame_len == 511) {
4068                 stats->prc511++;
4069                 stats->prc1023--;
4070         } else if (frame_len == 1023) {
4071                 stats->prc1023++;
4072                 stats->prc1522--;
4073         } else if (frame_len == 1522) {
4074                 stats->prc1522++;
4075         }
4076 }
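/* Worked example of the carry handling above: gorcl and gorch mirror the
 * 32-bit low and high halves of the hardware's Good Octets Received counter.
 * If gorcl held 0xFFFFFF00 before a 0x200-byte frame is added, bit 31 goes
 * from 1 to 0 across the addition; that is the "lost carry" case, so gorch
 * is incremented to keep the combined 64-bit octet count correct.
 */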
4077 
4078 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4079                                     u8 status, u8 errors,
4080                                     u32 length, const u8 *data)
4081 {
4082         struct e1000_hw *hw = &adapter->hw;
4083         u8 last_byte = *(data + length - 1);
4084 
4085         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4086                 unsigned long irq_flags;
4087 
4088                 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4089                 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4090                 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4091 
4092                 return true;
4093         }
4094 
4095         return false;
4096 }
4097 
4098 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4099                                           unsigned int bufsz)
4100 {
4101         struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4102 
4103         if (unlikely(!skb))
4104                 adapter->alloc_rx_buff_failed++;
4105         return skb;
4106 }
4107 
4108 /**
4109  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4110  * @adapter: board private structure
4111  * @rx_ring: ring to clean
4112  * @work_done: amount of napi work completed this call
4113  * @work_to_do: max amount of work allowed for this call to do
4114  *
4115  * The return value indicates whether actual cleaning was done; there
4116  * is no guarantee that everything was cleaned.
4117  */
4118 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4119                                      struct e1000_rx_ring *rx_ring,
4120                                      int *work_done, int work_to_do)
4121 {
4122         struct net_device *netdev = adapter->netdev;
4123         struct pci_dev *pdev = adapter->pdev;
4124         struct e1000_rx_desc *rx_desc, *next_rxd;
4125         struct e1000_rx_buffer *buffer_info, *next_buffer;
4126         u32 length;
4127         unsigned int i;
4128         int cleaned_count = 0;
4129         bool cleaned = false;
4130         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4131 
4132         i = rx_ring->next_to_clean;
4133         rx_desc = E1000_RX_DESC(*rx_ring, i);
4134         buffer_info = &rx_ring->buffer_info[i];
4135 
4136         while (rx_desc->status & E1000_RXD_STAT_DD) {
4137                 struct sk_buff *skb;
4138                 u8 status;
4139 
4140                 if (*work_done >= work_to_do)
4141                         break;
4142                 (*work_done)++;
4143                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4144 
4145                 status = rx_desc->status;
4146 
4147                 if (++i == rx_ring->count)
4148                         i = 0;
4149 
4150                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4151                 prefetch(next_rxd);
4152 
4153                 next_buffer = &rx_ring->buffer_info[i];
4154 
4155                 cleaned = true;
4156                 cleaned_count++;
4157                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4158                                adapter->rx_buffer_len, DMA_FROM_DEVICE);
4159                 buffer_info->dma = 0;
4160 
4161                 length = le16_to_cpu(rx_desc->length);
4162 
4163                 /* errors is only valid for DD + EOP descriptors */
4164                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4165                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4166                         u8 *mapped = page_address(buffer_info->rxbuf.page);
4167 
4168                         if (e1000_tbi_should_accept(adapter, status,
4169                                                     rx_desc->errors,
4170                                                     length, mapped)) {
4171                                 length--;
4172                         } else if (netdev->features & NETIF_F_RXALL) {
4173                                 goto process_skb;
4174                         } else {
4175                                 /* an error means any chain goes out the window
4176                                  * too
4177                                  */
4178                                 dev_kfree_skb(rx_ring->rx_skb_top);
4179                                 rx_ring->rx_skb_top = NULL;
4180                                 goto next_desc;
4181                         }
4182                 }
4183 
4184 #define rxtop rx_ring->rx_skb_top
4185 process_skb:
4186                 if (!(status & E1000_RXD_STAT_EOP)) {
4187                         /* this descriptor is only the beginning (or middle) */
4188                         if (!rxtop) {
4189                                 /* this is the beginning of a chain */
4190                                 rxtop = napi_get_frags(&adapter->napi);
4191                                 if (!rxtop)
4192                                         break;
4193 
4194                                 skb_fill_page_desc(rxtop, 0,
4195                                                    buffer_info->rxbuf.page,
4196                                                    0, length);
4197                         } else {
4198                                 /* this is the middle of a chain */
4199                                 skb_fill_page_desc(rxtop,
4200                                     skb_shinfo(rxtop)->nr_frags,
4201                                     buffer_info->rxbuf.page, 0, length);
4202                         }
4203                         e1000_consume_page(buffer_info, rxtop, length);
4204                         goto next_desc;
4205                 } else {
4206                         if (rxtop) {
4207                                 /* end of the chain */
4208                                 skb_fill_page_desc(rxtop,
4209                                     skb_shinfo(rxtop)->nr_frags,
4210                                     buffer_info->rxbuf.page, 0, length);
4211                                 skb = rxtop;
4212                                 rxtop = NULL;
4213                                 e1000_consume_page(buffer_info, skb, length);
4214                         } else {
4215                                 struct page *p;
4216                                 /* no chain, got EOP, this buf is the packet
4217                                  * copybreak to save the put_page/alloc_page
4218                                  */
4219                                 p = buffer_info->rxbuf.page;
4220                                 if (length <= copybreak) {
4221                                         u8 *vaddr;
4222 
4223                                         if (likely(!(netdev->features & NETIF_F_RXFCS)))
4224                                                 length -= 4;
4225                                         skb = e1000_alloc_rx_skb(adapter,
4226                                                                  length);
4227                                         if (!skb)
4228                                                 break;
4229 
4230                                         vaddr = kmap_atomic(p);
4231                                         memcpy(skb_tail_pointer(skb), vaddr,
4232                                                length);
4233                                         kunmap_atomic(vaddr);
4234                                         /* re-use the page, so don't erase
4235                                          * buffer_info->rxbuf.page
4236                                          */
4237                                         skb_put(skb, length);
4238                                         e1000_rx_checksum(adapter,
4239                                                           status | rx_desc->errors << 24,
4240                                                           le16_to_cpu(rx_desc->csum), skb);
4241 
4242                                         total_rx_bytes += skb->len;
4243                                         total_rx_packets++;
4244 
4245                                         e1000_receive_skb(adapter, status,
4246                                                           rx_desc->special, skb);
4247                                         goto next_desc;
4248                                 } else {
4249                                         skb = napi_get_frags(&adapter->napi);
4250                                         if (!skb) {
4251                                                 adapter->alloc_rx_buff_failed++;
4252                                                 break;
4253                                         }
4254                                         skb_fill_page_desc(skb, 0, p, 0,
4255                                                            length);
4256                                         e1000_consume_page(buffer_info, skb,
4257                                                            length);
4258                                 }
4259                         }
4260                 }
4261 
4262                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4263                 e1000_rx_checksum(adapter,
4264                                   (u32)(status) |
4265                                   ((u32)(rx_desc->errors) << 24),
4266                                   le16_to_cpu(rx_desc->csum), skb);
4267 
4268                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4269                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4270                         pskb_trim(skb, skb->len - 4);
4271                 total_rx_packets++;
4272 
4273                 if (status & E1000_RXD_STAT_VP) {
4274                         __le16 vlan = rx_desc->special;
4275                         u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4276 
4277                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4278                 }
4279 
4280                 napi_gro_frags(&adapter->napi);
4281 
4282 next_desc:
4283                 rx_desc->status = 0;
4284 
4285                 /* return some buffers to hardware, one at a time is too slow */
4286                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4287                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4288                         cleaned_count = 0;
4289                 }
4290 
4291                 /* use prefetched values */
4292                 rx_desc = next_rxd;
4293                 buffer_info = next_buffer;
4294         }
4295         rx_ring->next_to_clean = i;
4296 
4297         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4298         if (cleaned_count)
4299                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4300 
4301         adapter->total_rx_packets += total_rx_packets;
4302         adapter->total_rx_bytes += total_rx_bytes;
4303         netdev->stats.rx_bytes += total_rx_bytes;
4304         netdev->stats.rx_packets += total_rx_packets;
4305         return cleaned;
4306 }
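/* The chained-page path above relies on the napi_get_frags()/napi_gro_frags()
 * pairing: napi_get_frags() hands back an skb owned by the NAPI context, the
 * driver attaches one page per descriptor, and napi_gro_frags() finally
 * pushes the assembled frame into GRO.  A minimal sketch of one step of that
 * flow (illustrative only, error handling omitted):
 *
 *	struct sk_buff *skb = napi_get_frags(&adapter->napi);
 *
 *	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 *			   buffer_info->rxbuf.page, 0, length);
 *	e1000_consume_page(buffer_info, skb, length);
 *	...
 *	napi_gro_frags(&adapter->napi);
 */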
4307 
4308 /* this should improve performance for small packets with large amounts
4309  * of reassembly being done in the stack
4310  */
4311 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4312                                        struct e1000_rx_buffer *buffer_info,
4313                                        u32 length, const void *data)
4314 {
4315         struct sk_buff *skb;
4316 
4317         if (length > copybreak)
4318                 return NULL;
4319 
4320         skb = e1000_alloc_rx_skb(adapter, length);
4321         if (!skb)
4322                 return NULL;
4323 
4324         dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4325                                 length, DMA_FROM_DEVICE);
4326 
4327         skb_put_data(skb, data, length);
4328 
4329         return skb;
4330 }
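/* Two outcomes are possible here: for frames no longer than the copybreak
 * threshold the payload is copied into a fresh skb and the original DMA
 * buffer stays mapped (buffer_info->rxbuf.data remains set, so the refill
 * path below simply reuses it); for larger frames e1000_clean_rx_irq()
 * instead wraps the existing buffer with build_skb() and hands it to the
 * stack, unmapping it and forcing a fresh allocation on the next refill.
 */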
4331 
4332 /**
4333  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4334  * @adapter: board private structure
4335  * @rx_ring: ring to clean
4336  * @work_done: amount of napi work completed this call
4337  * @work_to_do: max amount of work allowed for this call to do
4338  */
4339 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4340                                struct e1000_rx_ring *rx_ring,
4341                                int *work_done, int work_to_do)
4342 {
4343         struct net_device *netdev = adapter->netdev;
4344         struct pci_dev *pdev = adapter->pdev;
4345         struct e1000_rx_desc *rx_desc, *next_rxd;
4346         struct e1000_rx_buffer *buffer_info, *next_buffer;
4347         u32 length;
4348         unsigned int i;
4349         int cleaned_count = 0;
4350         bool cleaned = false;
4351         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4352 
4353         i = rx_ring->next_to_clean;
4354         rx_desc = E1000_RX_DESC(*rx_ring, i);
4355         buffer_info = &rx_ring->buffer_info[i];
4356 
4357         while (rx_desc->status & E1000_RXD_STAT_DD) {
4358                 struct sk_buff *skb;
4359                 u8 *data;
4360                 u8 status;
4361 
4362                 if (*work_done >= work_to_do)
4363                         break;
4364                 (*work_done)++;
4365                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4366 
4367                 status = rx_desc->status;
4368                 length = le16_to_cpu(rx_desc->length);
4369 
4370                 data = buffer_info->rxbuf.data;
4371                 prefetch(data);
4372                 skb = e1000_copybreak(adapter, buffer_info, length, data);
4373                 if (!skb) {
4374                         unsigned int frag_len = e1000_frag_len(adapter);
4375 
4376                         skb = build_skb(data - E1000_HEADROOM, frag_len);
4377                         if (!skb) {
4378                                 adapter->alloc_rx_buff_failed++;
4379                                 break;
4380                         }
4381 
4382                         skb_reserve(skb, E1000_HEADROOM);
4383                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4384                                          adapter->rx_buffer_len,
4385                                          DMA_FROM_DEVICE);
4386                         buffer_info->dma = 0;
4387                         buffer_info->rxbuf.data = NULL;
4388                 }
4389 
4390                 if (++i == rx_ring->count)
4391                         i = 0;
4392 
4393                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4394                 prefetch(next_rxd);
4395 
4396                 next_buffer = &rx_ring->buffer_info[i];
4397 
4398                 cleaned = true;
4399                 cleaned_count++;
4400 
4401                 /* !EOP means multiple descriptors were used to store a single
4402                  * packet; if that's the case we need to toss it.  In fact, we
4403                  * need to toss every packet with the EOP bit clear and the next
4404                  * frame that _does_ have the EOP bit set, as it is by
4405                  * definition only a frame fragment
4406                  */
4407                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4408                         adapter->discarding = true;
4409 
4410                 if (adapter->discarding) {
4411                         /* All receives must fit into a single buffer */
4412                         netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4413                         dev_kfree_skb(skb);
4414                         if (status & E1000_RXD_STAT_EOP)
4415                                 adapter->discarding = false;
4416                         goto next_desc;
4417                 }
4418 
4419                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4420                         if (e1000_tbi_should_accept(adapter, status,
4421                                                     rx_desc->errors,
4422                                                     length, data)) {
4423                                 length--;
4424                         } else if (netdev->features & NETIF_F_RXALL) {
4425                                 goto process_skb;
4426                         } else {
4427                                 dev_kfree_skb(skb);
4428                                 goto next_desc;
4429                         }
4430                 }
4431 
4432 process_skb:
4433                 total_rx_bytes += (length - 4); /* don't count FCS */
4434                 total_rx_packets++;
4435 
4436                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4437                         /* adjust length to remove Ethernet CRC, this must be
4438                          * done after the TBI_ACCEPT workaround above
4439                          */
4440                         length -= 4;
4441 
4442                 if (buffer_info->rxbuf.data == NULL)
4443                         skb_put(skb, length);
4444                 else /* copybreak skb */
4445                         skb_trim(skb, length);
4446 
4447                 /* Receive Checksum Offload */
4448                 e1000_rx_checksum(adapter,
4449                                   (u32)(status) |
4450                                   ((u32)(rx_desc->errors) << 24),
4451                                   le16_to_cpu(rx_desc->csum), skb);
4452 
4453                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4454 
4455 next_desc:
4456                 rx_desc->status = 0;
4457 
4458                 /* return some buffers to hardware, one at a time is too slow */
4459                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4460                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4461                         cleaned_count = 0;
4462                 }
4463 
4464                 /* use prefetched values */
4465                 rx_desc = next_rxd;
4466                 buffer_info = next_buffer;
4467         }
4468         rx_ring->next_to_clean = i;
4469 
4470         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4471         if (cleaned_count)
4472                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4473 
4474         adapter->total_rx_packets += total_rx_packets;
4475         adapter->total_rx_bytes += total_rx_bytes;
4476         netdev->stats.rx_bytes += total_rx_bytes;
4477         netdev->stats.rx_packets += total_rx_packets;
4478         return cleaned;
4479 }
4480 
4481 /**
4482  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4483  * @adapter: address of board private structure
4484  * @rx_ring: pointer to receive ring structure
4485  * @cleaned_count: number of buffers to allocate this pass
4486  **/
4487 static void
4488 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4489                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4490 {
4491         struct pci_dev *pdev = adapter->pdev;
4492         struct e1000_rx_desc *rx_desc;
4493         struct e1000_rx_buffer *buffer_info;
4494         unsigned int i;
4495 
4496         i = rx_ring->next_to_use;
4497         buffer_info = &rx_ring->buffer_info[i];
4498 
4499         while (cleaned_count--) {
4500                 /* allocate a new page if necessary */
4501                 if (!buffer_info->rxbuf.page) {
4502                         buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4503                         if (unlikely(!buffer_info->rxbuf.page)) {
4504                                 adapter->alloc_rx_buff_failed++;
4505                                 break;
4506                         }
4507                 }
4508 
4509                 if (!buffer_info->dma) {
4510                         buffer_info->dma = dma_map_page(&pdev->dev,
4511                                                         buffer_info->rxbuf.page, 0,
4512                                                         adapter->rx_buffer_len,
4513                                                         DMA_FROM_DEVICE);
4514                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4515                                 put_page(buffer_info->rxbuf.page);
4516                                 buffer_info->rxbuf.page = NULL;
4517                                 buffer_info->dma = 0;
4518                                 adapter->alloc_rx_buff_failed++;
4519                                 break;
4520                         }
4521                 }
4522 
4523                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4524                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4525 
4526                 if (unlikely(++i == rx_ring->count))
4527                         i = 0;
4528                 buffer_info = &rx_ring->buffer_info[i];
4529         }
4530 
4531         if (likely(rx_ring->next_to_use != i)) {
4532                 rx_ring->next_to_use = i;
4533                 if (unlikely(i-- == 0))
4534                         i = (rx_ring->count - 1);
4535 
4536                 /* Force memory writes to complete before letting h/w
4537                  * know there are new descriptors to fetch.  (Only
4538                  * applicable for weak-ordered memory model archs,
4539                  * such as IA-64).
4540                  */
4541                 dma_wmb();
4542                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4543         }
4544 }
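/* Note on the tail update above: next_to_use records the first slot that has
 * not yet been given a buffer, while RDT is written with the index one behind
 * it, i.e. the last descriptor that actually received a buffer.  Worked
 * example with a 256-entry ring: if the refill loop wraps and stops at i == 0,
 * next_to_use becomes 0 and RDT is written as 255.  The dma_wmb() guarantees
 * the descriptor writes are visible before the tail write reaches the device.
 */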
4545 
4546 /**
4547  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4548  * @adapter: address of board private structure
4549  **/
4550 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4551                                    struct e1000_rx_ring *rx_ring,
4552                                    int cleaned_count)
4553 {
4554         struct e1000_hw *hw = &adapter->hw;
4555         struct pci_dev *pdev = adapter->pdev;
4556         struct e1000_rx_desc *rx_desc;
4557         struct e1000_rx_buffer *buffer_info;
4558         unsigned int i;
4559         unsigned int bufsz = adapter->rx_buffer_len;
4560 
4561         i = rx_ring->next_to_use;
4562         buffer_info = &rx_ring->buffer_info[i];
4563 
4564         while (cleaned_count--) {
4565                 void *data;
4566 
4567                 if (buffer_info->rxbuf.data)
4568                         goto skip;
4569 
4570                 data = e1000_alloc_frag(adapter);
4571                 if (!data) {
4572                         /* Better luck next round */
4573                         adapter->alloc_rx_buff_failed++;
4574                         break;
4575                 }
4576 
4577                 /* Fix for errata 23, can't cross 64kB boundary */
4578                 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4579                         void *olddata = data;
4580                         e_err(rx_err, "skb align check failed: %u bytes at "
4581                               "%p\n", bufsz, data);
4582                         /* Try again, without freeing the previous */
4583                         data = e1000_alloc_frag(adapter);
4584                         /* Failed allocation, critical failure */
4585                         if (!data) {
4586                                 skb_free_frag(olddata);
4587                                 adapter->alloc_rx_buff_failed++;
4588                                 break;
4589                         }
4590 
4591                         if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4592                                 /* give up */
4593                                 skb_free_frag(data);
4594                                 skb_free_frag(olddata);
4595                                 adapter->alloc_rx_buff_failed++;
4596                                 break;
4597                         }
4598 
4599                         /* Use new allocation */
4600                         skb_free_frag(olddata);
4601                 }
4602                 buffer_info->dma = dma_map_single(&pdev->dev,
4603                                                   data,
4604                                                   adapter->rx_buffer_len,
4605                                                   DMA_FROM_DEVICE);
4606                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4607                         skb_free_frag(data);
4608                         buffer_info->dma = 0;
4609                         adapter->alloc_rx_buff_failed++;
4610                         break;
4611                 }
4612 
4613                 /* XXX if it was allocated cleanly it will never map to a
4614                  * boundary crossing
4615                  */
4616 
4617                 /* Fix for errata 23, can't cross 64kB boundary */
4618                 if (!e1000_check_64k_bound(adapter,
4619                                         (void *)(unsigned long)buffer_info->dma,
4620                                         adapter->rx_buffer_len)) {
4621                         e_err(rx_err, "dma align check failed: %u bytes at "
4622                               "%p\n", adapter->rx_buffer_len,
4623                               (void *)(unsigned long)buffer_info->dma);
4624 
4625                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4626                                          adapter->rx_buffer_len,
4627                                          DMA_FROM_DEVICE);
4628 
4629                         skb_free_frag(data);
4630                         buffer_info->rxbuf.data = NULL;
4631                         buffer_info->dma = 0;
4632 
4633                         adapter->alloc_rx_buff_failed++;
4634                         break;
4635                 }
4636                 buffer_info->rxbuf.data = data;
4637  skip:
4638                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4639                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4640 
4641                 if (unlikely(++i == rx_ring->count))
4642                         i = 0;
4643                 buffer_info = &rx_ring->buffer_info[i];
4644         }
4645 
4646         if (likely(rx_ring->next_to_use != i)) {
4647                 rx_ring->next_to_use = i;
4648                 if (unlikely(i-- == 0))
4649                         i = (rx_ring->count - 1);
4650 
4651                 /* Force memory writes to complete before letting h/w
4652                  * know there are new descriptors to fetch.  (Only
4653                  * applicable for weak-ordered memory model archs,
4654                  * such as IA-64).
4655                  */
4656                 dma_wmb();
4657                 writel(i, hw->hw_addr + rx_ring->rdt);
4658         }
4659 }
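/* The errata 23 checks above reject any receive buffer that straddles a
 * 64 KiB boundary, both for the CPU address of the frag and for the bus
 * address returned by dma_map_single().  One way to express that condition,
 * as a sketch only (the real helper, e1000_check_64k_bound(), is defined
 * earlier in this file and limits the check to the affected MAC types):
 *
 *	static inline bool crosses_64k(unsigned long start, unsigned long len)
 *	{
 *		return (start >> 16) != ((start + len - 1) >> 16);
 *	}
 */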
4660 
4661 /**
4662  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4663  * @adapter: board private structure
4664  **/
4665 static void e1000_smartspeed(struct e1000_adapter *adapter)
4666 {
4667         struct e1000_hw *hw = &adapter->hw;
4668         u16 phy_status;
4669         u16 phy_ctrl;
4670 
4671         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4672            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4673                 return;
4674 
4675         if (adapter->smartspeed == 0) {
4676                 /* If Master/Slave config fault is asserted twice,
4677                  * we assume back-to-back
4678                  */
4679                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4680                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4681                         return;
4682                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4683                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4684                         return;
4685                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4686                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4687                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4688                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4689                                             phy_ctrl);
4690                         adapter->smartspeed++;
4691                         if (!e1000_phy_setup_autoneg(hw) &&
4692                            !e1000_read_phy_reg(hw, PHY_CTRL,
4693                                                &phy_ctrl)) {
4694                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4695                                              MII_CR_RESTART_AUTO_NEG);
4696                                 e1000_write_phy_reg(hw, PHY_CTRL,
4697                                                     phy_ctrl);
4698                         }
4699                 }
4700                 return;
4701         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4702                 /* If still no link, perhaps using 2/3 pair cable */
4703                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4704                 phy_ctrl |= CR_1000T_MS_ENABLE;
4705                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4706                 if (!e1000_phy_setup_autoneg(hw) &&
4707                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4708                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4709                                      MII_CR_RESTART_AUTO_NEG);
4710                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4711                 }
4712         }
4713         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4714         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4715                 adapter->smartspeed = 0;
4716 }
4717 
4718 /**
4719  * e1000_ioctl - handle device-specific ioctl requests
4720  * @netdev: network interface device structure
4721  * @ifr: interface request structure
4722  * @cmd: ioctl command to execute
4723  **/
4724 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4725 {
4726         switch (cmd) {
4727         case SIOCGMIIPHY:
4728         case SIOCGMIIREG:
4729         case SIOCSMIIREG:
4730                 return e1000_mii_ioctl(netdev, ifr, cmd);
4731         default:
4732                 return -EOPNOTSUPP;
4733         }
4734 }
4735 
4736 /**
4737  * e1000_mii_ioctl - read or write a PHY register via the MII ioctl interface
4738  * @netdev: network interface device structure
4739  * @ifr: interface request structure containing the MII data
4740  * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4741  **/
4742 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4743                            int cmd)
4744 {
4745         struct e1000_adapter *adapter = netdev_priv(netdev);
4746         struct e1000_hw *hw = &adapter->hw;
4747         struct mii_ioctl_data *data = if_mii(ifr);
4748         int retval;
4749         u16 mii_reg;
4750         unsigned long flags;
4751 
4752         if (hw->media_type != e1000_media_type_copper)
4753                 return -EOPNOTSUPP;
4754 
4755         switch (cmd) {
4756         case SIOCGMIIPHY:
4757                 data->phy_id = hw->phy_addr;
4758                 break;
4759         case SIOCGMIIREG:
4760                 spin_lock_irqsave(&adapter->stats_lock, flags);
4761                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4762                                    &data->val_out)) {
4763                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4764                         return -EIO;
4765                 }
4766                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4767                 break;
4768         case SIOCSMIIREG:
4769                 if (data->reg_num & ~(0x1F))
4770                         return -EFAULT;
4771                 mii_reg = data->val_in;
4772                 spin_lock_irqsave(&adapter->stats_lock, flags);
4773                 if (e1000_write_phy_reg(hw, data->reg_num,
4774                                         mii_reg)) {
4775                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4776                         return -EIO;
4777                 }
4778                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4779                 if (hw->media_type == e1000_media_type_copper) {
4780                         switch (data->reg_num) {
4781                         case PHY_CTRL:
4782                                 if (mii_reg & MII_CR_POWER_DOWN)
4783                                         break;
4784                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4785                                         hw->autoneg = 1;
4786                                         hw->autoneg_advertised = 0x2F;
4787                                 } else {
4788                                         u32 speed;
4789                                         if (mii_reg & 0x40)
4790                                                 speed = SPEED_1000;
4791                                         else if (mii_reg & 0x2000)
4792                                                 speed = SPEED_100;
4793                                         else
4794                                                 speed = SPEED_10;
4795                                         retval = e1000_set_spd_dplx(
4796                                                 adapter, speed,
4797                                                 ((mii_reg & 0x100)
4798                                                  ? DUPLEX_FULL :
4799                                                  DUPLEX_HALF));
4800                                         if (retval)
4801                                                 return retval;
4802                                 }
4803                                 if (netif_running(adapter->netdev))
4804                                         e1000_reinit_locked(adapter);
4805                                 else
4806                                         e1000_reset(adapter);
4807                                 break;
4808                         case M88E1000_PHY_SPEC_CTRL:
4809                         case M88E1000_EXT_PHY_SPEC_CTRL:
4810                                 if (e1000_phy_reset(hw))
4811                                         return -EIO;
4812                                 break;
4813                         }
4814                 } else {
4815                         switch (data->reg_num) {
4816                         case PHY_CTRL:
4817                                 if (mii_reg & MII_CR_POWER_DOWN)
4818                                         break;
4819                                 if (netif_running(adapter->netdev))
4820                                         e1000_reinit_locked(adapter);
4821                                 else
4822                                         e1000_reset(adapter);
4823                                 break;
4824                         }
4825                 }
4826                 break;
4827         default:
4828                 return -EOPNOTSUPP;
4829         }
4830         return E1000_SUCCESS;
4831 }
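/* For reference, these MII ioctls use the generic struct mii_ioctl_data ABI
 * shared by all MII-capable drivers, so they are reachable from user space
 * with the usual idiom.  A minimal user-space sketch, assuming a hypothetical
 * interface name of "eth0", with headers <sys/socket.h>, <sys/ioctl.h>,
 * <string.h>, <net/if.h>, <linux/mii.h> and <linux/sockios.h>, and with
 * error handling omitted:
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);      (fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);      (PHY status now in mii->val_out)
 */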
4832 
4833 void e1000_pci_set_mwi(struct e1000_hw *hw)
4834 {
4835         struct e1000_adapter *adapter = hw->back;
4836         int ret_val = pci_set_mwi(adapter->pdev);
4837 
4838         if (ret_val)
4839                 e_err(probe, "Error in setting MWI\n");
4840 }
4841 
4842 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4843 {
4844         struct e1000_adapter *adapter = hw->back;
4845 
4846         pci_clear_mwi(adapter->pdev);
4847 }
4848 
4849 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4850 {
4851         struct e1000_adapter *adapter = hw->back;
4852         return pcix_get_mmrbc(adapter->pdev);
4853 }
4854 
4855 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4856 {
4857         struct e1000_adapter *adapter = hw->back;
4858         pcix_set_mmrbc(adapter->pdev, mmrbc);
4859 }
4860 
4861 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4862 {
4863         outl(value, port);
4864 }
4865 
4866 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4867 {
4868         u16 vid;
4869 
4870         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4871                 return true;
4872         return false;
4873 }
4874 
4875 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4876                               netdev_features_t features)
4877 {
4878         struct e1000_hw *hw = &adapter->hw;
4879         u32 ctrl;
4880 
4881         ctrl = er32(CTRL);
4882         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4883                 /* enable VLAN tag insert/strip */
4884                 ctrl |= E1000_CTRL_VME;
4885         } else {
4886                 /* disable VLAN tag insert/strip */
4887                 ctrl &= ~E1000_CTRL_VME;
4888         }
4889         ew32(CTRL, ctrl);
4890 }
4891 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4892                                      bool filter_on)
4893 {
4894         struct e1000_hw *hw = &adapter->hw;
4895         u32 rctl;
4896 
4897         if (!test_bit(__E1000_DOWN, &adapter->flags))
4898                 e1000_irq_disable(adapter);
4899 
4900         __e1000_vlan_mode(adapter, adapter->netdev->features);
4901         if (filter_on) {
4902                 /* enable VLAN receive filtering */
4903                 rctl = er32(RCTL);
4904                 rctl &= ~E1000_RCTL_CFIEN;
4905                 if (!(adapter->netdev->flags & IFF_PROMISC))
4906                         rctl |= E1000_RCTL_VFE;
4907                 ew32(RCTL, rctl);
4908                 e1000_update_mng_vlan(adapter);
4909         } else {
4910                 /* disable VLAN receive filtering */
4911                 rctl = er32(RCTL);
4912                 rctl &= ~E1000_RCTL_VFE;
4913                 ew32(RCTL, rctl);
4914         }
4915 
4916         if (!test_bit(__E1000_DOWN, &adapter->flags))
4917                 e1000_irq_enable(adapter);
4918 }
4919 
4920 static void e1000_vlan_mode(struct net_device *netdev,
4921                             netdev_features_t features)
4922 {
4923         struct e1000_adapter *adapter = netdev_priv(netdev);
4924 
4925         if (!test_bit(__E1000_DOWN, &adapter->flags))
4926                 e1000_irq_disable(adapter);
4927 
4928         __e1000_vlan_mode(adapter, features);
4929 
4930         if (!test_bit(__E1000_DOWN, &adapter->flags))
4931                 e1000_irq_enable(adapter);
4932 }
4933 
4934 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4935                                  __be16 proto, u16 vid)
4936 {
4937         struct e1000_adapter *adapter = netdev_priv(netdev);
4938         struct e1000_hw *hw = &adapter->hw;
4939         u32 vfta, index;
4940 
4941         if ((hw->mng_cookie.status &
4942              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4943             (vid == adapter->mng_vlan_id))
4944                 return 0;
4945 
4946         if (!e1000_vlan_used(adapter))
4947                 e1000_vlan_filter_on_off(adapter, true);
4948 
4949         /* add VID to filter table */
4950         index = (vid >> 5) & 0x7F;
4951         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4952         vfta |= (1 << (vid & 0x1F));
4953         e1000_write_vfta(hw, index, vfta);
4954 
4955         set_bit(vid, adapter->active_vlans);
4956 
4957         return 0;
4958 }
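/* Worked example of the VFTA indexing above: the filter table is an array of
 * 128 32-bit registers covering all 4096 VLAN IDs (128 * 32 = 4096).  For
 * vid = 100, index = 100 >> 5 = 3 and the bit position is 100 & 0x1F = 4, so
 * bit 4 of VFTA[3] gets set; e1000_vlan_rx_kill_vid() below clears the same
 * bit again.
 */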
4959 
4960 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4961                                   __be16 proto, u16 vid)
4962 {
4963         struct e1000_adapter *adapter = netdev_priv(netdev);
4964         struct e1000_hw *hw = &adapter->hw;
4965         u32 vfta, index;
4966 
4967         if (!test_bit(__E1000_DOWN, &adapter->flags))
4968                 e1000_irq_disable(adapter);
4969         if (!test_bit(__E1000_DOWN, &adapter->flags))
4970                 e1000_irq_enable(adapter);
4971 
4972         /* remove VID from filter table */
4973         index = (vid >> 5) & 0x7F;
4974         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4975         vfta &= ~(1 << (vid & 0x1F));
4976         e1000_write_vfta(hw, index, vfta);
4977 
4978         clear_bit(vid, adapter->active_vlans);
4979 
4980         if (!e1000_vlan_used(adapter))
4981                 e1000_vlan_filter_on_off(adapter, false);
4982 
4983         return 0;
4984 }
4985 
4986 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4987 {
4988         u16 vid;
4989 
4990         if (!e1000_vlan_used(adapter))
4991                 return;
4992 
4993         e1000_vlan_filter_on_off(adapter, true);
4994         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4995                 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4996 }
4997 
4998 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4999 {
5000         struct e1000_hw *hw = &adapter->hw;
5001 
5002         hw->autoneg = 0;
5003 
5004         /* Make sure dplx is at most 1 bit and lsb of speed is not set
5005          * for the switch() below to work
5006          */
5007         if ((spd & 1) || (dplx & ~1))
5008                 goto err_inval;
5009 
5010         /* Fiber NICs only allow 1000 Mbps full duplex */
5011         if ((hw->media_type == e1000_media_type_fiber) &&
5012             spd != SPEED_1000 &&
5013             dplx != DUPLEX_FULL)
5014                 goto err_inval;
5015 
5016         switch (spd + dplx) {
5017         case SPEED_10 + DUPLEX_HALF:
5018                 hw->forced_speed_duplex = e1000_10_half;
5019                 break;
5020         case SPEED_10 + DUPLEX_FULL:
5021                 hw->forced_speed_duplex = e1000_10_full;
5022                 break;
5023         case SPEED_100 + DUPLEX_HALF:
5024                 hw->forced_speed_duplex = e1000_100_half;
5025                 break;
5026         case SPEED_100 + DUPLEX_FULL:
5027                 hw->forced_speed_duplex = e1000_100_full;
5028                 break;
5029         case SPEED_1000 + DUPLEX_FULL:
5030                 hw->autoneg = 1;
5031                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5032                 break;
5033         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5034         default:
5035                 goto err_inval;
5036         }
5037 
5038         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5039         hw->mdix = AUTO_ALL_MODES;
5040 
5041         return 0;
5042 
5043 err_inval:
5044         e_err(probe, "Unsupported Speed/Duplex configuration\n");
5045         return -EINVAL;
5046 }
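/* The switch above works because the ethtool constants make every valid
 * combination sum to a unique value: SPEED_10/100/1000 are 10/100/1000 and
 * DUPLEX_HALF/DUPLEX_FULL are 0/1, so for example SPEED_100 + DUPLEX_FULL is
 * 101, which can only mean 100 Mbps full duplex.  The guard that rejects an
 * odd speed or a duplex other than 0/1 is what keeps those sums unambiguous.
 */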
5047 
5048 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5049 {
5050         struct net_device *netdev = pci_get_drvdata(pdev);
5051         struct e1000_adapter *adapter = netdev_priv(netdev);
5052         struct e1000_hw *hw = &adapter->hw;
5053         u32 ctrl, ctrl_ext, rctl, status;
5054         u32 wufc = adapter->wol;
5055 #ifdef CONFIG_PM
5056         int retval = 0;
5057 #endif
5058 
5059         netif_device_detach(netdev);
5060 
5061         if (netif_running(netdev)) {
5062                 int count = E1000_CHECK_RESET_COUNT;
5063 
5064                 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5065                         usleep_range(10000, 20000);
5066 
5067                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5068                 e1000_down(adapter);
5069         }
5070 
5071 #ifdef CONFIG_PM
5072         retval = pci_save_state(pdev);
5073         if (retval)
5074                 return retval;
5075 #endif
5076 
5077         status = er32(STATUS);
5078         if (status & E1000_STATUS_LU)
5079                 wufc &= ~E1000_WUFC_LNKC;
5080 
5081         if (wufc) {
5082                 e1000_setup_rctl(adapter);
5083                 e1000_set_rx_mode(netdev);
5084 
5085                 rctl = er32(RCTL);
5086 
5087                 /* turn on all-multi mode if wake on multicast is enabled */
5088                 if (wufc & E1000_WUFC_MC)
5089                         rctl |= E1000_RCTL_MPE;
5090 
5091                 /* enable receives in the hardware */
5092                 ew32(RCTL, rctl | E1000_RCTL_EN);
5093 
5094                 if (hw->mac_type >= e1000_82540) {
5095                         ctrl = er32(CTRL);
5096                         /* advertise wake from D3Cold */
5097                         #define E1000_CTRL_ADVD3WUC 0x00100000
5098                         /* phy power management enable */
5099                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5100                         ctrl |= E1000_CTRL_ADVD3WUC |
5101                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5102                         ew32(CTRL, ctrl);
5103                 }
5104 
5105                 if (hw->media_type == e1000_media_type_fiber ||
5106                     hw->media_type == e1000_media_type_internal_serdes) {
5107                         /* keep the laser running in D3 */
5108                         ctrl_ext = er32(CTRL_EXT);
5109                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5110                         ew32(CTRL_EXT, ctrl_ext);
5111                 }
5112 
5113                 ew32(WUC, E1000_WUC_PME_EN);
5114                 ew32(WUFC, wufc);
5115         } else {
5116                 ew32(WUC, 0);
5117                 ew32(WUFC, 0);
5118         }
5119 
5120         e1000_release_manageability(adapter);
5121 
5122         *enable_wake = !!wufc;
5123 
5124         /* make sure adapter isn't asleep if manageability is enabled */
5125         if (adapter->en_mng_pt)
5126                 *enable_wake = true;
5127 
5128         if (netif_running(netdev))
5129                 e1000_free_irq(adapter);
5130 
5131         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5132                 pci_disable_device(pdev);
5133 
5134         return 0;
5135 }
5136 
5137 #ifdef CONFIG_PM
5138 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5139 {
5140         int retval;
5141         bool wake;
5142 
5143         retval = __e1000_shutdown(pdev, &wake);
5144         if (retval)
5145                 return retval;
5146 
5147         if (wake) {
5148                 pci_prepare_to_sleep(pdev);
5149         } else {
5150                 pci_wake_from_d3(pdev, false);
5151                 pci_set_power_state(pdev, PCI_D3hot);
5152         }
5153 
5154         return 0;
5155 }
5156 
5157 static int e1000_resume(struct pci_dev *pdev)
5158 {
5159         struct net_device *netdev = pci_get_drvdata(pdev);
5160         struct e1000_adapter *adapter = netdev_priv(netdev);
5161         struct e1000_hw *hw = &adapter->hw;
5162         u32 err;
5163 
5164         pci_set_power_state(pdev, PCI_D0);
5165         pci_restore_state(pdev);
5166         pci_save_state(pdev);
5167 
5168         if (adapter->need_ioport)
5169                 err = pci_enable_device(pdev);
5170         else
5171                 err = pci_enable_device_mem(pdev);
5172         if (err) {
5173                 pr_err("Cannot enable PCI device from suspend\n");
5174                 return err;
5175         }
5176 
5177         /* flush memory to make sure state is correct */
5178         smp_mb__before_atomic();
5179         clear_bit(__E1000_DISABLED, &adapter->flags);
5180         pci_set_master(pdev);
5181 
5182         pci_enable_wake(pdev, PCI_D3hot, 0);
5183         pci_enable_wake(pdev, PCI_D3cold, 0);
5184 
5185         if (netif_running(netdev)) {
5186                 err = e1000_request_irq(adapter);
5187                 if (err)
5188                         return err;
5189         }
5190 
5191         e1000_power_up_phy(adapter);
5192         e1000_reset(adapter);
5193         ew32(WUS, ~0);
5194 
5195         e1000_init_manageability(adapter);
5196 
5197         if (netif_running(netdev))
5198                 e1000_up(adapter);
5199 
5200         netif_device_attach(netdev);
5201 
5202         return 0;
5203 }
5204 #endif
5205 
5206 static void e1000_shutdown(struct pci_dev *pdev)
5207 {
5208         bool wake;
5209 
5210         __e1000_shutdown(pdev, &wake);
5211 
5212         if (system_state == SYSTEM_POWER_OFF) {
5213                 pci_wake_from_d3(pdev, wake);
5214                 pci_set_power_state(pdev, PCI_D3hot);
5215         }
5216 }
5217 
5218 #ifdef CONFIG_NET_POLL_CONTROLLER
5219 /* Polling 'interrupt' - used by things like netconsole to send skbs
5220  * without having to re-enable interrupts. It's not called while
5221  * the interrupt routine is executing.
5222  */
5223 static void e1000_netpoll(struct net_device *netdev)
5224 {
5225         struct e1000_adapter *adapter = netdev_priv(netdev);
5226 
5227         if (disable_hardirq(adapter->pdev->irq))
5228                 e1000_intr(adapter->pdev->irq, netdev);
5229         enable_irq(adapter->pdev->irq);
5230 }
5231 #endif
5232 
5233 /**
5234  * e1000_io_error_detected - called when PCI error is detected
5235  * @pdev: Pointer to PCI device
5236  * @state: The current pci connection state
5237  *
5238  * This function is called after a PCI bus error affecting
5239  * this device has been detected.
5240  */
5241 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5242                                                 pci_channel_state_t state)
5243 {
5244         struct net_device *netdev = pci_get_drvdata(pdev);
5245         struct e1000_adapter *adapter = netdev_priv(netdev);
5246 
5247         netif_device_detach(netdev);
5248 
5249         if (state == pci_channel_io_perm_failure)
5250                 return PCI_ERS_RESULT_DISCONNECT;
5251 
5252         if (netif_running(netdev))
5253                 e1000_down(adapter);
5254 
5255         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5256                 pci_disable_device(pdev);
5257 
5258         /* Request a slot reset. */
5259         return PCI_ERS_RESULT_NEED_RESET;
5260 }
5261 
5262 /**
5263  * e1000_io_slot_reset - called after the pci bus has been reset.
5264  * @pdev: Pointer to PCI device
5265  *
5266  * Restart the card from scratch, as if from a cold-boot. Implementation
5267  * resembles the first-half of the e1000_resume routine.
5268  */
5269 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5270 {
5271         struct net_device *netdev = pci_get_drvdata(pdev);
5272         struct e1000_adapter *adapter = netdev_priv(netdev);
5273         struct e1000_hw *hw = &adapter->hw;
5274         int err;
5275 
5276         if (adapter->need_ioport)
5277                 err = pci_enable_device(pdev);
5278         else
5279                 err = pci_enable_device_mem(pdev);
5280         if (err) {
5281                 pr_err("Cannot re-enable PCI device after reset.\n");
5282                 return PCI_ERS_RESULT_DISCONNECT;
5283         }
5284 
5285         /* flush memory to make sure state is correct */
5286         smp_mb__before_atomic();
5287         clear_bit(__E1000_DISABLED, &adapter->flags);
5288         pci_set_master(pdev);
5289 
5290         pci_enable_wake(pdev, PCI_D3hot, 0);
5291         pci_enable_wake(pdev, PCI_D3cold, 0);
5292 
5293         e1000_reset(adapter);
5294         ew32(WUS, ~0);
5295 
5296         return PCI_ERS_RESULT_RECOVERED;
5297 }
5298 
5299 /**
5300  * e1000_io_resume - called when traffic can start flowing again.
5301  * @pdev: Pointer to PCI device
5302  *
5303  * This callback is called when the error recovery driver tells us that
5304  * its OK to resume normal operation. Implementation resembles the
5305  * second-half of the e1000_resume routine.
5306  */
5307 static void e1000_io_resume(struct pci_dev *pdev)
5308 {
5309         struct net_device *netdev = pci_get_drvdata(pdev);
5310         struct e1000_adapter *adapter = netdev_priv(netdev);
5311 
5312         e1000_init_manageability(adapter);
5313 
5314         if (netif_running(netdev)) {
5315                 if (e1000_up(adapter)) {
5316                         pr_info("can't bring device back up after reset\n");
5317                         return;
5318                 }
5319         }
5320 
5321         netif_device_attach(netdev);
5322 }
5323 
5324 /* e1000_main.c */
