root/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. link_report
  2. enable_tx_fifo_drain
  3. disable_tx_fifo_drain
  4. t3_os_link_fault
  5. t3_os_link_changed
  6. t3_os_phymod_changed
  7. cxgb_set_rxmode
  8. link_start
  9. cxgb_disable_msi
  10. t3_async_intr_handler
  11. name_msix_vecs
  12. request_msix_data_irqs
  13. free_irq_resources
  14. await_mgmt_replies
  15. init_tp_parity
  16. setup_rss
  17. ring_dbs
  18. init_napi
  19. quiesce_rx
  20. enable_all_napi
  21. setup_sge_qsets
  22. attr_show
  23. attr_store
  24. set_nfilters
  25. store_nfilters
  26. set_nservers
  27. store_nservers
  28. tm_attr_show
  29. tm_attr_store
  30. offload_tx
  31. write_smt_entry
  32. init_smt
  33. init_port_mtus
  34. send_pktsched_cmd
  35. bind_qsets
  36. get_edc_fw_name
  37. t3_get_edc_fw
  38. upgrade_fw
  39. t3rev2char
  40. update_tpsram
  41. t3_synchronize_rx
  42. cxgb_vlan_mode
  43. cxgb_up
  44. cxgb_down
  45. schedule_chk_task
  46. offload_open
  47. offload_close
  48. cxgb_open
  49. __cxgb_close
  50. cxgb_close
  51. cxgb_get_stats
  52. get_msglevel
  53. set_msglevel
  54. get_sset_count
  55. get_regs_len
  56. get_eeprom_len
  57. get_drvinfo
  58. get_strings
  59. collect_sge_port_stats
  60. get_stats
  61. reg_block_dump
  62. get_regs
  63. restart_autoneg
  64. set_phys_id
  65. get_link_ksettings
  66. speed_duplex_to_caps
  67. set_link_ksettings
  68. get_pauseparam
  69. set_pauseparam
  70. get_sge_param
  71. set_sge_param
  72. set_coalesce
  73. get_coalesce
  74. get_eeprom
  75. set_eeprom
  76. get_wol
  77. in_range
  78. cxgb_extension_ioctl
  79. cxgb_ioctl
  80. cxgb_change_mtu
  81. cxgb_set_mac_addr
  82. cxgb_fix_features
  83. cxgb_set_features
  84. cxgb_netpoll
  85. mac_stats_update
  86. check_link_status
  87. check_t3b2_mac
  88. t3_adap_check_task
  89. db_full_task
  90. db_empty_task
  91. db_drop_task
  92. ext_intr_task
  93. t3_os_ext_intr_handler
  94. t3_os_link_fault_handler
  95. t3_adapter_error
  96. t3_reenable_adapter
  97. t3_resume_ports
  98. fatal_error_task
  99. t3_fatal_err
  100. t3_io_error_detected
  101. t3_io_slot_reset
  102. t3_io_resume
  103. set_nqsets
  104. cxgb_enable_msix
  105. print_port_info
  106. cxgb3_init_iscsi_mac
  107. init_one
  108. remove_one
  109. cxgb3_init_module
  110. cxgb3_cleanup_module

   1 /*
   2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * OpenIB.org BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  */
  32 
  33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  34 
  35 #include <linux/module.h>
  36 #include <linux/init.h>
  37 #include <linux/pci.h>
  38 #include <linux/dma-mapping.h>
  39 #include <linux/netdevice.h>
  40 #include <linux/etherdevice.h>
  41 #include <linux/if_vlan.h>
  42 #include <linux/mdio.h>
  43 #include <linux/sockios.h>
  44 #include <linux/workqueue.h>
  45 #include <linux/proc_fs.h>
  46 #include <linux/rtnetlink.h>
  47 #include <linux/firmware.h>
  48 #include <linux/log2.h>
  49 #include <linux/stringify.h>
  50 #include <linux/sched.h>
  51 #include <linux/slab.h>
  52 #include <linux/uaccess.h>
  53 #include <linux/nospec.h>
  54 
  55 #include "common.h"
  56 #include "cxgb3_ioctl.h"
  57 #include "regs.h"
  58 #include "cxgb3_offload.h"
  59 #include "version.h"
  60 
  61 #include "cxgb3_ctl_defs.h"
  62 #include "t3_cpl.h"
  63 #include "firmware_exports.h"
  64 
  65 enum {
  66         MAX_TXQ_ENTRIES = 16384,
  67         MAX_CTRL_TXQ_ENTRIES = 1024,
  68         MAX_RSPQ_ENTRIES = 16384,
  69         MAX_RX_BUFFERS = 16384,
  70         MAX_RX_JUMBO_BUFFERS = 16384,
  71         MIN_TXQ_ENTRIES = 4,
  72         MIN_CTRL_TXQ_ENTRIES = 4,
  73         MIN_RSPQ_ENTRIES = 32,
  74         MIN_FL_ENTRIES = 32
  75 };
  76 
  77 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  78 
  79 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  80                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
  81                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
  82 
  83 #define EEPROM_MAGIC 0x38E2F10C
  84 
  85 #define CH_DEVICE(devid, idx) \
  86         { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
  87 
  88 static const struct pci_device_id cxgb3_pci_tbl[] = {
  89         CH_DEVICE(0x20, 0),     /* PE9000 */
  90         CH_DEVICE(0x21, 1),     /* T302E */
  91         CH_DEVICE(0x22, 2),     /* T310E */
  92         CH_DEVICE(0x23, 3),     /* T320X */
  93         CH_DEVICE(0x24, 1),     /* T302X */
  94         CH_DEVICE(0x25, 3),     /* T320E */
  95         CH_DEVICE(0x26, 2),     /* T310X */
  96         CH_DEVICE(0x30, 2),     /* T3B10 */
  97         CH_DEVICE(0x31, 3),     /* T3B20 */
  98         CH_DEVICE(0x32, 1),     /* T3B02 */
  99         CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
 100         CH_DEVICE(0x36, 3),     /* S320E-CR */
 101         CH_DEVICE(0x37, 7),     /* N320E-G2 */
 102         {0,}
 103 };
 104 
 105 MODULE_DESCRIPTION(DRV_DESC);
 106 MODULE_AUTHOR("Chelsio Communications");
 107 MODULE_LICENSE("Dual BSD/GPL");
 108 MODULE_VERSION(DRV_VERSION);
 109 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
 110 
 111 static int dflt_msg_enable = DFLT_MSG_ENABLE;
 112 
 113 module_param(dflt_msg_enable, int, 0644);
 114 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
 115 
 116 /*
 117  * The driver uses the best interrupt scheme available on a platform in the
 118  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 119  * of these schemes the driver may consider as follows:
 120  *
 121  * msi = 2: choose from among all three options
 122  * msi = 1: only consider MSI and pin interrupts
 123  * msi = 0: force pin interrupts
 124  */
 125 static int msi = 2;
 126 
 127 module_param(msi, int, 0644);
 128 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
 129 
 130 /*
 131  * The driver enables offload as a default.
 132  * To disable it, use ofld_disable = 1.
 133  */
 134 
 135 static int ofld_disable = 0;
 136 
 137 module_param(ofld_disable, int, 0644);
 138 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
 139 
 140 /*
 141  * We have work elements that we need to cancel when an interface is taken
 142  * down.  Normally the work elements would be executed by keventd but that
 143  * can deadlock because of linkwatch.  If our close method takes the rtnl
 144  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 145  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 146  * for our work to complete.  Get our own work queue to solve this.
 147  */
 148 struct workqueue_struct *cxgb3_wq;
 149 
 150 /**
 151  *      link_report - show link status and link speed/duplex
 152  *      @p: the port whose settings are to be reported
 153  *
 154  *      Shows the link status, speed, and duplex of a port.
 155  */
 156 static void link_report(struct net_device *dev)
 157 {
 158         if (!netif_carrier_ok(dev))
 159                 netdev_info(dev, "link down\n");
 160         else {
 161                 const char *s = "10Mbps";
 162                 const struct port_info *p = netdev_priv(dev);
 163 
 164                 switch (p->link_config.speed) {
 165                 case SPEED_10000:
 166                         s = "10Gbps";
 167                         break;
 168                 case SPEED_1000:
 169                         s = "1000Mbps";
 170                         break;
 171                 case SPEED_100:
 172                         s = "100Mbps";
 173                         break;
 174                 }
 175 
 176                 netdev_info(dev, "link up, %s, %s-duplex\n",
 177                             s, p->link_config.duplex == DUPLEX_FULL
 178                             ? "full" : "half");
 179         }
 180 }
 181 
 182 static void enable_tx_fifo_drain(struct adapter *adapter,
 183                                  struct port_info *pi)
 184 {
 185         t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
 186                          F_ENDROPPKT);
 187         t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
 188         t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
 189         t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
 190 }
 191 
 192 static void disable_tx_fifo_drain(struct adapter *adapter,
 193                                   struct port_info *pi)
 194 {
 195         t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
 196                          F_ENDROPPKT, 0);
 197 }
 198 
 199 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
 200 {
 201         struct net_device *dev = adap->port[port_id];
 202         struct port_info *pi = netdev_priv(dev);
 203 
 204         if (state == netif_carrier_ok(dev))
 205                 return;
 206 
 207         if (state) {
 208                 struct cmac *mac = &pi->mac;
 209 
 210                 netif_carrier_on(dev);
 211 
 212                 disable_tx_fifo_drain(adap, pi);
 213 
 214                 /* Clear local faults */
 215                 t3_xgm_intr_disable(adap, pi->port_id);
 216                 t3_read_reg(adap, A_XGM_INT_STATUS +
 217                                     pi->mac.offset);
 218                 t3_write_reg(adap,
 219                              A_XGM_INT_CAUSE + pi->mac.offset,
 220                              F_XGM_INT);
 221 
 222                 t3_set_reg_field(adap,
 223                                  A_XGM_INT_ENABLE +
 224                                  pi->mac.offset,
 225                                  F_XGM_INT, F_XGM_INT);
 226                 t3_xgm_intr_enable(adap, pi->port_id);
 227 
 228                 t3_mac_enable(mac, MAC_DIRECTION_TX);
 229         } else {
 230                 netif_carrier_off(dev);
 231 
 232                 /* Flush TX FIFO */
 233                 enable_tx_fifo_drain(adap, pi);
 234         }
 235         link_report(dev);
 236 }
 237 
 238 /**
 239  *      t3_os_link_changed - handle link status changes
 240  *      @adapter: the adapter associated with the link change
 241  *      @port_id: the port index whose limk status has changed
 242  *      @link_stat: the new status of the link
 243  *      @speed: the new speed setting
 244  *      @duplex: the new duplex setting
 245  *      @pause: the new flow-control setting
 246  *
 247  *      This is the OS-dependent handler for link status changes.  The OS
 248  *      neutral handler takes care of most of the processing for these events,
 249  *      then calls this handler for any OS-specific processing.
 250  */
 251 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
 252                         int speed, int duplex, int pause)
 253 {
 254         struct net_device *dev = adapter->port[port_id];
 255         struct port_info *pi = netdev_priv(dev);
 256         struct cmac *mac = &pi->mac;
 257 
 258         /* Skip changes from disabled ports. */
 259         if (!netif_running(dev))
 260                 return;
 261 
 262         if (link_stat != netif_carrier_ok(dev)) {
 263                 if (link_stat) {
 264                         disable_tx_fifo_drain(adapter, pi);
 265 
 266                         t3_mac_enable(mac, MAC_DIRECTION_RX);
 267 
 268                         /* Clear local faults */
 269                         t3_xgm_intr_disable(adapter, pi->port_id);
 270                         t3_read_reg(adapter, A_XGM_INT_STATUS +
 271                                     pi->mac.offset);
 272                         t3_write_reg(adapter,
 273                                      A_XGM_INT_CAUSE + pi->mac.offset,
 274                                      F_XGM_INT);
 275 
 276                         t3_set_reg_field(adapter,
 277                                          A_XGM_INT_ENABLE + pi->mac.offset,
 278                                          F_XGM_INT, F_XGM_INT);
 279                         t3_xgm_intr_enable(adapter, pi->port_id);
 280 
 281                         netif_carrier_on(dev);
 282                 } else {
 283                         netif_carrier_off(dev);
 284 
 285                         t3_xgm_intr_disable(adapter, pi->port_id);
 286                         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
 287                         t3_set_reg_field(adapter,
 288                                          A_XGM_INT_ENABLE + pi->mac.offset,
 289                                          F_XGM_INT, 0);
 290 
 291                         if (is_10G(adapter))
 292                                 pi->phy.ops->power_down(&pi->phy, 1);
 293 
 294                         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
 295                         t3_mac_disable(mac, MAC_DIRECTION_RX);
 296                         t3_link_start(&pi->phy, mac, &pi->link_config);
 297 
 298                         /* Flush TX FIFO */
 299                         enable_tx_fifo_drain(adapter, pi);
 300                 }
 301 
 302                 link_report(dev);
 303         }
 304 }
 305 
 306 /**
 307  *      t3_os_phymod_changed - handle PHY module changes
 308  *      @phy: the PHY reporting the module change
 309  *      @mod_type: new module type
 310  *
 311  *      This is the OS-dependent handler for PHY module changes.  It is
 312  *      invoked when a PHY module is removed or inserted for any OS-specific
 313  *      processing.
 314  */
 315 void t3_os_phymod_changed(struct adapter *adap, int port_id)
 316 {
 317         static const char *mod_str[] = {
 318                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
 319         };
 320 
 321         const struct net_device *dev = adap->port[port_id];
 322         const struct port_info *pi = netdev_priv(dev);
 323 
 324         if (pi->phy.modtype == phy_modtype_none)
 325                 netdev_info(dev, "PHY module unplugged\n");
 326         else
 327                 netdev_info(dev, "%s PHY module inserted\n",
 328                             mod_str[pi->phy.modtype]);
 329 }
 330 
 331 static void cxgb_set_rxmode(struct net_device *dev)
 332 {
 333         struct port_info *pi = netdev_priv(dev);
 334 
 335         t3_mac_set_rx_mode(&pi->mac, dev);
 336 }
 337 
 338 /**
 339  *      link_start - enable a port
 340  *      @dev: the device to enable
 341  *
 342  *      Performs the MAC and PHY actions needed to enable a port.
 343  */
 344 static void link_start(struct net_device *dev)
 345 {
 346         struct port_info *pi = netdev_priv(dev);
 347         struct cmac *mac = &pi->mac;
 348 
 349         t3_mac_reset(mac);
 350         t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
 351         t3_mac_set_mtu(mac, dev->mtu);
 352         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
 353         t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
 354         t3_mac_set_rx_mode(mac, dev);
 355         t3_link_start(&pi->phy, mac, &pi->link_config);
 356         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 357 }
 358 
 359 static inline void cxgb_disable_msi(struct adapter *adapter)
 360 {
 361         if (adapter->flags & USING_MSIX) {
 362                 pci_disable_msix(adapter->pdev);
 363                 adapter->flags &= ~USING_MSIX;
 364         } else if (adapter->flags & USING_MSI) {
 365                 pci_disable_msi(adapter->pdev);
 366                 adapter->flags &= ~USING_MSI;
 367         }
 368 }
 369 
 370 /*
 371  * Interrupt handler for asynchronous events used with MSI-X.
 372  */
 373 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
 374 {
 375         t3_slow_intr_handler(cookie);
 376         return IRQ_HANDLED;
 377 }
 378 
 379 /*
 380  * Name the MSI-X interrupts.
 381  */
 382 static void name_msix_vecs(struct adapter *adap)
 383 {
 384         int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
 385 
 386         snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
 387         adap->msix_info[0].desc[n] = 0;
 388 
 389         for_each_port(adap, j) {
 390                 struct net_device *d = adap->port[j];
 391                 const struct port_info *pi = netdev_priv(d);
 392 
 393                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
 394                         snprintf(adap->msix_info[msi_idx].desc, n,
 395                                  "%s-%d", d->name, pi->first_qset + i);
 396                         adap->msix_info[msi_idx].desc[n] = 0;
 397                 }
 398         }
 399 }
 400 
 401 static int request_msix_data_irqs(struct adapter *adap)
 402 {
 403         int i, j, err, qidx = 0;
 404 
 405         for_each_port(adap, i) {
 406                 int nqsets = adap2pinfo(adap, i)->nqsets;
 407 
 408                 for (j = 0; j < nqsets; ++j) {
 409                         err = request_irq(adap->msix_info[qidx + 1].vec,
 410                                           t3_intr_handler(adap,
 411                                                           adap->sge.qs[qidx].
 412                                                           rspq.polling), 0,
 413                                           adap->msix_info[qidx + 1].desc,
 414                                           &adap->sge.qs[qidx]);
 415                         if (err) {
 416                                 while (--qidx >= 0)
 417                                         free_irq(adap->msix_info[qidx + 1].vec,
 418                                                  &adap->sge.qs[qidx]);
 419                                 return err;
 420                         }
 421                         qidx++;
 422                 }
 423         }
 424         return 0;
 425 }
 426 
 427 static void free_irq_resources(struct adapter *adapter)
 428 {
 429         if (adapter->flags & USING_MSIX) {
 430                 int i, n = 0;
 431 
 432                 free_irq(adapter->msix_info[0].vec, adapter);
 433                 for_each_port(adapter, i)
 434                         n += adap2pinfo(adapter, i)->nqsets;
 435 
 436                 for (i = 0; i < n; ++i)
 437                         free_irq(adapter->msix_info[i + 1].vec,
 438                                  &adapter->sge.qs[i]);
 439         } else
 440                 free_irq(adapter->pdev->irq, adapter);
 441 }
 442 
 443 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
 444                               unsigned long n)
 445 {
 446         int attempts = 10;
 447 
 448         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
 449                 if (!--attempts)
 450                         return -ETIMEDOUT;
 451                 msleep(10);
 452         }
 453         return 0;
 454 }
 455 
 456 static int init_tp_parity(struct adapter *adap)
 457 {
 458         int i;
 459         struct sk_buff *skb;
 460         struct cpl_set_tcb_field *greq;
 461         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
 462 
 463         t3_tp_set_offload_mode(adap, 1);
 464 
 465         for (i = 0; i < 16; i++) {
 466                 struct cpl_smt_write_req *req;
 467 
 468                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 469                 if (!skb)
 470                         skb = adap->nofail_skb;
 471                 if (!skb)
 472                         goto alloc_skb_fail;
 473 
 474                 req = __skb_put_zero(skb, sizeof(*req));
 475                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 476                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
 477                 req->mtu_idx = NMTUS - 1;
 478                 req->iff = i;
 479                 t3_mgmt_tx(adap, skb);
 480                 if (skb == adap->nofail_skb) {
 481                         await_mgmt_replies(adap, cnt, i + 1);
 482                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
 483                         if (!adap->nofail_skb)
 484                                 goto alloc_skb_fail;
 485                 }
 486         }
 487 
 488         for (i = 0; i < 2048; i++) {
 489                 struct cpl_l2t_write_req *req;
 490 
 491                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 492                 if (!skb)
 493                         skb = adap->nofail_skb;
 494                 if (!skb)
 495                         goto alloc_skb_fail;
 496 
 497                 req = __skb_put_zero(skb, sizeof(*req));
 498                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 499                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
 500                 req->params = htonl(V_L2T_W_IDX(i));
 501                 t3_mgmt_tx(adap, skb);
 502                 if (skb == adap->nofail_skb) {
 503                         await_mgmt_replies(adap, cnt, 16 + i + 1);
 504                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
 505                         if (!adap->nofail_skb)
 506                                 goto alloc_skb_fail;
 507                 }
 508         }
 509 
 510         for (i = 0; i < 2048; i++) {
 511                 struct cpl_rte_write_req *req;
 512 
 513                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 514                 if (!skb)
 515                         skb = adap->nofail_skb;
 516                 if (!skb)
 517                         goto alloc_skb_fail;
 518 
 519                 req = __skb_put_zero(skb, sizeof(*req));
 520                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 521                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
 522                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
 523                 t3_mgmt_tx(adap, skb);
 524                 if (skb == adap->nofail_skb) {
 525                         await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
 526                         adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
 527                         if (!adap->nofail_skb)
 528                                 goto alloc_skb_fail;
 529                 }
 530         }
 531 
 532         skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
 533         if (!skb)
 534                 skb = adap->nofail_skb;
 535         if (!skb)
 536                 goto alloc_skb_fail;
 537 
 538         greq = __skb_put_zero(skb, sizeof(*greq));
 539         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 540         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
 541         greq->mask = cpu_to_be64(1);
 542         t3_mgmt_tx(adap, skb);
 543 
 544         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
 545         if (skb == adap->nofail_skb) {
 546                 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
 547                 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
 548         }
 549 
 550         t3_tp_set_offload_mode(adap, 0);
 551         return i;
 552 
 553 alloc_skb_fail:
 554         t3_tp_set_offload_mode(adap, 0);
 555         return -ENOMEM;
 556 }
 557 
 558 /**
 559  *      setup_rss - configure RSS
 560  *      @adap: the adapter
 561  *
 562  *      Sets up RSS to distribute packets to multiple receive queues.  We
 563  *      configure the RSS CPU lookup table to distribute to the number of HW
 564  *      receive queues, and the response queue lookup table to narrow that
 565  *      down to the response queues actually configured for each port.
 566  *      We always configure the RSS mapping for two ports since the mapping
 567  *      table has plenty of entries.
 568  */
 569 static void setup_rss(struct adapter *adap)
 570 {
 571         int i;
 572         unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
 573         unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
 574         u8 cpus[SGE_QSETS + 1];
 575         u16 rspq_map[RSS_TABLE_SIZE + 1];
 576 
 577         for (i = 0; i < SGE_QSETS; ++i)
 578                 cpus[i] = i;
 579         cpus[SGE_QSETS] = 0xff; /* terminator */
 580 
 581         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
 582                 rspq_map[i] = i % nq0;
 583                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
 584         }
 585         rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
 586 
 587         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
 588                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
 589                       V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
 590 }
 591 
 592 static void ring_dbs(struct adapter *adap)
 593 {
 594         int i, j;
 595 
 596         for (i = 0; i < SGE_QSETS; i++) {
 597                 struct sge_qset *qs = &adap->sge.qs[i];
 598 
 599                 if (qs->adap)
 600                         for (j = 0; j < SGE_TXQ_PER_SET; j++)
 601                                 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
 602         }
 603 }
 604 
 605 static void init_napi(struct adapter *adap)
 606 {
 607         int i;
 608 
 609         for (i = 0; i < SGE_QSETS; i++) {
 610                 struct sge_qset *qs = &adap->sge.qs[i];
 611 
 612                 if (qs->adap)
 613                         netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
 614                                        64);
 615         }
 616 
 617         /*
 618          * netif_napi_add() can be called only once per napi_struct because it
 619          * adds each new napi_struct to a list.  Be careful not to call it a
 620          * second time, e.g., during EEH recovery, by making a note of it.
 621          */
 622         adap->flags |= NAPI_INIT;
 623 }
 624 
 625 /*
 626  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 627  * both netdevices representing interfaces and the dummy ones for the extra
 628  * queues.
 629  */
 630 static void quiesce_rx(struct adapter *adap)
 631 {
 632         int i;
 633 
 634         for (i = 0; i < SGE_QSETS; i++)
 635                 if (adap->sge.qs[i].adap)
 636                         napi_disable(&adap->sge.qs[i].napi);
 637 }
 638 
 639 static void enable_all_napi(struct adapter *adap)
 640 {
 641         int i;
 642         for (i = 0; i < SGE_QSETS; i++)
 643                 if (adap->sge.qs[i].adap)
 644                         napi_enable(&adap->sge.qs[i].napi);
 645 }
 646 
 647 /**
 648  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 649  *      @adap: the adapter
 650  *
 651  *      Determines how many sets of SGE queues to use and initializes them.
 652  *      We support multiple queue sets per port if we have MSI-X, otherwise
 653  *      just one queue set per port.
 654  */
 655 static int setup_sge_qsets(struct adapter *adap)
 656 {
 657         int i, j, err, irq_idx = 0, qset_idx = 0;
 658         unsigned int ntxq = SGE_TXQ_PER_SET;
 659 
 660         if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
 661                 irq_idx = -1;
 662 
 663         for_each_port(adap, i) {
 664                 struct net_device *dev = adap->port[i];
 665                 struct port_info *pi = netdev_priv(dev);
 666 
 667                 pi->qs = &adap->sge.qs[pi->first_qset];
 668                 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
 669                         err = t3_sge_alloc_qset(adap, qset_idx, 1,
 670                                 (adap->flags & USING_MSIX) ? qset_idx + 1 :
 671                                                              irq_idx,
 672                                 &adap->params.sge.qset[qset_idx], ntxq, dev,
 673                                 netdev_get_tx_queue(dev, j));
 674                         if (err) {
 675                                 t3_free_sge_resources(adap);
 676                                 return err;
 677                         }
 678                 }
 679         }
 680 
 681         return 0;
 682 }
 683 
 684 static ssize_t attr_show(struct device *d, char *buf,
 685                          ssize_t(*format) (struct net_device *, char *))
 686 {
 687         ssize_t len;
 688 
 689         /* Synchronize with ioctls that may shut down the device */
 690         rtnl_lock();
 691         len = (*format) (to_net_dev(d), buf);
 692         rtnl_unlock();
 693         return len;
 694 }
 695 
 696 static ssize_t attr_store(struct device *d,
 697                           const char *buf, size_t len,
 698                           ssize_t(*set) (struct net_device *, unsigned int),
 699                           unsigned int min_val, unsigned int max_val)
 700 {
 701         ssize_t ret;
 702         unsigned int val;
 703 
 704         if (!capable(CAP_NET_ADMIN))
 705                 return -EPERM;
 706 
 707         ret = kstrtouint(buf, 0, &val);
 708         if (ret)
 709                 return ret;
 710         if (val < min_val || val > max_val)
 711                 return -EINVAL;
 712 
 713         rtnl_lock();
 714         ret = (*set) (to_net_dev(d), val);
 715         if (!ret)
 716                 ret = len;
 717         rtnl_unlock();
 718         return ret;
 719 }
 720 
 721 #define CXGB3_SHOW(name, val_expr) \
 722 static ssize_t format_##name(struct net_device *dev, char *buf) \
 723 { \
 724         struct port_info *pi = netdev_priv(dev); \
 725         struct adapter *adap = pi->adapter; \
 726         return sprintf(buf, "%u\n", val_expr); \
 727 } \
 728 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
 729                            char *buf) \
 730 { \
 731         return attr_show(d, buf, format_##name); \
 732 }
 733 
 734 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
 735 {
 736         struct port_info *pi = netdev_priv(dev);
 737         struct adapter *adap = pi->adapter;
 738         int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
 739 
 740         if (adap->flags & FULL_INIT_DONE)
 741                 return -EBUSY;
 742         if (val && adap->params.rev == 0)
 743                 return -EINVAL;
 744         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
 745             min_tids)
 746                 return -EINVAL;
 747         adap->params.mc5.nfilters = val;
 748         return 0;
 749 }
 750 
 751 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
 752                               const char *buf, size_t len)
 753 {
 754         return attr_store(d, buf, len, set_nfilters, 0, ~0);
 755 }
 756 
 757 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
 758 {
 759         struct port_info *pi = netdev_priv(dev);
 760         struct adapter *adap = pi->adapter;
 761 
 762         if (adap->flags & FULL_INIT_DONE)
 763                 return -EBUSY;
 764         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
 765             MC5_MIN_TIDS)
 766                 return -EINVAL;
 767         adap->params.mc5.nservers = val;
 768         return 0;
 769 }
 770 
 771 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
 772                               const char *buf, size_t len)
 773 {
 774         return attr_store(d, buf, len, set_nservers, 0, ~0);
 775 }
 776 
 777 #define CXGB3_ATTR_R(name, val_expr) \
 778 CXGB3_SHOW(name, val_expr) \
 779 static DEVICE_ATTR(name, 0444, show_##name, NULL)
 780 
 781 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
 782 CXGB3_SHOW(name, val_expr) \
 783 static DEVICE_ATTR(name, 0644, show_##name, store_method)
 784 
 785 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
 786 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
 787 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
 788 
 789 static struct attribute *cxgb3_attrs[] = {
 790         &dev_attr_cam_size.attr,
 791         &dev_attr_nfilters.attr,
 792         &dev_attr_nservers.attr,
 793         NULL
 794 };
 795 
 796 static const struct attribute_group cxgb3_attr_group = {
 797         .attrs = cxgb3_attrs,
 798 };
 799 
 800 static ssize_t tm_attr_show(struct device *d,
 801                             char *buf, int sched)
 802 {
 803         struct port_info *pi = netdev_priv(to_net_dev(d));
 804         struct adapter *adap = pi->adapter;
 805         unsigned int v, addr, bpt, cpt;
 806         ssize_t len;
 807 
 808         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
 809         rtnl_lock();
 810         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
 811         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
 812         if (sched & 1)
 813                 v >>= 16;
 814         bpt = (v >> 8) & 0xff;
 815         cpt = v & 0xff;
 816         if (!cpt)
 817                 len = sprintf(buf, "disabled\n");
 818         else {
 819                 v = (adap->params.vpd.cclk * 1000) / cpt;
 820                 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
 821         }
 822         rtnl_unlock();
 823         return len;
 824 }
 825 
 826 static ssize_t tm_attr_store(struct device *d,
 827                              const char *buf, size_t len, int sched)
 828 {
 829         struct port_info *pi = netdev_priv(to_net_dev(d));
 830         struct adapter *adap = pi->adapter;
 831         unsigned int val;
 832         ssize_t ret;
 833 
 834         if (!capable(CAP_NET_ADMIN))
 835                 return -EPERM;
 836 
 837         ret = kstrtouint(buf, 0, &val);
 838         if (ret)
 839                 return ret;
 840         if (val > 10000000)
 841                 return -EINVAL;
 842 
 843         rtnl_lock();
 844         ret = t3_config_sched(adap, val, sched);
 845         if (!ret)
 846                 ret = len;
 847         rtnl_unlock();
 848         return ret;
 849 }
 850 
 851 #define TM_ATTR(name, sched) \
 852 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
 853                            char *buf) \
 854 { \
 855         return tm_attr_show(d, buf, sched); \
 856 } \
 857 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
 858                             const char *buf, size_t len) \
 859 { \
 860         return tm_attr_store(d, buf, len, sched); \
 861 } \
 862 static DEVICE_ATTR(name, 0644, show_##name, store_##name)
 863 
 864 TM_ATTR(sched0, 0);
 865 TM_ATTR(sched1, 1);
 866 TM_ATTR(sched2, 2);
 867 TM_ATTR(sched3, 3);
 868 TM_ATTR(sched4, 4);
 869 TM_ATTR(sched5, 5);
 870 TM_ATTR(sched6, 6);
 871 TM_ATTR(sched7, 7);
 872 
 873 static struct attribute *offload_attrs[] = {
 874         &dev_attr_sched0.attr,
 875         &dev_attr_sched1.attr,
 876         &dev_attr_sched2.attr,
 877         &dev_attr_sched3.attr,
 878         &dev_attr_sched4.attr,
 879         &dev_attr_sched5.attr,
 880         &dev_attr_sched6.attr,
 881         &dev_attr_sched7.attr,
 882         NULL
 883 };
 884 
 885 static const struct attribute_group offload_attr_group = {
 886         .attrs = offload_attrs,
 887 };
 888 
 889 /*
 890  * Sends an sk_buff to an offload queue driver
 891  * after dealing with any active network taps.
 892  */
 893 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
 894 {
 895         int ret;
 896 
 897         local_bh_disable();
 898         ret = t3_offload_tx(tdev, skb);
 899         local_bh_enable();
 900         return ret;
 901 }
 902 
 903 static int write_smt_entry(struct adapter *adapter, int idx)
 904 {
 905         struct cpl_smt_write_req *req;
 906         struct port_info *pi = netdev_priv(adapter->port[idx]);
 907         struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 908 
 909         if (!skb)
 910                 return -ENOMEM;
 911 
 912         req = __skb_put(skb, sizeof(*req));
 913         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 914         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
 915         req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
 916         req->iff = idx;
 917         memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
 918         memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
 919         skb->priority = 1;
 920         offload_tx(&adapter->tdev, skb);
 921         return 0;
 922 }
 923 
 924 static int init_smt(struct adapter *adapter)
 925 {
 926         int i;
 927 
 928         for_each_port(adapter, i)
 929             write_smt_entry(adapter, i);
 930         return 0;
 931 }
 932 
 933 static void init_port_mtus(struct adapter *adapter)
 934 {
 935         unsigned int mtus = adapter->port[0]->mtu;
 936 
 937         if (adapter->port[1])
 938                 mtus |= adapter->port[1]->mtu << 16;
 939         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
 940 }
 941 
 942 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
 943                               int hi, int port)
 944 {
 945         struct sk_buff *skb;
 946         struct mngt_pktsched_wr *req;
 947         int ret;
 948 
 949         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
 950         if (!skb)
 951                 skb = adap->nofail_skb;
 952         if (!skb)
 953                 return -ENOMEM;
 954 
 955         req = skb_put(skb, sizeof(*req));
 956         req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
 957         req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
 958         req->sched = sched;
 959         req->idx = qidx;
 960         req->min = lo;
 961         req->max = hi;
 962         req->binding = port;
 963         ret = t3_mgmt_tx(adap, skb);
 964         if (skb == adap->nofail_skb) {
 965                 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
 966                                              GFP_KERNEL);
 967                 if (!adap->nofail_skb)
 968                         ret = -ENOMEM;
 969         }
 970 
 971         return ret;
 972 }
 973 
 974 static int bind_qsets(struct adapter *adap)
 975 {
 976         int i, j, err = 0;
 977 
 978         for_each_port(adap, i) {
 979                 const struct port_info *pi = adap2pinfo(adap, i);
 980 
 981                 for (j = 0; j < pi->nqsets; ++j) {
 982                         int ret = send_pktsched_cmd(adap, 1,
 983                                                     pi->first_qset + j, -1,
 984                                                     -1, i);
 985                         if (ret)
 986                                 err = ret;
 987                 }
 988         }
 989 
 990         return err;
 991 }
 992 
 993 #define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
 994         __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
 995 #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
 996 #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
 997         __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
 998 #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
 999 #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
1000 #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
1001 #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
1002 MODULE_FIRMWARE(FW_FNAME);
1003 MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
1004 MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
1005 MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
1006 MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1007 MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
1008 
1009 static inline const char *get_edc_fw_name(int edc_idx)
1010 {
1011         const char *fw_name = NULL;
1012 
1013         switch (edc_idx) {
1014         case EDC_OPT_AEL2005:
1015                 fw_name = AEL2005_OPT_EDC_NAME;
1016                 break;
1017         case EDC_TWX_AEL2005:
1018                 fw_name = AEL2005_TWX_EDC_NAME;
1019                 break;
1020         case EDC_TWX_AEL2020:
1021                 fw_name = AEL2020_TWX_EDC_NAME;
1022                 break;
1023         }
1024         return fw_name;
1025 }
1026 
1027 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1028 {
1029         struct adapter *adapter = phy->adapter;
1030         const struct firmware *fw;
1031         const char *fw_name;
1032         u32 csum;
1033         const __be32 *p;
1034         u16 *cache = phy->phy_cache;
1035         int i, ret = -EINVAL;
1036 
1037         fw_name = get_edc_fw_name(edc_idx);
1038         if (fw_name)
1039                 ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
1040         if (ret < 0) {
1041                 dev_err(&adapter->pdev->dev,
1042                         "could not upgrade firmware: unable to load %s\n",
1043                         fw_name);
1044                 return ret;
1045         }
1046 
1047         /* check size, take checksum in account */
1048         if (fw->size > size + 4) {
1049                 CH_ERR(adapter, "firmware image too large %u, expected %d\n",
1050                        (unsigned int)fw->size, size + 4);
1051                 ret = -EINVAL;
1052         }
1053 
1054         /* compute checksum */
1055         p = (const __be32 *)fw->data;
1056         for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
1057                 csum += ntohl(p[i]);
1058 
1059         if (csum != 0xffffffff) {
1060                 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1061                        csum);
1062                 ret = -EINVAL;
1063         }
1064 
1065         for (i = 0; i < size / 4 ; i++) {
1066                 *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
1067                 *cache++ = be32_to_cpu(p[i]) & 0xffff;
1068         }
1069 
1070         release_firmware(fw);
1071 
1072         return ret;
1073 }
1074 
1075 static int upgrade_fw(struct adapter *adap)
1076 {
1077         int ret;
1078         const struct firmware *fw;
1079         struct device *dev = &adap->pdev->dev;
1080 
1081         ret = request_firmware(&fw, FW_FNAME, dev);
1082         if (ret < 0) {
1083                 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1084                         FW_FNAME);
1085                 return ret;
1086         }
1087         ret = t3_load_fw(adap, fw->data, fw->size);
1088         release_firmware(fw);
1089 
1090         if (ret == 0)
1091                 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
1092                          FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1093         else
1094                 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
1095                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
1096 
1097         return ret;
1098 }
1099 
1100 static inline char t3rev2char(struct adapter *adapter)
1101 {
1102         char rev = 0;
1103 
1104         switch(adapter->params.rev) {
1105         case T3_REV_B:
1106         case T3_REV_B2:
1107                 rev = 'b';
1108                 break;
1109         case T3_REV_C:
1110                 rev = 'c';
1111                 break;
1112         }
1113         return rev;
1114 }
1115 
1116 static int update_tpsram(struct adapter *adap)
1117 {
1118         const struct firmware *tpsram;
1119         char buf[64];
1120         struct device *dev = &adap->pdev->dev;
1121         int ret;
1122         char rev;
1123 
1124         rev = t3rev2char(adap);
1125         if (!rev)
1126                 return 0;
1127 
1128         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1129 
1130         ret = request_firmware(&tpsram, buf, dev);
1131         if (ret < 0) {
1132                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1133                         buf);
1134                 return ret;
1135         }
1136 
1137         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1138         if (ret)
1139                 goto release_tpsram;
1140 
1141         ret = t3_set_proto_sram(adap, tpsram->data);
1142         if (ret == 0)
1143                 dev_info(dev,
1144                          "successful update of protocol engine "
1145                          "to %d.%d.%d\n",
1146                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1147         else
1148                 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
1149                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1150         if (ret)
1151                 dev_err(dev, "loading protocol SRAM failed\n");
1152 
1153 release_tpsram:
1154         release_firmware(tpsram);
1155 
1156         return ret;
1157 }
1158 
1159 /**
1160  * t3_synchronize_rx - wait for current Rx processing on a port to complete
1161  * @adap: the adapter
1162  * @p: the port
1163  *
1164  * Ensures that current Rx processing on any of the queues associated with
1165  * the given port completes before returning.  We do this by acquiring and
1166  * releasing the locks of the response queues associated with the port.
1167  */
1168 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1169 {
1170         int i;
1171 
1172         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
1173                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
1174 
1175                 spin_lock_irq(&q->lock);
1176                 spin_unlock_irq(&q->lock);
1177         }
1178 }
1179 
1180 static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
1181 {
1182         struct port_info *pi = netdev_priv(dev);
1183         struct adapter *adapter = pi->adapter;
1184 
1185         if (adapter->params.rev > 0) {
1186                 t3_set_vlan_accel(adapter, 1 << pi->port_id,
1187                                   features & NETIF_F_HW_VLAN_CTAG_RX);
1188         } else {
1189                 /* single control for all ports */
1190                 unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
1191 
1192                 for_each_port(adapter, i)
1193                         have_vlans |=
1194                                 adapter->port[i]->features &
1195                                 NETIF_F_HW_VLAN_CTAG_RX;
1196 
1197                 t3_set_vlan_accel(adapter, 1, have_vlans);
1198         }
1199         t3_synchronize_rx(adapter, pi);
1200 }
1201 
1202 /**
1203  *      cxgb_up - enable the adapter
1204  *      @adapter: adapter being enabled
1205  *
1206  *      Called when the first port is enabled, this function performs the
1207  *      actions necessary to make an adapter operational, such as completing
1208  *      the initialization of HW modules, and enabling interrupts.
1209  *
1210  *      Must be called with the rtnl lock held.
1211  */
1212 static int cxgb_up(struct adapter *adap)
1213 {
1214         int i, err;
1215 
1216         if (!(adap->flags & FULL_INIT_DONE)) {
1217                 err = t3_check_fw_version(adap);
1218                 if (err == -EINVAL) {
1219                         err = upgrade_fw(adap);
1220                         CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1221                                 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1222                                 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1223                 }
1224 
1225                 err = t3_check_tpsram_version(adap);
1226                 if (err == -EINVAL) {
1227                         err = update_tpsram(adap);
1228                         CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1229                                 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1230                                 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1231                 }
1232 
1233                 /*
1234                  * Clear interrupts now to catch errors if t3_init_hw fails.
1235                  * We clear them again later as initialization may trigger
1236                  * conditions that can interrupt.
1237                  */
1238                 t3_intr_clear(adap);
1239 
1240                 err = t3_init_hw(adap, 0);
1241                 if (err)
1242                         goto out;
1243 
1244                 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1245                 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1246 
1247                 err = setup_sge_qsets(adap);
1248                 if (err)
1249                         goto out;
1250 
1251                 for_each_port(adap, i)
1252                         cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
1253 
1254                 setup_rss(adap);
1255                 if (!(adap->flags & NAPI_INIT))
1256                         init_napi(adap);
1257 
1258                 t3_start_sge_timers(adap);
1259                 adap->flags |= FULL_INIT_DONE;
1260         }
1261 
1262         t3_intr_clear(adap);
1263 
1264         if (adap->flags & USING_MSIX) {
1265                 name_msix_vecs(adap);
1266                 err = request_irq(adap->msix_info[0].vec,
1267                                   t3_async_intr_handler, 0,
1268                                   adap->msix_info[0].desc, adap);
1269                 if (err)
1270                         goto irq_err;
1271 
1272                 err = request_msix_data_irqs(adap);
1273                 if (err) {
1274                         free_irq(adap->msix_info[0].vec, adap);
1275                         goto irq_err;
1276                 }
1277         } else if ((err = request_irq(adap->pdev->irq,
1278                                       t3_intr_handler(adap,
1279                                                       adap->sge.qs[0].rspq.
1280                                                       polling),
1281                                       (adap->flags & USING_MSI) ?
1282                                        0 : IRQF_SHARED,
1283                                       adap->name, adap)))
1284                 goto irq_err;
1285 
1286         enable_all_napi(adap);
1287         t3_sge_start(adap);
1288         t3_intr_enable(adap);
1289 
1290         if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1291             is_offload(adap) && init_tp_parity(adap) == 0)
1292                 adap->flags |= TP_PARITY_INIT;
1293 
1294         if (adap->flags & TP_PARITY_INIT) {
1295                 t3_write_reg(adap, A_TP_INT_CAUSE,
1296                              F_CMCACHEPERR | F_ARPLUTPERR);
1297                 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1298         }
1299 
1300         if (!(adap->flags & QUEUES_BOUND)) {
1301                 int ret = bind_qsets(adap);
1302 
1303                 if (ret < 0) {
1304                         CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
1305                         t3_intr_disable(adap);
1306                         free_irq_resources(adap);
1307                         err = ret;
1308                         goto out;
1309                 }
1310                 adap->flags |= QUEUES_BOUND;
1311         }
1312 
1313 out:
1314         return err;
1315 irq_err:
1316         CH_ERR(adap, "request_irq failed, err %d\n", err);
1317         goto out;
1318 }
1319 
1320 /*
1321  * Release resources when all the ports and offloading have been stopped.
1322  */
1323 static void cxgb_down(struct adapter *adapter, int on_wq)
1324 {
1325         t3_sge_stop(adapter);
1326         spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
1327         t3_intr_disable(adapter);
1328         spin_unlock_irq(&adapter->work_lock);
1329 
1330         free_irq_resources(adapter);
1331         quiesce_rx(adapter);
1332         t3_sge_stop(adapter);
1333         if (!on_wq)
1334                 flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
1335 }
1336 
1337 static void schedule_chk_task(struct adapter *adap)
1338 {
1339         unsigned int timeo;
1340 
1341         timeo = adap->params.linkpoll_period ?
1342             (HZ * adap->params.linkpoll_period) / 10 :
1343             adap->params.stats_update_period * HZ;
1344         if (timeo)
1345                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1346 }
1347 
1348 static int offload_open(struct net_device *dev)
1349 {
1350         struct port_info *pi = netdev_priv(dev);
1351         struct adapter *adapter = pi->adapter;
1352         struct t3cdev *tdev = dev2t3cdev(dev);
1353         int adap_up = adapter->open_device_map & PORT_MASK;
1354         int err;
1355 
1356         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1357                 return 0;
1358 
1359         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1360                 goto out;
1361 
1362         t3_tp_set_offload_mode(adapter, 1);
1363         tdev->lldev = adapter->port[0];
1364         err = cxgb3_offload_activate(adapter);
1365         if (err)
1366                 goto out;
1367 
1368         init_port_mtus(adapter);
1369         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1370                      adapter->params.b_wnd,
1371                      adapter->params.rev == 0 ?
1372                      adapter->port[0]->mtu : 0xffff);
1373         init_smt(adapter);
1374 
1375         if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1376                 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1377 
1378         /* Call back all registered clients */
1379         cxgb3_add_clients(tdev);
1380 
1381 out:
1382         /* restore them in case the offload module has changed them */
1383         if (err) {
1384                 t3_tp_set_offload_mode(adapter, 0);
1385                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1386                 cxgb3_set_dummy_ops(tdev);
1387         }
1388         return err;
1389 }
1390 
1391 static int offload_close(struct t3cdev *tdev)
1392 {
1393         struct adapter *adapter = tdev2adap(tdev);
1394         struct t3c_data *td = T3C_DATA(tdev);
1395 
1396         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1397                 return 0;
1398 
1399         /* Call back all registered clients */
1400         cxgb3_remove_clients(tdev);
1401 
1402         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1403 
1404         /* Flush work scheduled while releasing TIDs */
1405         flush_work(&td->tid_release_task);
1406 
1407         tdev->lldev = NULL;
1408         cxgb3_set_dummy_ops(tdev);
1409         t3_tp_set_offload_mode(adapter, 0);
1410         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1411 
1412         if (!adapter->open_device_map)
1413                 cxgb_down(adapter, 0);
1414 
1415         cxgb3_offload_deactivate(adapter);
1416         return 0;
1417 }
1418 
1419 static int cxgb_open(struct net_device *dev)
1420 {
1421         struct port_info *pi = netdev_priv(dev);
1422         struct adapter *adapter = pi->adapter;
1423         int other_ports = adapter->open_device_map & PORT_MASK;
1424         int err;
1425 
1426         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1427                 return err;
1428 
1429         set_bit(pi->port_id, &adapter->open_device_map);
1430         if (is_offload(adapter) && !ofld_disable) {
1431                 err = offload_open(dev);
1432                 if (err)
1433                         pr_warn("Could not initialize offload capabilities\n");
1434         }
1435 
1436         netif_set_real_num_tx_queues(dev, pi->nqsets);
1437         err = netif_set_real_num_rx_queues(dev, pi->nqsets);
1438         if (err)
1439                 return err;
1440         link_start(dev);
1441         t3_port_intr_enable(adapter, pi->port_id);
1442         netif_tx_start_all_queues(dev);
1443         if (!other_ports)
1444                 schedule_chk_task(adapter);
1445 
1446         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
1447         return 0;
1448 }
1449 
1450 static int __cxgb_close(struct net_device *dev, int on_wq)
1451 {
1452         struct port_info *pi = netdev_priv(dev);
1453         struct adapter *adapter = pi->adapter;
1454 
1455         
1456         if (!adapter->open_device_map)
1457                 return 0;
1458 
1459         /* Stop link fault interrupts */
1460         t3_xgm_intr_disable(adapter, pi->port_id);
1461         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1462 
1463         t3_port_intr_disable(adapter, pi->port_id);
1464         netif_tx_stop_all_queues(dev);
1465         pi->phy.ops->power_down(&pi->phy, 1);
1466         netif_carrier_off(dev);
1467         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1468 
1469         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1470         clear_bit(pi->port_id, &adapter->open_device_map);
1471         spin_unlock_irq(&adapter->work_lock);
1472 
1473         if (!(adapter->open_device_map & PORT_MASK))
1474                 cancel_delayed_work_sync(&adapter->adap_check_task);
1475 
1476         if (!adapter->open_device_map)
1477                 cxgb_down(adapter, on_wq);
1478 
1479         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1480         return 0;
1481 }
1482 
1483 static int cxgb_close(struct net_device *dev)
1484 {
1485         return __cxgb_close(dev, 0);
1486 }
1487 
1488 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1489 {
1490         struct port_info *pi = netdev_priv(dev);
1491         struct adapter *adapter = pi->adapter;
1492         struct net_device_stats *ns = &dev->stats;
1493         const struct mac_stats *pstats;
1494 
1495         spin_lock(&adapter->stats_lock);
1496         pstats = t3_mac_update_stats(&pi->mac);
1497         spin_unlock(&adapter->stats_lock);
1498 
1499         ns->tx_bytes = pstats->tx_octets;
1500         ns->tx_packets = pstats->tx_frames;
1501         ns->rx_bytes = pstats->rx_octets;
1502         ns->rx_packets = pstats->rx_frames;
1503         ns->multicast = pstats->rx_mcast_frames;
1504 
1505         ns->tx_errors = pstats->tx_underrun;
1506         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1507             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1508             pstats->rx_fifo_ovfl;
1509 
1510         /* detailed rx_errors */
1511         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1512         ns->rx_over_errors = 0;
1513         ns->rx_crc_errors = pstats->rx_fcs_errs;
1514         ns->rx_frame_errors = pstats->rx_symbol_errs;
1515         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1516         ns->rx_missed_errors = pstats->rx_cong_drops;
1517 
1518         /* detailed tx_errors */
1519         ns->tx_aborted_errors = 0;
1520         ns->tx_carrier_errors = 0;
1521         ns->tx_fifo_errors = pstats->tx_underrun;
1522         ns->tx_heartbeat_errors = 0;
1523         ns->tx_window_errors = 0;
1524         return ns;
1525 }
1526 
1527 static u32 get_msglevel(struct net_device *dev)
1528 {
1529         struct port_info *pi = netdev_priv(dev);
1530         struct adapter *adapter = pi->adapter;
1531 
1532         return adapter->msg_enable;
1533 }
1534 
1535 static void set_msglevel(struct net_device *dev, u32 val)
1536 {
1537         struct port_info *pi = netdev_priv(dev);
1538         struct adapter *adapter = pi->adapter;
1539 
1540         adapter->msg_enable = val;
1541 }
1542 
1543 static const char stats_strings[][ETH_GSTRING_LEN] = {
1544         "TxOctetsOK         ",
1545         "TxFramesOK         ",
1546         "TxMulticastFramesOK",
1547         "TxBroadcastFramesOK",
1548         "TxPauseFrames      ",
1549         "TxUnderrun         ",
1550         "TxExtUnderrun      ",
1551 
1552         "TxFrames64         ",
1553         "TxFrames65To127    ",
1554         "TxFrames128To255   ",
1555         "TxFrames256To511   ",
1556         "TxFrames512To1023  ",
1557         "TxFrames1024To1518 ",
1558         "TxFrames1519ToMax  ",
1559 
1560         "RxOctetsOK         ",
1561         "RxFramesOK         ",
1562         "RxMulticastFramesOK",
1563         "RxBroadcastFramesOK",
1564         "RxPauseFrames      ",
1565         "RxFCSErrors        ",
1566         "RxSymbolErrors     ",
1567         "RxShortErrors      ",
1568         "RxJabberErrors     ",
1569         "RxLengthErrors     ",
1570         "RxFIFOoverflow     ",
1571 
1572         "RxFrames64         ",
1573         "RxFrames65To127    ",
1574         "RxFrames128To255   ",
1575         "RxFrames256To511   ",
1576         "RxFrames512To1023  ",
1577         "RxFrames1024To1518 ",
1578         "RxFrames1519ToMax  ",
1579 
1580         "PhyFIFOErrors      ",
1581         "TSO                ",
1582         "VLANextractions    ",
1583         "VLANinsertions     ",
1584         "TxCsumOffload      ",
1585         "RxCsumGood         ",
1586         "LroAggregated      ",
1587         "LroFlushed         ",
1588         "LroNoDesc          ",
1589         "RxDrops            ",
1590 
1591         "CheckTXEnToggled   ",
1592         "CheckResets        ",
1593 
1594         "LinkFaults         ",
1595 };
1596 
1597 static int get_sset_count(struct net_device *dev, int sset)
1598 {
1599         switch (sset) {
1600         case ETH_SS_STATS:
1601                 return ARRAY_SIZE(stats_strings);
1602         default:
1603                 return -EOPNOTSUPP;
1604         }
1605 }
1606 
1607 #define T3_REGMAP_SIZE (3 * 1024)
1608 
1609 static int get_regs_len(struct net_device *dev)
1610 {
1611         return T3_REGMAP_SIZE;
1612 }
1613 
1614 static int get_eeprom_len(struct net_device *dev)
1615 {
1616         return EEPROMSIZE;
1617 }
1618 
1619 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1620 {
1621         struct port_info *pi = netdev_priv(dev);
1622         struct adapter *adapter = pi->adapter;
1623         u32 fw_vers = 0;
1624         u32 tp_vers = 0;
1625 
1626         spin_lock(&adapter->stats_lock);
1627         t3_get_fw_version(adapter, &fw_vers);
1628         t3_get_tp_version(adapter, &tp_vers);
1629         spin_unlock(&adapter->stats_lock);
1630 
1631         strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1632         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1633         strlcpy(info->bus_info, pci_name(adapter->pdev),
1634                 sizeof(info->bus_info));
1635         if (fw_vers)
1636                 snprintf(info->fw_version, sizeof(info->fw_version),
1637                          "%s %u.%u.%u TP %u.%u.%u",
1638                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1639                          G_FW_VERSION_MAJOR(fw_vers),
1640                          G_FW_VERSION_MINOR(fw_vers),
1641                          G_FW_VERSION_MICRO(fw_vers),
1642                          G_TP_VERSION_MAJOR(tp_vers),
1643                          G_TP_VERSION_MINOR(tp_vers),
1644                          G_TP_VERSION_MICRO(tp_vers));
1645 }
1646 
1647 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1648 {
1649         if (stringset == ETH_SS_STATS)
1650                 memcpy(data, stats_strings, sizeof(stats_strings));
1651 }
1652 
1653 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1654                                             struct port_info *p, int idx)
1655 {
1656         int i;
1657         unsigned long tot = 0;
1658 
1659         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1660                 tot += adapter->sge.qs[i].port_stats[idx];
1661         return tot;
1662 }
1663 
1664 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1665                       u64 *data)
1666 {
1667         struct port_info *pi = netdev_priv(dev);
1668         struct adapter *adapter = pi->adapter;
1669         const struct mac_stats *s;
1670 
1671         spin_lock(&adapter->stats_lock);
1672         s = t3_mac_update_stats(&pi->mac);
1673         spin_unlock(&adapter->stats_lock);
1674 
1675         *data++ = s->tx_octets;
1676         *data++ = s->tx_frames;
1677         *data++ = s->tx_mcast_frames;
1678         *data++ = s->tx_bcast_frames;
1679         *data++ = s->tx_pause;
1680         *data++ = s->tx_underrun;
1681         *data++ = s->tx_fifo_urun;
1682 
1683         *data++ = s->tx_frames_64;
1684         *data++ = s->tx_frames_65_127;
1685         *data++ = s->tx_frames_128_255;
1686         *data++ = s->tx_frames_256_511;
1687         *data++ = s->tx_frames_512_1023;
1688         *data++ = s->tx_frames_1024_1518;
1689         *data++ = s->tx_frames_1519_max;
1690 
1691         *data++ = s->rx_octets;
1692         *data++ = s->rx_frames;
1693         *data++ = s->rx_mcast_frames;
1694         *data++ = s->rx_bcast_frames;
1695         *data++ = s->rx_pause;
1696         *data++ = s->rx_fcs_errs;
1697         *data++ = s->rx_symbol_errs;
1698         *data++ = s->rx_short;
1699         *data++ = s->rx_jabber;
1700         *data++ = s->rx_too_long;
1701         *data++ = s->rx_fifo_ovfl;
1702 
1703         *data++ = s->rx_frames_64;
1704         *data++ = s->rx_frames_65_127;
1705         *data++ = s->rx_frames_128_255;
1706         *data++ = s->rx_frames_256_511;
1707         *data++ = s->rx_frames_512_1023;
1708         *data++ = s->rx_frames_1024_1518;
1709         *data++ = s->rx_frames_1519_max;
1710 
1711         *data++ = pi->phy.fifo_errors;
1712 
1713         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1714         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1715         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1716         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1717         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1718         *data++ = 0;
1719         *data++ = 0;
1720         *data++ = 0;
1721         *data++ = s->rx_cong_drops;
1722 
1723         *data++ = s->num_toggled;
1724         *data++ = s->num_resets;
1725 
1726         *data++ = s->link_faults;
1727 }
1728 
1729 static inline void reg_block_dump(struct adapter *ap, void *buf,
1730                                   unsigned int start, unsigned int end)
1731 {
1732         u32 *p = buf + start;
1733 
1734         for (; start <= end; start += sizeof(u32))
1735                 *p++ = t3_read_reg(ap, start);
1736 }
1737 
1738 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1739                      void *buf)
1740 {
1741         struct port_info *pi = netdev_priv(dev);
1742         struct adapter *ap = pi->adapter;
1743 
1744         /*
1745          * Version scheme:
1746          * bits 0..9: chip version
1747          * bits 10..15: chip revision
1748          * bit 31: set for PCIe cards
1749          */
1750         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1751 
1752         /*
1753          * We skip the MAC statistics registers because they are clear-on-read.
1754          * Also reading multi-register stats would need to synchronize with the
1755          * periodic mac stats accumulation.  Hard to justify the complexity.
1756          */
1757         memset(buf, 0, T3_REGMAP_SIZE);
1758         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1759         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1760         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1761         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1762         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1763         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1764                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1765         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1766                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1767 }
1768 
1769 static int restart_autoneg(struct net_device *dev)
1770 {
1771         struct port_info *p = netdev_priv(dev);
1772 
1773         if (!netif_running(dev))
1774                 return -EAGAIN;
1775         if (p->link_config.autoneg != AUTONEG_ENABLE)
1776                 return -EINVAL;
1777         p->phy.ops->autoneg_restart(&p->phy);
1778         return 0;
1779 }
1780 
1781 static int set_phys_id(struct net_device *dev,
1782                        enum ethtool_phys_id_state state)
1783 {
1784         struct port_info *pi = netdev_priv(dev);
1785         struct adapter *adapter = pi->adapter;
1786 
1787         switch (state) {
1788         case ETHTOOL_ID_ACTIVE:
1789                 return 1;       /* cycle on/off once per second */
1790 
1791         case ETHTOOL_ID_OFF:
1792                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1793                 break;
1794 
1795         case ETHTOOL_ID_ON:
1796         case ETHTOOL_ID_INACTIVE:
1797                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1798                          F_GPIO0_OUT_VAL);
1799         }
1800 
1801         return 0;
1802 }
1803 
1804 static int get_link_ksettings(struct net_device *dev,
1805                               struct ethtool_link_ksettings *cmd)
1806 {
1807         struct port_info *p = netdev_priv(dev);
1808         u32 supported;
1809 
1810         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1811                                                 p->link_config.supported);
1812         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1813                                                 p->link_config.advertising);
1814 
1815         if (netif_carrier_ok(dev)) {
1816                 cmd->base.speed = p->link_config.speed;
1817                 cmd->base.duplex = p->link_config.duplex;
1818         } else {
1819                 cmd->base.speed = SPEED_UNKNOWN;
1820                 cmd->base.duplex = DUPLEX_UNKNOWN;
1821         }
1822 
1823         ethtool_convert_link_mode_to_legacy_u32(&supported,
1824                                                 cmd->link_modes.supported);
1825 
1826         cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1827         cmd->base.phy_address = p->phy.mdio.prtad;
1828         cmd->base.autoneg = p->link_config.autoneg;
1829         return 0;
1830 }
1831 
1832 static int speed_duplex_to_caps(int speed, int duplex)
1833 {
1834         int cap = 0;
1835 
1836         switch (speed) {
1837         case SPEED_10:
1838                 if (duplex == DUPLEX_FULL)
1839                         cap = SUPPORTED_10baseT_Full;
1840                 else
1841                         cap = SUPPORTED_10baseT_Half;
1842                 break;
1843         case SPEED_100:
1844                 if (duplex == DUPLEX_FULL)
1845                         cap = SUPPORTED_100baseT_Full;
1846                 else
1847                         cap = SUPPORTED_100baseT_Half;
1848                 break;
1849         case SPEED_1000:
1850                 if (duplex == DUPLEX_FULL)
1851                         cap = SUPPORTED_1000baseT_Full;
1852                 else
1853                         cap = SUPPORTED_1000baseT_Half;
1854                 break;
1855         case SPEED_10000:
1856                 if (duplex == DUPLEX_FULL)
1857                         cap = SUPPORTED_10000baseT_Full;
1858         }
1859         return cap;
1860 }
1861 
1862 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1863                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1864                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1865                       ADVERTISED_10000baseT_Full)
1866 
1867 static int set_link_ksettings(struct net_device *dev,
1868                               const struct ethtool_link_ksettings *cmd)
1869 {
1870         struct port_info *p = netdev_priv(dev);
1871         struct link_config *lc = &p->link_config;
1872         u32 advertising;
1873 
1874         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1875                                                 cmd->link_modes.advertising);
1876 
1877         if (!(lc->supported & SUPPORTED_Autoneg)) {
1878                 /*
1879                  * PHY offers a single speed/duplex.  See if that's what's
1880                  * being requested.
1881                  */
1882                 if (cmd->base.autoneg == AUTONEG_DISABLE) {
1883                         u32 speed = cmd->base.speed;
1884                         int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1885                         if (lc->supported & cap)
1886                                 return 0;
1887                 }
1888                 return -EINVAL;
1889         }
1890 
1891         if (cmd->base.autoneg == AUTONEG_DISABLE) {
1892                 u32 speed = cmd->base.speed;
1893                 int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1894 
1895                 if (!(lc->supported & cap) || (speed == SPEED_1000))
1896                         return -EINVAL;
1897                 lc->requested_speed = speed;
1898                 lc->requested_duplex = cmd->base.duplex;
1899                 lc->advertising = 0;
1900         } else {
1901                 advertising &= ADVERTISED_MASK;
1902                 advertising &= lc->supported;
1903                 if (!advertising)
1904                         return -EINVAL;
1905                 lc->requested_speed = SPEED_INVALID;
1906                 lc->requested_duplex = DUPLEX_INVALID;
1907                 lc->advertising = advertising | ADVERTISED_Autoneg;
1908         }
1909         lc->autoneg = cmd->base.autoneg;
1910         if (netif_running(dev))
1911                 t3_link_start(&p->phy, &p->mac, lc);
1912         return 0;
1913 }
1914 
1915 static void get_pauseparam(struct net_device *dev,
1916                            struct ethtool_pauseparam *epause)
1917 {
1918         struct port_info *p = netdev_priv(dev);
1919 
1920         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1921         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1922         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1923 }
1924 
1925 static int set_pauseparam(struct net_device *dev,
1926                           struct ethtool_pauseparam *epause)
1927 {
1928         struct port_info *p = netdev_priv(dev);
1929         struct link_config *lc = &p->link_config;
1930 
1931         if (epause->autoneg == AUTONEG_DISABLE)
1932                 lc->requested_fc = 0;
1933         else if (lc->supported & SUPPORTED_Autoneg)
1934                 lc->requested_fc = PAUSE_AUTONEG;
1935         else
1936                 return -EINVAL;
1937 
1938         if (epause->rx_pause)
1939                 lc->requested_fc |= PAUSE_RX;
1940         if (epause->tx_pause)
1941                 lc->requested_fc |= PAUSE_TX;
1942         if (lc->autoneg == AUTONEG_ENABLE) {
1943                 if (netif_running(dev))
1944                         t3_link_start(&p->phy, &p->mac, lc);
1945         } else {
1946                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1947                 if (netif_running(dev))
1948                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1949         }
1950         return 0;
1951 }
1952 
1953 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1954 {
1955         struct port_info *pi = netdev_priv(dev);
1956         struct adapter *adapter = pi->adapter;
1957         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1958 
1959         e->rx_max_pending = MAX_RX_BUFFERS;
1960         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1961         e->tx_max_pending = MAX_TXQ_ENTRIES;
1962 
1963         e->rx_pending = q->fl_size;
1964         e->rx_mini_pending = q->rspq_size;
1965         e->rx_jumbo_pending = q->jumbo_size;
1966         e->tx_pending = q->txq_size[0];
1967 }
1968 
1969 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1970 {
1971         struct port_info *pi = netdev_priv(dev);
1972         struct adapter *adapter = pi->adapter;
1973         struct qset_params *q;
1974         int i;
1975 
1976         if (e->rx_pending > MAX_RX_BUFFERS ||
1977             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1978             e->tx_pending > MAX_TXQ_ENTRIES ||
1979             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1980             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1981             e->rx_pending < MIN_FL_ENTRIES ||
1982             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1983             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1984                 return -EINVAL;
1985 
1986         if (adapter->flags & FULL_INIT_DONE)
1987                 return -EBUSY;
1988 
1989         q = &adapter->params.sge.qset[pi->first_qset];
1990         for (i = 0; i < pi->nqsets; ++i, ++q) {
1991                 q->rspq_size = e->rx_mini_pending;
1992                 q->fl_size = e->rx_pending;
1993                 q->jumbo_size = e->rx_jumbo_pending;
1994                 q->txq_size[0] = e->tx_pending;
1995                 q->txq_size[1] = e->tx_pending;
1996                 q->txq_size[2] = e->tx_pending;
1997         }
1998         return 0;
1999 }
2000 
2001 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2002 {
2003         struct port_info *pi = netdev_priv(dev);
2004         struct adapter *adapter = pi->adapter;
2005         struct qset_params *qsp;
2006         struct sge_qset *qs;
2007         int i;
2008 
2009         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2010                 return -EINVAL;
2011 
2012         for (i = 0; i < pi->nqsets; i++) {
2013                 qsp = &adapter->params.sge.qset[i];
2014                 qs = &adapter->sge.qs[i];
2015                 qsp->coalesce_usecs = c->rx_coalesce_usecs;
2016                 t3_update_qset_coalesce(qs, qsp);
2017         }
2018 
2019         return 0;
2020 }
2021 
2022 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2023 {
2024         struct port_info *pi = netdev_priv(dev);
2025         struct adapter *adapter = pi->adapter;
2026         struct qset_params *q = adapter->params.sge.qset;
2027 
2028         c->rx_coalesce_usecs = q->coalesce_usecs;
2029         return 0;
2030 }
2031 
2032 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2033                       u8 * data)
2034 {
2035         struct port_info *pi = netdev_priv(dev);
2036         struct adapter *adapter = pi->adapter;
2037         int i, err = 0;
2038 
2039         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2040         if (!buf)
2041                 return -ENOMEM;
2042 
2043         e->magic = EEPROM_MAGIC;
2044         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2045                 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
2046 
2047         if (!err)
2048                 memcpy(data, buf + e->offset, e->len);
2049         kfree(buf);
2050         return err;
2051 }
2052 
2053 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2054                       u8 * data)
2055 {
2056         struct port_info *pi = netdev_priv(dev);
2057         struct adapter *adapter = pi->adapter;
2058         u32 aligned_offset, aligned_len;
2059         __le32 *p;
2060         u8 *buf;
2061         int err;
2062 
2063         if (eeprom->magic != EEPROM_MAGIC)
2064                 return -EINVAL;
2065 
2066         aligned_offset = eeprom->offset & ~3;
2067         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2068 
2069         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2070                 buf = kmalloc(aligned_len, GFP_KERNEL);
2071                 if (!buf)
2072                         return -ENOMEM;
2073                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2074                 if (!err && aligned_len > 4)
2075                         err = t3_seeprom_read(adapter,
2076                                               aligned_offset + aligned_len - 4,
2077                                               (__le32 *) & buf[aligned_len - 4]);
2078                 if (err)
2079                         goto out;
2080                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2081         } else
2082                 buf = data;
2083 
2084         err = t3_seeprom_wp(adapter, 0);
2085         if (err)
2086                 goto out;
2087 
2088         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2089                 err = t3_seeprom_write(adapter, aligned_offset, *p);
2090                 aligned_offset += 4;
2091         }
2092 
2093         if (!err)
2094                 err = t3_seeprom_wp(adapter, 1);
2095 out:
2096         if (buf != data)
2097                 kfree(buf);
2098         return err;
2099 }
2100 
2101 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2102 {
2103         wol->supported = 0;
2104         wol->wolopts = 0;
2105         memset(&wol->sopass, 0, sizeof(wol->sopass));
2106 }
2107 
2108 static const struct ethtool_ops cxgb_ethtool_ops = {
2109         .get_drvinfo = get_drvinfo,
2110         .get_msglevel = get_msglevel,
2111         .set_msglevel = set_msglevel,
2112         .get_ringparam = get_sge_param,
2113         .set_ringparam = set_sge_param,
2114         .get_coalesce = get_coalesce,
2115         .set_coalesce = set_coalesce,
2116         .get_eeprom_len = get_eeprom_len,
2117         .get_eeprom = get_eeprom,
2118         .set_eeprom = set_eeprom,
2119         .get_pauseparam = get_pauseparam,
2120         .set_pauseparam = set_pauseparam,
2121         .get_link = ethtool_op_get_link,
2122         .get_strings = get_strings,
2123         .set_phys_id = set_phys_id,
2124         .nway_reset = restart_autoneg,
2125         .get_sset_count = get_sset_count,
2126         .get_ethtool_stats = get_stats,
2127         .get_regs_len = get_regs_len,
2128         .get_regs = get_regs,
2129         .get_wol = get_wol,
2130         .get_link_ksettings = get_link_ksettings,
2131         .set_link_ksettings = set_link_ksettings,
2132 };
2133 
2134 static int in_range(int val, int lo, int hi)
2135 {
2136         return val < 0 || (val <= hi && val >= lo);
2137 }
2138 
2139 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2140 {
2141         struct port_info *pi = netdev_priv(dev);
2142         struct adapter *adapter = pi->adapter;
2143         u32 cmd;
2144         int ret;
2145 
2146         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2147                 return -EFAULT;
2148 
2149         switch (cmd) {
2150         case CHELSIO_SET_QSET_PARAMS:{
2151                 int i;
2152                 struct qset_params *q;
2153                 struct ch_qset_params t;
2154                 int q1 = pi->first_qset;
2155                 int nqsets = pi->nqsets;
2156 
2157                 if (!capable(CAP_NET_ADMIN))
2158                         return -EPERM;
2159                 if (copy_from_user(&t, useraddr, sizeof(t)))
2160                         return -EFAULT;
2161                 if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2162                         return -EINVAL;
2163                 if (t.qset_idx >= SGE_QSETS)
2164                         return -EINVAL;
2165                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2166                     !in_range(t.cong_thres, 0, 255) ||
2167                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2168                               MAX_TXQ_ENTRIES) ||
2169                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2170                               MAX_TXQ_ENTRIES) ||
2171                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2172                               MAX_CTRL_TXQ_ENTRIES) ||
2173                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2174                               MAX_RX_BUFFERS) ||
2175                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2176                               MAX_RX_JUMBO_BUFFERS) ||
2177                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2178                               MAX_RSPQ_ENTRIES))
2179                         return -EINVAL;
2180 
2181                 if ((adapter->flags & FULL_INIT_DONE) &&
2182                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2183                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2184                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2185                         t.polling >= 0 || t.cong_thres >= 0))
2186                         return -EBUSY;
2187 
2188                 /* Allow setting of any available qset when offload enabled */
2189                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2190                         q1 = 0;
2191                         for_each_port(adapter, i) {
2192                                 pi = adap2pinfo(adapter, i);
2193                                 nqsets += pi->first_qset + pi->nqsets;
2194                         }
2195                 }
2196 
2197                 if (t.qset_idx < q1)
2198                         return -EINVAL;
2199                 if (t.qset_idx > q1 + nqsets - 1)
2200                         return -EINVAL;
2201 
2202                 q = &adapter->params.sge.qset[t.qset_idx];
2203 
2204                 if (t.rspq_size >= 0)
2205                         q->rspq_size = t.rspq_size;
2206                 if (t.fl_size[0] >= 0)
2207                         q->fl_size = t.fl_size[0];
2208                 if (t.fl_size[1] >= 0)
2209                         q->jumbo_size = t.fl_size[1];
2210                 if (t.txq_size[0] >= 0)
2211                         q->txq_size[0] = t.txq_size[0];
2212                 if (t.txq_size[1] >= 0)
2213                         q->txq_size[1] = t.txq_size[1];
2214                 if (t.txq_size[2] >= 0)
2215                         q->txq_size[2] = t.txq_size[2];
2216                 if (t.cong_thres >= 0)
2217                         q->cong_thres = t.cong_thres;
2218                 if (t.intr_lat >= 0) {
2219                         struct sge_qset *qs =
2220                                 &adapter->sge.qs[t.qset_idx];
2221 
2222                         q->coalesce_usecs = t.intr_lat;
2223                         t3_update_qset_coalesce(qs, q);
2224                 }
2225                 if (t.polling >= 0) {
2226                         if (adapter->flags & USING_MSIX)
2227                                 q->polling = t.polling;
2228                         else {
2229                                 /* No polling with INTx for T3A */
2230                                 if (adapter->params.rev == 0 &&
2231                                         !(adapter->flags & USING_MSI))
2232                                         t.polling = 0;
2233 
2234                                 for (i = 0; i < SGE_QSETS; i++) {
2235                                         q = &adapter->params.sge.
2236                                                 qset[i];
2237                                         q->polling = t.polling;
2238                                 }
2239                         }
2240                 }
2241 
2242                 if (t.lro >= 0) {
2243                         if (t.lro)
2244                                 dev->wanted_features |= NETIF_F_GRO;
2245                         else
2246                                 dev->wanted_features &= ~NETIF_F_GRO;
2247                         netdev_update_features(dev);
2248                 }
2249 
2250                 break;
2251         }
2252         case CHELSIO_GET_QSET_PARAMS:{
2253                 struct qset_params *q;
2254                 struct ch_qset_params t;
2255                 int q1 = pi->first_qset;
2256                 int nqsets = pi->nqsets;
2257                 int i;
2258 
2259                 if (copy_from_user(&t, useraddr, sizeof(t)))
2260                         return -EFAULT;
2261 
2262                 if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2263                         return -EINVAL;
2264 
2265                 /* Display qsets for all ports when offload enabled */
2266                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2267                         q1 = 0;
2268                         for_each_port(adapter, i) {
2269                                 pi = adap2pinfo(adapter, i);
2270                                 nqsets = pi->first_qset + pi->nqsets;
2271                         }
2272                 }
2273 
2274                 if (t.qset_idx >= nqsets)
2275                         return -EINVAL;
2276                 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2277 
2278                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2279                 t.rspq_size = q->rspq_size;
2280                 t.txq_size[0] = q->txq_size[0];
2281                 t.txq_size[1] = q->txq_size[1];
2282                 t.txq_size[2] = q->txq_size[2];
2283                 t.fl_size[0] = q->fl_size;
2284                 t.fl_size[1] = q->jumbo_size;
2285                 t.polling = q->polling;
2286                 t.lro = !!(dev->features & NETIF_F_GRO);
2287                 t.intr_lat = q->coalesce_usecs;
2288                 t.cong_thres = q->cong_thres;
2289                 t.qnum = q1;
2290 
2291                 if (adapter->flags & USING_MSIX)
2292                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2293                 else
2294                         t.vector = adapter->pdev->irq;
2295 
2296                 if (copy_to_user(useraddr, &t, sizeof(t)))
2297                         return -EFAULT;
2298                 break;
2299         }
2300         case CHELSIO_SET_QSET_NUM:{
2301                 struct ch_reg edata;
2302                 unsigned int i, first_qset = 0, other_qsets = 0;
2303 
2304                 if (!capable(CAP_NET_ADMIN))
2305                         return -EPERM;
2306                 if (adapter->flags & FULL_INIT_DONE)
2307                         return -EBUSY;
2308                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2309                         return -EFAULT;
2310                 if (edata.cmd != CHELSIO_SET_QSET_NUM)
2311                         return -EINVAL;
2312                 if (edata.val < 1 ||
2313                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2314                         return -EINVAL;
2315 
2316                 for_each_port(adapter, i)
2317                         if (adapter->port[i] && adapter->port[i] != dev)
2318                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2319 
2320                 if (edata.val + other_qsets > SGE_QSETS)
2321                         return -EINVAL;
2322 
2323                 pi->nqsets = edata.val;
2324 
2325                 for_each_port(adapter, i)
2326                         if (adapter->port[i]) {
2327                                 pi = adap2pinfo(adapter, i);
2328                                 pi->first_qset = first_qset;
2329                                 first_qset += pi->nqsets;
2330                         }
2331                 break;
2332         }
2333         case CHELSIO_GET_QSET_NUM:{
2334                 struct ch_reg edata;
2335 
2336                 memset(&edata, 0, sizeof(struct ch_reg));
2337 
2338                 edata.cmd = CHELSIO_GET_QSET_NUM;
2339                 edata.val = pi->nqsets;
2340                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2341                         return -EFAULT;
2342                 break;
2343         }
2344         case CHELSIO_LOAD_FW:{
2345                 u8 *fw_data;
2346                 struct ch_mem_range t;
2347 
2348                 if (!capable(CAP_SYS_RAWIO))
2349                         return -EPERM;
2350                 if (copy_from_user(&t, useraddr, sizeof(t)))
2351                         return -EFAULT;
2352                 if (t.cmd != CHELSIO_LOAD_FW)
2353                         return -EINVAL;
2354                 /* Check t.len sanity ? */
2355                 fw_data = memdup_user(useraddr + sizeof(t), t.len);
2356                 if (IS_ERR(fw_data))
2357                         return PTR_ERR(fw_data);
2358 
2359                 ret = t3_load_fw(adapter, fw_data, t.len);
2360                 kfree(fw_data);
2361                 if (ret)
2362                         return ret;
2363                 break;
2364         }
2365         case CHELSIO_SETMTUTAB:{
2366                 struct ch_mtus m;
2367                 int i;
2368 
2369                 if (!is_offload(adapter))
2370                         return -EOPNOTSUPP;
2371                 if (!capable(CAP_NET_ADMIN))
2372                         return -EPERM;
2373                 if (offload_running(adapter))
2374                         return -EBUSY;
2375                 if (copy_from_user(&m, useraddr, sizeof(m)))
2376                         return -EFAULT;
2377                 if (m.cmd != CHELSIO_SETMTUTAB)
2378                         return -EINVAL;
2379                 if (m.nmtus != NMTUS)
2380                         return -EINVAL;
2381                 if (m.mtus[0] < 81)     /* accommodate SACK */
2382                         return -EINVAL;
2383 
2384                 /* MTUs must be in ascending order */
2385                 for (i = 1; i < NMTUS; ++i)
2386                         if (m.mtus[i] < m.mtus[i - 1])
2387                                 return -EINVAL;
2388 
2389                 memcpy(adapter->params.mtus, m.mtus,
2390                         sizeof(adapter->params.mtus));
2391                 break;
2392         }
2393         case CHELSIO_GET_PM:{
2394                 struct tp_params *p = &adapter->params.tp;
2395                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2396 
2397                 if (!is_offload(adapter))
2398                         return -EOPNOTSUPP;
2399                 m.tx_pg_sz = p->tx_pg_size;
2400                 m.tx_num_pg = p->tx_num_pgs;
2401                 m.rx_pg_sz = p->rx_pg_size;
2402                 m.rx_num_pg = p->rx_num_pgs;
2403                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2404                 if (copy_to_user(useraddr, &m, sizeof(m)))
2405                         return -EFAULT;
2406                 break;
2407         }
2408         case CHELSIO_SET_PM:{
2409                 struct ch_pm m;
2410                 struct tp_params *p = &adapter->params.tp;
2411 
2412                 if (!is_offload(adapter))
2413                         return -EOPNOTSUPP;
2414                 if (!capable(CAP_NET_ADMIN))
2415                         return -EPERM;
2416                 if (adapter->flags & FULL_INIT_DONE)
2417                         return -EBUSY;
2418                 if (copy_from_user(&m, useraddr, sizeof(m)))
2419                         return -EFAULT;
2420                 if (m.cmd != CHELSIO_SET_PM)
2421                         return -EINVAL;
2422                 if (!is_power_of_2(m.rx_pg_sz) ||
2423                         !is_power_of_2(m.tx_pg_sz))
2424                         return -EINVAL; /* not power of 2 */
2425                 if (!(m.rx_pg_sz & 0x14000))
2426                         return -EINVAL; /* not 16KB or 64KB */
2427                 if (!(m.tx_pg_sz & 0x1554000))
2428                         return -EINVAL;
2429                 if (m.tx_num_pg == -1)
2430                         m.tx_num_pg = p->tx_num_pgs;
2431                 if (m.rx_num_pg == -1)
2432                         m.rx_num_pg = p->rx_num_pgs;
2433                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2434                         return -EINVAL;
2435                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2436                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2437                         return -EINVAL;
2438                 p->rx_pg_size = m.rx_pg_sz;
2439                 p->tx_pg_size = m.tx_pg_sz;
2440                 p->rx_num_pgs = m.rx_num_pg;
2441                 p->tx_num_pgs = m.tx_num_pg;
2442                 break;
2443         }
2444         case CHELSIO_GET_MEM:{
2445                 struct ch_mem_range t;
2446                 struct mc7 *mem;
2447                 u64 buf[32];
2448 
2449                 if (!is_offload(adapter))
2450                         return -EOPNOTSUPP;
2451                 if (!capable(CAP_NET_ADMIN))
2452                         return -EPERM;
2453                 if (!(adapter->flags & FULL_INIT_DONE))
2454                         return -EIO;    /* need the memory controllers */
2455                 if (copy_from_user(&t, useraddr, sizeof(t)))
2456                         return -EFAULT;
2457                 if (t.cmd != CHELSIO_GET_MEM)
2458                         return -EINVAL;
2459                 if ((t.addr & 7) || (t.len & 7))
2460                         return -EINVAL;
2461                 if (t.mem_id == MEM_CM)
2462                         mem = &adapter->cm;
2463                 else if (t.mem_id == MEM_PMRX)
2464                         mem = &adapter->pmrx;
2465                 else if (t.mem_id == MEM_PMTX)
2466                         mem = &adapter->pmtx;
2467                 else
2468                         return -EINVAL;
2469 
2470                 /*
2471                  * Version scheme:
2472                  * bits 0..9: chip version
2473                  * bits 10..15: chip revision
2474                  */
2475                 t.version = 3 | (adapter->params.rev << 10);
2476                 if (copy_to_user(useraddr, &t, sizeof(t)))
2477                         return -EFAULT;
2478 
2479                 /*
2480                  * Read 256 bytes at a time as len can be large and we don't
2481                  * want to use huge intermediate buffers.
2482                  */
2483                 useraddr += sizeof(t);  /* advance to start of buffer */
2484                 while (t.len) {
2485                         unsigned int chunk =
2486                                 min_t(unsigned int, t.len, sizeof(buf));
2487 
2488                         ret =
2489                                 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2490                                                 buf);
2491                         if (ret)
2492                                 return ret;
2493                         if (copy_to_user(useraddr, buf, chunk))
2494                                 return -EFAULT;
2495                         useraddr += chunk;
2496                         t.addr += chunk;
2497                         t.len -= chunk;
2498                 }
2499                 break;
2500         }
2501         case CHELSIO_SET_TRACE_FILTER:{
2502                 struct ch_trace t;
2503                 const struct trace_params *tp;
2504 
2505                 if (!capable(CAP_NET_ADMIN))
2506                         return -EPERM;
2507                 if (!offload_running(adapter))
2508                         return -EAGAIN;
2509                 if (copy_from_user(&t, useraddr, sizeof(t)))
2510                         return -EFAULT;
2511                 if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2512                         return -EINVAL;
2513 
2514                 tp = (const struct trace_params *)&t.sip;
2515                 if (t.config_tx)
2516                         t3_config_trace_filter(adapter, tp, 0,
2517                                                 t.invert_match,
2518                                                 t.trace_tx);
2519                 if (t.config_rx)
2520                         t3_config_trace_filter(adapter, tp, 1,
2521                                                 t.invert_match,
2522                                                 t.trace_rx);
2523                 break;
2524         }
2525         default:
2526                 return -EOPNOTSUPP;
2527         }
2528         return 0;
2529 }
2530 
2531 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2532 {
2533         struct mii_ioctl_data *data = if_mii(req);
2534         struct port_info *pi = netdev_priv(dev);
2535         struct adapter *adapter = pi->adapter;
2536 
2537         switch (cmd) {
2538         case SIOCGMIIREG:
2539         case SIOCSMIIREG:
2540                 /* Convert phy_id from older PRTAD/DEVAD format */
2541                 if (is_10G(adapter) &&
2542                     !mdio_phy_id_is_c45(data->phy_id) &&
2543                     (data->phy_id & 0x1f00) &&
2544                     !(data->phy_id & 0xe0e0))
2545                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2546                                                        data->phy_id & 0x1f);
2547                 /* FALLTHRU */
2548         case SIOCGMIIPHY:
2549                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2550         case SIOCCHIOCTL:
2551                 return cxgb_extension_ioctl(dev, req->ifr_data);
2552         default:
2553                 return -EOPNOTSUPP;
2554         }
2555 }
2556 
2557 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2558 {
2559         struct port_info *pi = netdev_priv(dev);
2560         struct adapter *adapter = pi->adapter;
2561         int ret;
2562 
2563         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2564                 return ret;
2565         dev->mtu = new_mtu;
2566         init_port_mtus(adapter);
2567         if (adapter->params.rev == 0 && offload_running(adapter))
2568                 t3_load_mtus(adapter, adapter->params.mtus,
2569                              adapter->params.a_wnd, adapter->params.b_wnd,
2570                              adapter->port[0]->mtu);
2571         return 0;
2572 }
2573 
2574 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2575 {
2576         struct port_info *pi = netdev_priv(dev);
2577         struct adapter *adapter = pi->adapter;
2578         struct sockaddr *addr = p;
2579 
2580         if (!is_valid_ether_addr(addr->sa_data))
2581                 return -EADDRNOTAVAIL;
2582 
2583         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2584         t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2585         if (offload_running(adapter))
2586                 write_smt_entry(adapter, pi->port_id);
2587         return 0;
2588 }
2589 
2590 static netdev_features_t cxgb_fix_features(struct net_device *dev,
2591         netdev_features_t features)
2592 {
2593         /*
2594          * Since there is no support for separate rx/tx vlan accel
2595          * enable/disable make sure tx flag is always in same state as rx.
2596          */
2597         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2598                 features |= NETIF_F_HW_VLAN_CTAG_TX;
2599         else
2600                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2601 
2602         return features;
2603 }
2604 
2605 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2606 {
2607         netdev_features_t changed = dev->features ^ features;
2608 
2609         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2610                 cxgb_vlan_mode(dev, features);
2611 
2612         return 0;
2613 }
2614 
2615 #ifdef CONFIG_NET_POLL_CONTROLLER
2616 static void cxgb_netpoll(struct net_device *dev)
2617 {
2618         struct port_info *pi = netdev_priv(dev);
2619         struct adapter *adapter = pi->adapter;
2620         int qidx;
2621 
2622         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2623                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2624                 void *source;
2625 
2626                 if (adapter->flags & USING_MSIX)
2627                         source = qs;
2628                 else
2629                         source = adapter;
2630 
2631                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2632         }
2633 }
2634 #endif
2635 
2636 /*
2637  * Periodic accumulation of MAC statistics.
2638  */
2639 static void mac_stats_update(struct adapter *adapter)
2640 {
2641         int i;
2642 
2643         for_each_port(adapter, i) {
2644                 struct net_device *dev = adapter->port[i];
2645                 struct port_info *p = netdev_priv(dev);
2646 
2647                 if (netif_running(dev)) {
2648                         spin_lock(&adapter->stats_lock);
2649                         t3_mac_update_stats(&p->mac);
2650                         spin_unlock(&adapter->stats_lock);
2651                 }
2652         }
2653 }
2654 
2655 static void check_link_status(struct adapter *adapter)
2656 {
2657         int i;
2658 
2659         for_each_port(adapter, i) {
2660                 struct net_device *dev = adapter->port[i];
2661                 struct port_info *p = netdev_priv(dev);
2662                 int link_fault;
2663 
2664                 spin_lock_irq(&adapter->work_lock);
2665                 link_fault = p->link_fault;
2666                 spin_unlock_irq(&adapter->work_lock);
2667 
2668                 if (link_fault) {
2669                         t3_link_fault(adapter, i);
2670                         continue;
2671                 }
2672 
2673                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2674                         t3_xgm_intr_disable(adapter, i);
2675                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2676 
2677                         t3_link_changed(adapter, i);
2678                         t3_xgm_intr_enable(adapter, i);
2679                 }
2680         }
2681 }
2682 
2683 static void check_t3b2_mac(struct adapter *adapter)
2684 {
2685         int i;
2686 
2687         if (!rtnl_trylock())    /* synchronize with ifdown */
2688                 return;
2689 
2690         for_each_port(adapter, i) {
2691                 struct net_device *dev = adapter->port[i];
2692                 struct port_info *p = netdev_priv(dev);
2693                 int status;
2694 
2695                 if (!netif_running(dev))
2696                         continue;
2697 
2698                 status = 0;
2699                 if (netif_running(dev) && netif_carrier_ok(dev))
2700                         status = t3b2_mac_watchdog_task(&p->mac);
2701                 if (status == 1)
2702                         p->mac.stats.num_toggled++;
2703                 else if (status == 2) {
2704                         struct cmac *mac = &p->mac;
2705 
2706                         t3_mac_set_mtu(mac, dev->mtu);
2707                         t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2708                         cxgb_set_rxmode(dev);
2709                         t3_link_start(&p->phy, mac, &p->link_config);
2710                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2711                         t3_port_intr_enable(adapter, p->port_id);
2712                         p->mac.stats.num_resets++;
2713                 }
2714         }
2715         rtnl_unlock();
2716 }
2717 
2718 
2719 static void t3_adap_check_task(struct work_struct *work)
2720 {
2721         struct adapter *adapter = container_of(work, struct adapter,
2722                                                adap_check_task.work);
2723         const struct adapter_params *p = &adapter->params;
2724         int port;
2725         unsigned int v, status, reset;
2726 
2727         adapter->check_task_cnt++;
2728 
2729         check_link_status(adapter);
2730 
2731         /* Accumulate MAC stats if needed */
2732         if (!p->linkpoll_period ||
2733             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2734             p->stats_update_period) {
2735                 mac_stats_update(adapter);
2736                 adapter->check_task_cnt = 0;
2737         }
2738 
2739         if (p->rev == T3_REV_B2)
2740                 check_t3b2_mac(adapter);
2741 
2742         /*
2743          * Scan the XGMAC's to check for various conditions which we want to
2744          * monitor in a periodic polling manner rather than via an interrupt
2745          * condition.  This is used for conditions which would otherwise flood
2746          * the system with interrupts and we only really need to know that the
2747          * conditions are "happening" ...  For each condition we count the
2748          * detection of the condition and reset it for the next polling loop.
2749          */
2750         for_each_port(adapter, port) {
2751                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2752                 u32 cause;
2753 
2754                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2755                 reset = 0;
2756                 if (cause & F_RXFIFO_OVERFLOW) {
2757                         mac->stats.rx_fifo_ovfl++;
2758                         reset |= F_RXFIFO_OVERFLOW;
2759                 }
2760 
2761                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2762         }
2763 
2764         /*
2765          * We do the same as above for FL_EMPTY interrupts.
2766          */
2767         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2768         reset = 0;
2769 
2770         if (status & F_FLEMPTY) {
2771                 struct sge_qset *qs = &adapter->sge.qs[0];
2772                 int i = 0;
2773 
2774                 reset |= F_FLEMPTY;
2775 
2776                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2777                     0xffff;
2778 
2779                 while (v) {
2780                         qs->fl[i].empty += (v & 1);
2781                         if (i)
2782                                 qs++;
2783                         i ^= 1;
2784                         v >>= 1;
2785                 }
2786         }
2787 
2788         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2789 
2790         /* Schedule the next check update if any port is active. */
2791         spin_lock_irq(&adapter->work_lock);
2792         if (adapter->open_device_map & PORT_MASK)
2793                 schedule_chk_task(adapter);
2794         spin_unlock_irq(&adapter->work_lock);
2795 }
2796 
2797 static void db_full_task(struct work_struct *work)
2798 {
2799         struct adapter *adapter = container_of(work, struct adapter,
2800                                                db_full_task);
2801 
2802         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2803 }
2804 
2805 static void db_empty_task(struct work_struct *work)
2806 {
2807         struct adapter *adapter = container_of(work, struct adapter,
2808                                                db_empty_task);
2809 
2810         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2811 }
2812 
2813 static void db_drop_task(struct work_struct *work)
2814 {
2815         struct adapter *adapter = container_of(work, struct adapter,
2816                                                db_drop_task);
2817         unsigned long delay = 1000;
2818         unsigned short r;
2819 
2820         cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2821 
2822         /*
2823          * Sleep a while before ringing the driver qset dbs.
2824          * The delay is between 1000-2023 usecs.
2825          */
2826         get_random_bytes(&r, 2);
2827         delay += r & 1023;
2828         set_current_state(TASK_UNINTERRUPTIBLE);
2829         schedule_timeout(usecs_to_jiffies(delay));
2830         ring_dbs(adapter);
2831 }
2832 
2833 /*
2834  * Processes external (PHY) interrupts in process context.
2835  */
2836 static void ext_intr_task(struct work_struct *work)
2837 {
2838         struct adapter *adapter = container_of(work, struct adapter,
2839                                                ext_intr_handler_task);
2840         int i;
2841 
2842         /* Disable link fault interrupts */
2843         for_each_port(adapter, i) {
2844                 struct net_device *dev = adapter->port[i];
2845                 struct port_info *p = netdev_priv(dev);
2846 
2847                 t3_xgm_intr_disable(adapter, i);
2848                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2849         }
2850 
2851         /* Re-enable link fault interrupts */
2852         t3_phy_intr_handler(adapter);
2853 
2854         for_each_port(adapter, i)
2855                 t3_xgm_intr_enable(adapter, i);
2856 
2857         /* Now reenable external interrupts */
2858         spin_lock_irq(&adapter->work_lock);
2859         if (adapter->slow_intr_mask) {
2860                 adapter->slow_intr_mask |= F_T3DBG;
2861                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2862                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2863                              adapter->slow_intr_mask);
2864         }
2865         spin_unlock_irq(&adapter->work_lock);
2866 }
2867 
2868 /*
2869  * Interrupt-context handler for external (PHY) interrupts.
2870  */
2871 void t3_os_ext_intr_handler(struct adapter *adapter)
2872 {
2873         /*
2874          * Schedule a task to handle external interrupts as they may be slow
2875          * and we use a mutex to protect MDIO registers.  We disable PHY
2876          * interrupts in the meantime and let the task reenable them when
2877          * it's done.
2878          */
2879         spin_lock(&adapter->work_lock);
2880         if (adapter->slow_intr_mask) {
2881                 adapter->slow_intr_mask &= ~F_T3DBG;
2882                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2883                              adapter->slow_intr_mask);
2884                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2885         }
2886         spin_unlock(&adapter->work_lock);
2887 }
2888 
2889 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2890 {
2891         struct net_device *netdev = adapter->port[port_id];
2892         struct port_info *pi = netdev_priv(netdev);
2893 
2894         spin_lock(&adapter->work_lock);
2895         pi->link_fault = 1;
2896         spin_unlock(&adapter->work_lock);
2897 }
2898 
2899 static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2900 {
2901         int i, ret = 0;
2902 
2903         if (is_offload(adapter) &&
2904             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2905                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2906                 offload_close(&adapter->tdev);
2907         }
2908 
2909         /* Stop all ports */
2910         for_each_port(adapter, i) {
2911                 struct net_device *netdev = adapter->port[i];
2912 
2913                 if (netif_running(netdev))
2914                         __cxgb_close(netdev, on_wq);
2915         }
2916 
2917         /* Stop SGE timers */
2918         t3_stop_sge_timers(adapter);
2919 
2920         adapter->flags &= ~FULL_INIT_DONE;
2921 
2922         if (reset)
2923                 ret = t3_reset_adapter(adapter);
2924 
2925         pci_disable_device(adapter->pdev);
2926 
2927         return ret;
2928 }
2929 
2930 static int t3_reenable_adapter(struct adapter *adapter)
2931 {
2932         if (pci_enable_device(adapter->pdev)) {
2933                 dev_err(&adapter->pdev->dev,
2934                         "Cannot re-enable PCI device after reset.\n");
2935                 goto err;
2936         }
2937         pci_set_master(adapter->pdev);
2938         pci_restore_state(adapter->pdev);
2939         pci_save_state(adapter->pdev);
2940 
2941         /* Free sge resources */
2942         t3_free_sge_resources(adapter);
2943 
2944         if (t3_replay_prep_adapter(adapter))
2945                 goto err;
2946 
2947         return 0;
2948 err:
2949         return -1;
2950 }
2951 
2952 static void t3_resume_ports(struct adapter *adapter)
2953 {
2954         int i;
2955 
2956         /* Restart the ports */
2957         for_each_port(adapter, i) {
2958                 struct net_device *netdev = adapter->port[i];
2959 
2960                 if (netif_running(netdev)) {
2961                         if (cxgb_open(netdev)) {
2962                                 dev_err(&adapter->pdev->dev,
2963                                         "can't bring device back up"
2964                                         " after reset\n");
2965                                 continue;
2966                         }
2967                 }
2968         }
2969 
2970         if (is_offload(adapter) && !ofld_disable)
2971                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2972 }
2973 
2974 /*
2975  * processes a fatal error.
2976  * Bring the ports down, reset the chip, bring the ports back up.
2977  */
2978 static void fatal_error_task(struct work_struct *work)
2979 {
2980         struct adapter *adapter = container_of(work, struct adapter,
2981                                                fatal_error_handler_task);
2982         int err = 0;
2983 
2984         rtnl_lock();
2985         err = t3_adapter_error(adapter, 1, 1);
2986         if (!err)
2987                 err = t3_reenable_adapter(adapter);
2988         if (!err)
2989                 t3_resume_ports(adapter);
2990 
2991         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2992         rtnl_unlock();
2993 }
2994 
2995 void t3_fatal_err(struct adapter *adapter)
2996 {
2997         unsigned int fw_status[4];
2998 
2999         if (adapter->flags & FULL_INIT_DONE) {
3000                 t3_sge_stop(adapter);
3001                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
3002                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
3003                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
3004                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3005 
3006                 spin_lock(&adapter->work_lock);
3007                 t3_intr_disable(adapter);
3008                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3009                 spin_unlock(&adapter->work_lock);
3010         }
3011         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3012         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3013                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3014                          fw_status[0], fw_status[1],
3015                          fw_status[2], fw_status[3]);
3016 }
3017 
3018 /**
3019  * t3_io_error_detected - called when PCI error is detected
3020  * @pdev: Pointer to PCI device
3021  * @state: The current pci connection state
3022  *
3023  * This function is called after a PCI bus error affecting
3024  * this device has been detected.
3025  */
3026 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3027                                              pci_channel_state_t state)
3028 {
3029         struct adapter *adapter = pci_get_drvdata(pdev);
3030 
3031         if (state == pci_channel_io_perm_failure)
3032                 return PCI_ERS_RESULT_DISCONNECT;
3033 
3034         t3_adapter_error(adapter, 0, 0);
3035 
3036         /* Request a slot reset. */
3037         return PCI_ERS_RESULT_NEED_RESET;
3038 }
3039 
3040 /**
3041  * t3_io_slot_reset - called after the pci bus has been reset.
3042  * @pdev: Pointer to PCI device
3043  *
3044  * Restart the card from scratch, as if from a cold-boot.
3045  */
3046 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3047 {
3048         struct adapter *adapter = pci_get_drvdata(pdev);
3049 
3050         if (!t3_reenable_adapter(adapter))
3051                 return PCI_ERS_RESULT_RECOVERED;
3052 
3053         return PCI_ERS_RESULT_DISCONNECT;
3054 }
3055 
3056 /**
3057  * t3_io_resume - called when traffic can start flowing again.
3058  * @pdev: Pointer to PCI device
3059  *
3060  * This callback is called when the error recovery driver tells us that
3061  * its OK to resume normal operation.
3062  */
3063 static void t3_io_resume(struct pci_dev *pdev)
3064 {
3065         struct adapter *adapter = pci_get_drvdata(pdev);
3066 
3067         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3068                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
3069 
3070         rtnl_lock();
3071         t3_resume_ports(adapter);
3072         rtnl_unlock();
3073 }
3074 
3075 static const struct pci_error_handlers t3_err_handler = {
3076         .error_detected = t3_io_error_detected,
3077         .slot_reset = t3_io_slot_reset,
3078         .resume = t3_io_resume,
3079 };
3080 
3081 /*
3082  * Set the number of qsets based on the number of CPUs and the number of ports,
3083  * not to exceed the number of available qsets, assuming there are enough qsets
3084  * per port in HW.
3085  */
3086 static void set_nqsets(struct adapter *adap)
3087 {
3088         int i, j = 0;
3089         int num_cpus = netif_get_num_default_rss_queues();
3090         int hwports = adap->params.nports;
3091         int nqsets = adap->msix_nvectors - 1;
3092 
3093         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3094                 if (hwports == 2 &&
3095                     (hwports * nqsets > SGE_QSETS ||
3096                      num_cpus >= nqsets / hwports))
3097                         nqsets /= hwports;
3098                 if (nqsets > num_cpus)
3099                         nqsets = num_cpus;
3100                 if (nqsets < 1 || hwports == 4)
3101                         nqsets = 1;
3102         } else
3103                 nqsets = 1;
3104 
3105         for_each_port(adap, i) {
3106                 struct port_info *pi = adap2pinfo(adap, i);
3107 
3108                 pi->first_qset = j;
3109                 pi->nqsets = nqsets;
3110                 j = pi->first_qset + nqsets;
3111 
3112                 dev_info(&adap->pdev->dev,
3113                          "Port %d using %d queue sets.\n", i, nqsets);
3114         }
3115 }
3116 
3117 static int cxgb_enable_msix(struct adapter *adap)
3118 {
3119         struct msix_entry entries[SGE_QSETS + 1];
3120         int vectors;
3121         int i;
3122 
3123         vectors = ARRAY_SIZE(entries);
3124         for (i = 0; i < vectors; ++i)
3125                 entries[i].entry = i;
3126 
3127         vectors = pci_enable_msix_range(adap->pdev, entries,
3128                                         adap->params.nports + 1, vectors);
3129         if (vectors < 0)
3130                 return vectors;
3131 
3132         for (i = 0; i < vectors; ++i)
3133                 adap->msix_info[i].vec = entries[i].vector;
3134         adap->msix_nvectors = vectors;
3135 
3136         return 0;
3137 }
3138 
3139 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3140 {
3141         static const char *pci_variant[] = {
3142                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3143         };
3144 
3145         int i;
3146         char buf[80];
3147 
3148         if (is_pcie(adap))
3149                 snprintf(buf, sizeof(buf), "%s x%d",
3150                          pci_variant[adap->params.pci.variant],
3151                          adap->params.pci.width);
3152         else
3153                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3154                          pci_variant[adap->params.pci.variant],
3155                          adap->params.pci.speed, adap->params.pci.width);
3156 
3157         for_each_port(adap, i) {
3158                 struct net_device *dev = adap->port[i];
3159                 const struct port_info *pi = netdev_priv(dev);
3160 
3161                 if (!test_bit(i, &adap->registered_device_map))
3162                         continue;
3163                 netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3164                             ai->desc, pi->phy.desc,
3165                             is_offload(adap) ? "R" : "", adap->params.rev, buf,
3166                             (adap->flags & USING_MSIX) ? " MSI-X" :
3167                             (adap->flags & USING_MSI) ? " MSI" : "");
3168                 if (adap->name == dev->name && adap->params.vpd.mclk)
3169                         pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3170                                adap->name, t3_mc7_size(&adap->cm) >> 20,
3171                                t3_mc7_size(&adap->pmtx) >> 20,
3172                                t3_mc7_size(&adap->pmrx) >> 20,
3173                                adap->params.vpd.sn);
3174         }
3175 }
3176 
3177 static const struct net_device_ops cxgb_netdev_ops = {
3178         .ndo_open               = cxgb_open,
3179         .ndo_stop               = cxgb_close,
3180         .ndo_start_xmit         = t3_eth_xmit,
3181         .ndo_get_stats          = cxgb_get_stats,
3182         .ndo_validate_addr      = eth_validate_addr,
3183         .ndo_set_rx_mode        = cxgb_set_rxmode,
3184         .ndo_do_ioctl           = cxgb_ioctl,
3185         .ndo_change_mtu         = cxgb_change_mtu,
3186         .ndo_set_mac_address    = cxgb_set_mac_addr,
3187         .ndo_fix_features       = cxgb_fix_features,
3188         .ndo_set_features       = cxgb_set_features,
3189 #ifdef CONFIG_NET_POLL_CONTROLLER
3190         .ndo_poll_controller    = cxgb_netpoll,
3191 #endif
3192 };
3193 
3194 static void cxgb3_init_iscsi_mac(struct net_device *dev)
3195 {
3196         struct port_info *pi = netdev_priv(dev);
3197 
3198         memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3199         pi->iscsic.mac_addr[3] |= 0x80;
3200 }
3201 
3202 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3203 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3204                         NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3205 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3206 {
3207         int i, err, pci_using_dac = 0;
3208         resource_size_t mmio_start, mmio_len;
3209         const struct adapter_info *ai;
3210         struct adapter *adapter = NULL;
3211         struct port_info *pi;
3212 
3213         pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3214 
3215         if (!cxgb3_wq) {
3216                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3217                 if (!cxgb3_wq) {
3218                         pr_err("cannot initialize work queue\n");
3219                         return -ENOMEM;
3220                 }
3221         }
3222 
3223         err = pci_enable_device(pdev);
3224         if (err) {
3225                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3226                 goto out;
3227         }
3228 
3229         err = pci_request_regions(pdev, DRV_NAME);
3230         if (err) {
3231                 /* Just info, some other driver may have claimed the device. */
3232                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3233                 goto out_disable_device;
3234         }
3235 
3236         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3237                 pci_using_dac = 1;
3238                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3239                 if (err) {
3240                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3241                                "coherent allocations\n");
3242                         goto out_release_regions;
3243                 }
3244         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3245                 dev_err(&pdev->dev, "no usable DMA configuration\n");
3246                 goto out_release_regions;
3247         }
3248 
3249         pci_set_master(pdev);
3250         pci_save_state(pdev);
3251 
3252         mmio_start = pci_resource_start(pdev, 0);
3253         mmio_len = pci_resource_len(pdev, 0);
3254         ai = t3_get_adapter_info(ent->driver_data);
3255 
3256         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3257         if (!adapter) {
3258                 err = -ENOMEM;
3259                 goto out_release_regions;
3260         }
3261 
3262         adapter->nofail_skb =
3263                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3264         if (!adapter->nofail_skb) {
3265                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3266                 err = -ENOMEM;
3267                 goto out_free_adapter;
3268         }
3269 
3270         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3271         if (!adapter->regs) {
3272                 dev_err(&pdev->dev, "cannot map device registers\n");
3273                 err = -ENOMEM;
3274                 goto out_free_adapter_nofail;
3275         }
3276 
3277         adapter->pdev = pdev;
3278         adapter->name = pci_name(pdev);
3279         adapter->msg_enable = dflt_msg_enable;
3280         adapter->mmio_len = mmio_len;
3281 
3282         mutex_init(&adapter->mdio_lock);
3283         spin_lock_init(&adapter->work_lock);
3284         spin_lock_init(&adapter->stats_lock);
3285 
3286         INIT_LIST_HEAD(&adapter->adapter_list);
3287         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3288         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3289 
3290         INIT_WORK(&adapter->db_full_task, db_full_task);
3291         INIT_WORK(&adapter->db_empty_task, db_empty_task);
3292         INIT_WORK(&adapter->db_drop_task, db_drop_task);
3293 
3294         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3295 
3296         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3297                 struct net_device *netdev;
3298 
3299                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3300                 if (!netdev) {
3301                         err = -ENOMEM;
3302                         goto out_free_dev;
3303                 }
3304 
3305                 SET_NETDEV_DEV(netdev, &pdev->dev);
3306 
3307                 adapter->port[i] = netdev;
3308                 pi = netdev_priv(netdev);
3309                 pi->adapter = adapter;
3310                 pi->port_id = i;
3311                 netif_carrier_off(netdev);
3312                 netdev->irq = pdev->irq;
3313                 netdev->mem_start = mmio_start;
3314                 netdev->mem_end = mmio_start + mmio_len - 1;
3315                 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3316                         NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3317                 netdev->features |= netdev->hw_features |
3318                                     NETIF_F_HW_VLAN_CTAG_TX;
3319                 netdev->vlan_features |= netdev->features & VLAN_FEAT;
3320                 if (pci_using_dac)
3321                         netdev->features |= NETIF_F_HIGHDMA;
3322 
3323                 netdev->netdev_ops = &cxgb_netdev_ops;
3324                 netdev->ethtool_ops = &cxgb_ethtool_ops;
3325                 netdev->min_mtu = 81;
3326                 netdev->max_mtu = ETH_MAX_MTU;
3327                 netdev->dev_port = pi->port_id;
3328         }
3329 
3330         pci_set_drvdata(pdev, adapter);
3331         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3332                 err = -ENODEV;
3333                 goto out_free_dev;
3334         }
3335 
3336         /*
3337          * The card is now ready to go.  If any errors occur during device
3338          * registration we do not fail the whole card but rather proceed only
3339          * with the ports we manage to register successfully.  However we must
3340          * register at least one net device.
3341          */
3342         for_each_port(adapter, i) {
3343                 err = register_netdev(adapter->port[i]);
3344                 if (err)
3345                         dev_warn(&pdev->dev,
3346                                  "cannot register net device %s, skipping\n",
3347                                  adapter->port[i]->name);
3348                 else {
3349                         /*
3350                          * Change the name we use for messages to the name of
3351                          * the first successfully registered interface.
3352                          */
3353                         if (!adapter->registered_device_map)
3354                                 adapter->name = adapter->port[i]->name;
3355 
3356                         __set_bit(i, &adapter->registered_device_map);
3357                 }
3358         }
3359         if (!adapter->registered_device_map) {
3360                 dev_err(&pdev->dev, "could not register any net devices\n");
3361                 goto out_free_dev;
3362         }
3363 
3364         for_each_port(adapter, i)
3365                 cxgb3_init_iscsi_mac(adapter->port[i]);
3366 
3367         /* Driver's ready. Reflect it on LEDs */
3368         t3_led_ready(adapter);
3369 
3370         if (is_offload(adapter)) {
3371                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3372                 cxgb3_adapter_ofld(adapter);
3373         }
3374 
3375         /* See what interrupts we'll be using */
3376         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3377                 adapter->flags |= USING_MSIX;
3378         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3379                 adapter->flags |= USING_MSI;
3380 
3381         set_nqsets(adapter);
3382 
3383         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3384                                  &cxgb3_attr_group);
3385         if (err) {
3386                 dev_err(&pdev->dev, "cannot create sysfs group\n");
3387                 goto out_close_led;
3388         }
3389 
3390         print_port_info(adapter, ai);
3391         return 0;
3392 
3393 out_close_led:
3394         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3395 
3396 out_free_dev:
3397         iounmap(adapter->regs);
3398         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3399                 if (adapter->port[i])
3400                         free_netdev(adapter->port[i]);
3401 
3402 out_free_adapter_nofail:
3403         kfree_skb(adapter->nofail_skb);
3404 
3405 out_free_adapter:
3406         kfree(adapter);
3407 
3408 out_release_regions:
3409         pci_release_regions(pdev);
3410 out_disable_device:
3411         pci_disable_device(pdev);
3412 out:
3413         return err;
3414 }
3415 
3416 static void remove_one(struct pci_dev *pdev)
3417 {
3418         struct adapter *adapter = pci_get_drvdata(pdev);
3419 
3420         if (adapter) {
3421                 int i;
3422 
3423                 t3_sge_stop(adapter);
3424                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3425                                    &cxgb3_attr_group);
3426 
3427                 if (is_offload(adapter)) {
3428                         cxgb3_adapter_unofld(adapter);
3429                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3430                                      &adapter->open_device_map))
3431                                 offload_close(&adapter->tdev);
3432                 }
3433 
3434                 for_each_port(adapter, i)
3435                     if (test_bit(i, &adapter->registered_device_map))
3436                         unregister_netdev(adapter->port[i]);
3437 
3438                 t3_stop_sge_timers(adapter);
3439                 t3_free_sge_resources(adapter);
3440                 cxgb_disable_msi(adapter);
3441 
3442                 for_each_port(adapter, i)
3443                         if (adapter->port[i])
3444                                 free_netdev(adapter->port[i]);
3445 
3446                 iounmap(adapter->regs);
3447                 kfree_skb(adapter->nofail_skb);
3448                 kfree(adapter);
3449                 pci_release_regions(pdev);
3450                 pci_disable_device(pdev);
3451         }
3452 }
3453 
3454 static struct pci_driver driver = {
3455         .name = DRV_NAME,
3456         .id_table = cxgb3_pci_tbl,
3457         .probe = init_one,
3458         .remove = remove_one,
3459         .err_handler = &t3_err_handler,
3460 };
3461 
3462 static int __init cxgb3_init_module(void)
3463 {
3464         int ret;
3465 
3466         cxgb3_offload_init();
3467 
3468         ret = pci_register_driver(&driver);
3469         return ret;
3470 }
3471 
3472 static void __exit cxgb3_cleanup_module(void)
3473 {
3474         pci_unregister_driver(&driver);
3475         if (cxgb3_wq)
3476                 destroy_workqueue(cxgb3_wq);
3477 }
3478 
3479 module_init(cxgb3_init_module);
3480 module_exit(cxgb3_cleanup_module);

/* [<][>][^][v][top][bottom][index][help] */