root/drivers/net/ethernet/brocade/bna/bnad.c

DEFINITIONS

This source file includes the following definitions:
  1. bnad_cq_cleanup
  2. bnad_tx_buff_unmap
  3. bnad_txq_cleanup
  4. bnad_txcmpl_process
  5. bnad_tx_complete
  6. bnad_msix_tx
  7. bnad_rxq_alloc_uninit
  8. bnad_rxq_alloc_init
  9. bnad_rxq_cleanup_page
  10. bnad_rxq_cleanup_skb
  11. bnad_rxq_cleanup
  12. bnad_rxq_refill_page
  13. bnad_rxq_refill_skb
  14. bnad_rxq_post
  15. bnad_cq_drop_packet
  16. bnad_cq_setup_skb_frags
  17. bnad_cq_setup_skb
  18. bnad_cq_process
  19. bnad_netif_rx_schedule_poll
  20. bnad_msix_rx
  21. bnad_msix_mbox_handler
  22. bnad_isr
  23. bnad_enable_mbox_irq
  24. bnad_disable_mbox_irq
  25. bnad_set_netdev_perm_addr
  26. bnad_cb_mbox_intr_enable
  27. bnad_cb_mbox_intr_disable
  28. bnad_cb_ioceth_ready
  29. bnad_cb_ioceth_failed
  30. bnad_cb_ioceth_disabled
  31. bnad_cb_enet_disabled
  32. bnad_cb_ethport_link_status
  33. bnad_cb_tx_disabled
  34. bnad_cb_tcb_setup
  35. bnad_cb_tcb_destroy
  36. bnad_cb_ccb_setup
  37. bnad_cb_ccb_destroy
  38. bnad_cb_tx_stall
  39. bnad_cb_tx_resume
  40. bnad_tx_cleanup
  41. bnad_cb_tx_cleanup
  42. bnad_cb_rx_stall
  43. bnad_rx_cleanup
  44. bnad_cb_rx_cleanup
  45. bnad_cb_rx_post
  46. bnad_cb_rx_disabled
  47. bnad_cb_rx_mcast_add
  48. bnad_cb_stats_get
  49. bnad_cb_enet_mtu_set
  50. bnad_cb_completion
  51. bnad_mem_free
  52. bnad_mem_alloc
  53. bnad_mbox_irq_free
  54. bnad_mbox_irq_alloc
  55. bnad_txrx_irq_free
  56. bnad_txrx_irq_alloc
  57. bnad_tx_msix_unregister
  58. bnad_tx_msix_register
  59. bnad_rx_msix_unregister
  60. bnad_rx_msix_register
  61. bnad_tx_res_free
  62. bnad_tx_res_alloc
  63. bnad_rx_res_free
  64. bnad_rx_res_alloc
  65. bnad_ioc_timeout
  66. bnad_ioc_hb_check
  67. bnad_iocpf_timeout
  68. bnad_iocpf_sem_timeout
  69. bnad_dim_timeout
  70. bnad_stats_timeout
  71. bnad_dim_timer_start
  72. bnad_stats_timer_start
  73. bnad_stats_timer_stop
  74. bnad_netdev_mc_list_get
  75. bnad_napi_poll_rx
  76. bnad_napi_add
  77. bnad_napi_delete
  78. bnad_destroy_tx
  79. bnad_setup_tx
  80. bnad_init_rx_config
  81. bnad_rx_ctrl_init
  82. bnad_reinit_rx
  83. bnad_destroy_rx
  84. bnad_setup_rx
  85. bnad_tx_coalescing_timeo_set
  86. bnad_rx_coalescing_timeo_set
  87. bnad_mac_addr_set_locked
  88. bnad_enable_default_bcast
  89. bnad_restore_vlans
  90. bnad_netdev_qstats_fill
  91. bnad_netdev_hwstats_fill
  92. bnad_mbox_irq_sync
  93. bnad_tso_prepare
  94. bnad_q_num_init
  95. bnad_q_num_adjust
  96. bnad_ioceth_disable
  97. bnad_ioceth_enable
  98. bnad_res_free
  99. bnad_res_alloc
  100. bnad_enable_msix
  101. bnad_disable_msix
  102. bnad_open
  103. bnad_stop
  104. bnad_txq_wi_prepare
  105. bnad_start_xmit
  106. bnad_get_stats64
  107. bnad_set_rx_ucast_fltr
  108. bnad_set_rx_mcast_fltr
  109. bnad_set_rx_mode
  110. bnad_set_mac_address
  111. bnad_mtu_set
  112. bnad_change_mtu
  113. bnad_vlan_rx_add_vid
  114. bnad_vlan_rx_kill_vid
  115. bnad_set_features
  116. bnad_netpoll
  117. bnad_netdev_init
  118. bnad_init
  119. bnad_uninit
  120. bnad_lock_init
  121. bnad_lock_uninit
  122. bnad_pci_init
  123. bnad_pci_uninit
  124. bnad_pci_probe
  125. bnad_pci_remove
  126. bnad_module_init
  127. bnad_module_exit

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Linux network driver for QLogic BR-series Converged Network Adapter.
   4  */
   5 /*
   6  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
   7  * Copyright (c) 2014-2015 QLogic Corporation
   8  * All rights reserved
   9  * www.qlogic.com
  10  */
  11 #include <linux/bitops.h>
  12 #include <linux/netdevice.h>
  13 #include <linux/skbuff.h>
  14 #include <linux/etherdevice.h>
  15 #include <linux/in.h>
  16 #include <linux/ethtool.h>
  17 #include <linux/if_vlan.h>
  18 #include <linux/if_ether.h>
  19 #include <linux/ip.h>
  20 #include <linux/prefetch.h>
  21 #include <linux/module.h>
  22 
  23 #include "bnad.h"
  24 #include "bna.h"
  25 #include "cna.h"
  26 
  27 static DEFINE_MUTEX(bnad_fwimg_mutex);
  28 
  29 /*
  30  * Module params
  31  */
  32 static uint bnad_msix_disable;
  33 module_param(bnad_msix_disable, uint, 0444);
  34 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
  35 
  36 static uint bnad_ioc_auto_recover = 1;
  37 module_param(bnad_ioc_auto_recover, uint, 0444);
  38 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
  39 
  40 static uint bna_debugfs_enable = 1;
  41 module_param(bna_debugfs_enable, uint, 0644);
  42 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
  43                  " Range[false:0|true:1]");
  44 
  45 /*
  46  * Global variables
  47  */
  48 static u32 bnad_rxqs_per_cq = 2;
  49 static atomic_t bna_id;
  50 static const u8 bnad_bcast_addr[] __aligned(2) =
  51         { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
  52 
  53 /*
  54  * Local MACROS
  55  */
  56 #define BNAD_GET_MBOX_IRQ(_bnad)                                \
  57         (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?                  \
  58          ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
  59          ((_bnad)->pcidev->irq))
  60 
  61 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)        \
  62 do {                                                            \
  63         (_res_info)->res_type = BNA_RES_T_MEM;                  \
  64         (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
  65         (_res_info)->res_u.mem_info.num = (_num);               \
  66         (_res_info)->res_u.mem_info.len = (_size);              \
  67 } while (0)
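
      /*
       * The macro above fills one bna_res_info slot with a request for
       * kernel-virtual (KVA) memory used as an unmap queue.  A minimal usage
       * sketch, not taken from this listing (the resource index and sizing
       * below are illustrative assumptions):
       *
       *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
       *			bnad->num_txq_per_tx,
       *			bnad->txq_depth * sizeof(struct bnad_tx_unmap));
       *
       * i.e. one descriptor per TxQ, each large enough to hold q_depth
       * bnad_tx_unmap entries.
       */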
  68 
  69 /*
  70  * Reinitialize completions in CQ, once Rx is taken down
  71  */
  72 static void
  73 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
  74 {
  75         struct bna_cq_entry *cmpl;
  76         int i;
  77 
  78         for (i = 0; i < ccb->q_depth; i++) {
  79                 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
  80                 cmpl->valid = 0;
  81         }
  82 }
  83 
  84 /* Tx Datapath functions */
  85 
  86 
  87 /* Caller should ensure that the entry at unmap_q[index] is valid */
  88 static u32
  89 bnad_tx_buff_unmap(struct bnad *bnad,
  90                               struct bnad_tx_unmap *unmap_q,
  91                               u32 q_depth, u32 index)
  92 {
  93         struct bnad_tx_unmap *unmap;
  94         struct sk_buff *skb;
  95         int vector, nvecs;
  96 
  97         unmap = &unmap_q[index];
  98         nvecs = unmap->nvecs;
  99 
 100         skb = unmap->skb;
 101         unmap->skb = NULL;
 102         unmap->nvecs = 0;
 103         dma_unmap_single(&bnad->pcidev->dev,
 104                 dma_unmap_addr(&unmap->vectors[0], dma_addr),
 105                 skb_headlen(skb), DMA_TO_DEVICE);
 106         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
 107         nvecs--;
 108 
 109         vector = 0;
 110         while (nvecs) {
 111                 vector++;
 112                 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
 113                         vector = 0;
 114                         BNA_QE_INDX_INC(index, q_depth);
 115                         unmap = &unmap_q[index];
 116                 }
 117 
 118                 dma_unmap_page(&bnad->pcidev->dev,
 119                         dma_unmap_addr(&unmap->vectors[vector], dma_addr),
 120                         dma_unmap_len(&unmap->vectors[vector], dma_len),
 121                         DMA_TO_DEVICE);
 122                 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
 123                 nvecs--;
 124         }
 125 
 126         BNA_QE_INDX_INC(index, q_depth);
 127 
 128         return index;
 129 }
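
      /*
       * Note on bnad_tx_buff_unmap() above: vector 0 of the first unmap entry
       * is the skb linear data, released with dma_unmap_single(); every
       * remaining vector is a page fragment released with dma_unmap_page().
       * A frame that uses more than BFI_TX_MAX_VECTORS_PER_WI vectors spills
       * its fragments into the following unmap_q entries, which is why the
       * index is advanced with BNA_QE_INDX_INC() each time the per-entry
       * vector count wraps.  The value returned is the queue index just past
       * the frame's last vector.
       */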
 130 
 131 /*
 132  * Frees all pending Tx Bufs
 133  * At this point no activity is expected on the Q,
 134  * so DMA unmap & freeing is fine.
 135  */
 136 static void
 137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 138 {
 139         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
 140         struct sk_buff *skb;
 141         int i;
 142 
 143         for (i = 0; i < tcb->q_depth; i++) {
 144                 skb = unmap_q[i].skb;
 145                 if (!skb)
 146                         continue;
 147                 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
 148 
 149                 dev_kfree_skb_any(skb);
 150         }
 151 }
 152 
 153 /*
 154  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 155  * Can be called in a) Interrupt context
 156  *                  b) Sending context
 157  */
 158 static u32
 159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
 160 {
 161         u32 sent_packets = 0, sent_bytes = 0;
 162         u32 wis, unmap_wis, hw_cons, cons, q_depth;
 163         struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
 164         struct bnad_tx_unmap *unmap;
 165         struct sk_buff *skb;
 166 
 167         /* Just return if TX is stopped */
 168         if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 169                 return 0;
 170 
 171         hw_cons = *(tcb->hw_consumer_index);
 172         rmb();
 173         cons = tcb->consumer_index;
 174         q_depth = tcb->q_depth;
 175 
 176         wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
 177         BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
 178 
 179         while (wis) {
 180                 unmap = &unmap_q[cons];
 181 
 182                 skb = unmap->skb;
 183 
 184                 sent_packets++;
 185                 sent_bytes += skb->len;
 186 
 187                 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
 188                 wis -= unmap_wis;
 189 
 190                 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
 191                 dev_kfree_skb_any(skb);
 192         }
 193 
 194         /* Update consumer pointers. */
 195         tcb->consumer_index = hw_cons;
 196 
 197         tcb->txq->tx_packets += sent_packets;
 198         tcb->txq->tx_bytes += sent_bytes;
 199 
 200         return sent_packets;
 201 }
 202 
 203 static u32
 204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
 205 {
 206         struct net_device *netdev = bnad->netdev;
 207         u32 sent = 0;
 208 
 209         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 210                 return 0;
 211 
 212         sent = bnad_txcmpl_process(bnad, tcb);
 213         if (sent) {
 214                 if (netif_queue_stopped(netdev) &&
 215                     netif_carrier_ok(netdev) &&
 216                     BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 217                                     BNAD_NETIF_WAKE_THRESHOLD) {
 218                         if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 219                                 netif_wake_queue(netdev);
 220                                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
 221                         }
 222                 }
 223         }
 224 
 225         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 226                 bna_ib_ack(tcb->i_dbell, sent);
 227 
 228         smp_mb__before_atomic();
 229         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 230 
 231         return sent;
 232 }
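
      /*
       * BNAD_TXQ_FREE_SENT works as a lightweight lock in bnad_tx_complete():
       * test_and_set_bit() lets only one context (the MSI-X handler, the INTx
       * handler or the sending path) reap completions on a TCB at a time, and
       * the smp_mb__before_atomic() + clear_bit() pair releases it after the
       * consumer index and counters have been updated.  The netdev queue is
       * woken only when the carrier is up and at least
       * BNAD_NETIF_WAKE_THRESHOLD entries are free again.
       */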
 233 
 234 /* MSIX Tx Completion Handler */
 235 static irqreturn_t
 236 bnad_msix_tx(int irq, void *data)
 237 {
 238         struct bna_tcb *tcb = (struct bna_tcb *)data;
 239         struct bnad *bnad = tcb->bnad;
 240 
 241         bnad_tx_complete(bnad, tcb);
 242 
 243         return IRQ_HANDLED;
 244 }
 245 
 246 static inline void
 247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
 248 {
 249         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 250 
 251         unmap_q->reuse_pi = -1;
 252         unmap_q->alloc_order = -1;
 253         unmap_q->map_size = 0;
 254         unmap_q->type = BNAD_RXBUF_NONE;
 255 }
 256 
 257 /* Default is page-based allocation. Multi-buffer support - TBD */
 258 static int
 259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 260 {
 261         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 262         int order;
 263 
 264         bnad_rxq_alloc_uninit(bnad, rcb);
 265 
 266         order = get_order(rcb->rxq->buffer_size);
 267 
 268         unmap_q->type = BNAD_RXBUF_PAGE;
 269 
 270         if (bna_is_small_rxq(rcb->id)) {
 271                 unmap_q->alloc_order = 0;
 272                 unmap_q->map_size = rcb->rxq->buffer_size;
 273         } else {
 274                 if (rcb->rxq->multi_buffer) {
 275                         unmap_q->alloc_order = 0;
 276                         unmap_q->map_size = rcb->rxq->buffer_size;
 277                         unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
 278                 } else {
 279                         unmap_q->alloc_order = order;
 280                         unmap_q->map_size =
 281                                 (rcb->rxq->buffer_size > 2048) ?
 282                                 PAGE_SIZE << order : 2048;
 283                 }
 284         }
 285 
 286         BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
 287 
 288         return 0;
 289 }
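
      /*
       * Buffer layout chosen by bnad_rxq_alloc_init(): small (header) RxQs and
       * multi-buffer RxQs map exactly buffer_size bytes per entry from order-0
       * pages, while large single-buffer RxQs allocate an order-N page
       * (N = get_order(buffer_size)) and carve it into map_size chunks, using
       * 2048-byte slots when the buffer fits in 2048 bytes.  The BUG_ON()
       * guarantees that map_size divides the allocation evenly, so
       * bnad_rxq_refill_page() can share a single page across several entries.
       */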
 290 
 291 static inline void
 292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
 293 {
 294         if (!unmap->page)
 295                 return;
 296 
 297         dma_unmap_page(&bnad->pcidev->dev,
 298                         dma_unmap_addr(&unmap->vector, dma_addr),
 299                         unmap->vector.len, DMA_FROM_DEVICE);
 300         put_page(unmap->page);
 301         unmap->page = NULL;
 302         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
 303         unmap->vector.len = 0;
 304 }
 305 
 306 static inline void
 307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
 308 {
 309         if (!unmap->skb)
 310                 return;
 311 
 312         dma_unmap_single(&bnad->pcidev->dev,
 313                         dma_unmap_addr(&unmap->vector, dma_addr),
 314                         unmap->vector.len, DMA_FROM_DEVICE);
 315         dev_kfree_skb_any(unmap->skb);
 316         unmap->skb = NULL;
 317         dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
 318         unmap->vector.len = 0;
 319 }
 320 
 321 static void
 322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
 323 {
 324         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 325         int i;
 326 
 327         for (i = 0; i < rcb->q_depth; i++) {
 328                 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 329 
 330                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 331                         bnad_rxq_cleanup_skb(bnad, unmap);
 332                 else
 333                         bnad_rxq_cleanup_page(bnad, unmap);
 334         }
 335         bnad_rxq_alloc_uninit(bnad, rcb);
 336 }
 337 
 338 static u32
 339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
 340 {
 341         u32 alloced, prod, q_depth;
 342         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 343         struct bnad_rx_unmap *unmap, *prev;
 344         struct bna_rxq_entry *rxent;
 345         struct page *page;
 346         u32 page_offset, alloc_size;
 347         dma_addr_t dma_addr;
 348 
 349         prod = rcb->producer_index;
 350         q_depth = rcb->q_depth;
 351 
 352         alloc_size = PAGE_SIZE << unmap_q->alloc_order;
 353         alloced = 0;
 354 
 355         while (nalloc--) {
 356                 unmap = &unmap_q->unmap[prod];
 357 
 358                 if (unmap_q->reuse_pi < 0) {
 359                         page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
 360                                         unmap_q->alloc_order);
 361                         page_offset = 0;
 362                 } else {
 363                         prev = &unmap_q->unmap[unmap_q->reuse_pi];
 364                         page = prev->page;
 365                         page_offset = prev->page_offset + unmap_q->map_size;
 366                         get_page(page);
 367                 }
 368 
 369                 if (unlikely(!page)) {
 370                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
 371                         rcb->rxq->rxbuf_alloc_failed++;
 372                         goto finishing;
 373                 }
 374 
 375                 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
 376                                         unmap_q->map_size, DMA_FROM_DEVICE);
 377                 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
 378                         put_page(page);
 379                         BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
 380                         rcb->rxq->rxbuf_map_failed++;
 381                         goto finishing;
 382                 }
 383 
 384                 unmap->page = page;
 385                 unmap->page_offset = page_offset;
 386                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
 387                 unmap->vector.len = unmap_q->map_size;
 388                 page_offset += unmap_q->map_size;
 389 
 390                 if (page_offset < alloc_size)
 391                         unmap_q->reuse_pi = prod;
 392                 else
 393                         unmap_q->reuse_pi = -1;
 394 
 395                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
 396                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 397                 BNA_QE_INDX_INC(prod, q_depth);
 398                 alloced++;
 399         }
 400 
 401 finishing:
 402         if (likely(alloced)) {
 403                 rcb->producer_index = prod;
 404                 smp_mb();
 405                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
 406                         bna_rxq_prod_indx_doorbell(rcb);
 407         }
 408 
 409         return alloced;
 410 }
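
      /*
       * Page-reuse scheme used in bnad_rxq_refill_page(): reuse_pi remembers
       * the producer slot whose page still has room for another map_size
       * chunk.  A reused page takes an extra reference via get_page(), so the
       * per-entry put_page() in bnad_rxq_cleanup_page() stays balanced.  Once
       * page_offset reaches alloc_size (PAGE_SIZE << alloc_order), reuse_pi is
       * reset to -1 and the next entry starts from a fresh alloc_pages() call.
       */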
 411 
 412 static u32
 413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
 414 {
 415         u32 alloced, prod, q_depth, buff_sz;
 416         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 417         struct bnad_rx_unmap *unmap;
 418         struct bna_rxq_entry *rxent;
 419         struct sk_buff *skb;
 420         dma_addr_t dma_addr;
 421 
 422         buff_sz = rcb->rxq->buffer_size;
 423         prod = rcb->producer_index;
 424         q_depth = rcb->q_depth;
 425 
 426         alloced = 0;
 427         while (nalloc--) {
 428                 unmap = &unmap_q->unmap[prod];
 429 
 430                 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
 431 
 432                 if (unlikely(!skb)) {
 433                         BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
 434                         rcb->rxq->rxbuf_alloc_failed++;
 435                         goto finishing;
 436                 }
 437 
 438                 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
 439                                           buff_sz, DMA_FROM_DEVICE);
 440                 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
 441                         dev_kfree_skb_any(skb);
 442                         BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
 443                         rcb->rxq->rxbuf_map_failed++;
 444                         goto finishing;
 445                 }
 446 
 447                 unmap->skb = skb;
 448                 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
 449                 unmap->vector.len = buff_sz;
 450 
 451                 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
 452                 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 453                 BNA_QE_INDX_INC(prod, q_depth);
 454                 alloced++;
 455         }
 456 
 457 finishing:
 458         if (likely(alloced)) {
 459                 rcb->producer_index = prod;
 460                 smp_mb();
 461                 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
 462                         bna_rxq_prod_indx_doorbell(rcb);
 463         }
 464 
 465         return alloced;
 466 }
 467 
 468 static inline void
 469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 470 {
 471         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
 472         u32 to_alloc;
 473 
 474         to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
 475         if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
 476                 return;
 477 
 478         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 479                 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
 480         else
 481                 bnad_rxq_refill_page(bnad, rcb, to_alloc);
 482 }
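
      /*
       * The shift test in bnad_rxq_post() means the queue is only replenished
       * once at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are free;
       * smaller deficits are left for a later pass, presumably to keep the
       * producer-index doorbell writes batched.
       */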
 483 
 484 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 485                                         BNA_CQ_EF_IPV6 | \
 486                                         BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
 487                                         BNA_CQ_EF_L4_CKSUM_OK)
 488 
 489 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 490                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
 491 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
 492                                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
 493 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
 494                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 495 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
 496                                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 497 
 498 static void
 499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
 500                     u32 sop_ci, u32 nvecs)
 501 {
 502         struct bnad_rx_unmap_q *unmap_q;
 503         struct bnad_rx_unmap *unmap;
 504         u32 ci, vec;
 505 
 506         unmap_q = rcb->unmap_q;
 507         for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
 508                 unmap = &unmap_q->unmap[ci];
 509                 BNA_QE_INDX_INC(ci, rcb->q_depth);
 510 
 511                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 512                         bnad_rxq_cleanup_skb(bnad, unmap);
 513                 else
 514                         bnad_rxq_cleanup_page(bnad, unmap);
 515         }
 516 }
 517 
 518 static void
 519 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
 520 {
 521         struct bna_rcb *rcb;
 522         struct bnad *bnad;
 523         struct bnad_rx_unmap_q *unmap_q;
 524         struct bna_cq_entry *cq, *cmpl;
 525         u32 ci, pi, totlen = 0;
 526 
 527         cq = ccb->sw_q;
 528         pi = ccb->producer_index;
 529         cmpl = &cq[pi];
 530 
 531         rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
 532         unmap_q = rcb->unmap_q;
 533         bnad = rcb->bnad;
 534         ci = rcb->consumer_index;
 535 
 536         /* prefetch header */
 537         prefetch(page_address(unmap_q->unmap[ci].page) +
 538                  unmap_q->unmap[ci].page_offset);
 539 
 540         while (nvecs--) {
 541                 struct bnad_rx_unmap *unmap;
 542                 u32 len;
 543 
 544                 unmap = &unmap_q->unmap[ci];
 545                 BNA_QE_INDX_INC(ci, rcb->q_depth);
 546 
 547                 dma_unmap_page(&bnad->pcidev->dev,
 548                                dma_unmap_addr(&unmap->vector, dma_addr),
 549                                unmap->vector.len, DMA_FROM_DEVICE);
 550 
 551                 len = ntohs(cmpl->length);
 552                 skb->truesize += unmap->vector.len;
 553                 totlen += len;
 554 
 555                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 556                                    unmap->page, unmap->page_offset, len);
 557 
 558                 unmap->page = NULL;
 559                 unmap->vector.len = 0;
 560 
 561                 BNA_QE_INDX_INC(pi, ccb->q_depth);
 562                 cmpl = &cq[pi];
 563         }
 564 
 565         skb->len += totlen;
 566         skb->data_len += totlen;
 567 }
 568 
 569 static inline void
 570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
 571                   struct bnad_rx_unmap *unmap, u32 len)
 572 {
 573         prefetch(skb->data);
 574 
 575         dma_unmap_single(&bnad->pcidev->dev,
 576                         dma_unmap_addr(&unmap->vector, dma_addr),
 577                         unmap->vector.len, DMA_FROM_DEVICE);
 578 
 579         skb_put(skb, len);
 580         skb->protocol = eth_type_trans(skb, bnad->netdev);
 581 
 582         unmap->skb = NULL;
 583         unmap->vector.len = 0;
 584 }
 585 
 586 static u32
 587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 588 {
 589         struct bna_cq_entry *cq, *cmpl, *next_cmpl;
 590         struct bna_rcb *rcb = NULL;
 591         struct bnad_rx_unmap_q *unmap_q;
 592         struct bnad_rx_unmap *unmap = NULL;
 593         struct sk_buff *skb = NULL;
 594         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 595         struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
 596         u32 packets = 0, len = 0, totlen = 0;
 597         u32 pi, vec, sop_ci = 0, nvecs = 0;
 598         u32 flags, masked_flags;
 599 
 600         prefetch(bnad->netdev);
 601 
 602         cq = ccb->sw_q;
 603 
 604         while (packets < budget) {
 605                 cmpl = &cq[ccb->producer_index];
 606                 if (!cmpl->valid)
 607                         break;
 608                 /* The 'valid' field is set by the adapter, only after writing
 609                  * the other fields of completion entry. Hence, do not load
 610                  * other fields of completion entry *before* the 'valid' is
 611                  * loaded. Adding the rmb() here prevents the compiler and/or
 612                  * CPU from reordering the reads which would potentially result
 613                  * in reading stale values in completion entry.
 614                  */
 615                 rmb();
 616 
 617                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 618 
 619                 if (bna_is_small_rxq(cmpl->rxq_id))
 620                         rcb = ccb->rcb[1];
 621                 else
 622                         rcb = ccb->rcb[0];
 623 
 624                 unmap_q = rcb->unmap_q;
 625 
 626                 /* start of packet ci */
 627                 sop_ci = rcb->consumer_index;
 628 
 629                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
 630                         unmap = &unmap_q->unmap[sop_ci];
 631                         skb = unmap->skb;
 632                 } else {
 633                         skb = napi_get_frags(&rx_ctrl->napi);
 634                         if (unlikely(!skb))
 635                                 break;
 636                 }
 637                 prefetch(skb);
 638 
 639                 flags = ntohl(cmpl->flags);
 640                 len = ntohs(cmpl->length);
 641                 totlen = len;
 642                 nvecs = 1;
 643 
 644                 /* Check all the completions for this frame.
 645                  * busy-wait doesn't help much, break here.
 646                  */
 647                 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
 648                     (flags & BNA_CQ_EF_EOP) == 0) {
 649                         pi = ccb->producer_index;
 650                         do {
 651                                 BNA_QE_INDX_INC(pi, ccb->q_depth);
 652                                 next_cmpl = &cq[pi];
 653 
 654                                 if (!next_cmpl->valid)
 655                                         break;
 656                                 /* The 'valid' field is set by the adapter, only
 657                                  * after writing the other fields of completion
 658                                  * entry. Hence, do not load other fields of
 659                                  * completion entry *before* the 'valid' is
 660                                  * loaded. Adding the rmb() here prevents the
 661                                  * compiler and/or CPU from reordering the reads
 662                                  * which would potentially result in reading
 663                                  * stale values in completion entry.
 664                                  */
 665                                 rmb();
 666 
 667                                 len = ntohs(next_cmpl->length);
 668                                 flags = ntohl(next_cmpl->flags);
 669 
 670                                 nvecs++;
 671                                 totlen += len;
 672                         } while ((flags & BNA_CQ_EF_EOP) == 0);
 673 
 674                         if (!next_cmpl->valid)
 675                                 break;
 676                 }
 677                 packets++;
 678 
 679                 /* TODO: BNA_CQ_EF_LOCAL ? */
 680                 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
 681                                                 BNA_CQ_EF_FCS_ERROR |
 682                                                 BNA_CQ_EF_TOO_LONG))) {
 683                         bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
 684                         rcb->rxq->rx_packets_with_error++;
 685 
 686                         goto next;
 687                 }
 688 
 689                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 690                         bnad_cq_setup_skb(bnad, skb, unmap, len);
 691                 else
 692                         bnad_cq_setup_skb_frags(ccb, skb, nvecs);
 693 
 694                 rcb->rxq->rx_packets++;
 695                 rcb->rxq->rx_bytes += totlen;
 696                 ccb->bytes_per_intr += totlen;
 697 
 698                 masked_flags = flags & flags_cksum_prot_mask;
 699 
 700                 if (likely
 701                     ((bnad->netdev->features & NETIF_F_RXCSUM) &&
 702                      ((masked_flags == flags_tcp4) ||
 703                       (masked_flags == flags_udp4) ||
 704                       (masked_flags == flags_tcp6) ||
 705                       (masked_flags == flags_udp6))))
 706                         skb->ip_summed = CHECKSUM_UNNECESSARY;
 707                 else
 708                         skb_checksum_none_assert(skb);
 709 
 710                 if ((flags & BNA_CQ_EF_VLAN) &&
 711                     (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
 712                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 713 
 714                 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
 715                         netif_receive_skb(skb);
 716                 else
 717                         napi_gro_frags(&rx_ctrl->napi);
 718 
 719 next:
 720                 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
 721                 for (vec = 0; vec < nvecs; vec++) {
 722                         cmpl = &cq[ccb->producer_index];
 723                         cmpl->valid = 0;
 724                         BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
 725                 }
 726         }
 727 
 728         napi_gro_flush(&rx_ctrl->napi, false);
 729         if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
 730                 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
 731 
 732         bnad_rxq_post(bnad, ccb->rcb[0]);
 733         if (ccb->rcb[1])
 734                 bnad_rxq_post(bnad, ccb->rcb[1]);
 735 
 736         return packets;
 737 }
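
      /*
       * Flow of bnad_cq_process() above: for each valid completion the driver
       * selects the small or large RCB from rxq_id, collects all vectors of a
       * multi-buffer frame up to BNA_CQ_EF_EOP, drops errored frames through
       * bnad_cq_drop_packet(), then builds either a regular skb
       * (bnad_cq_setup_skb) or a frag-based skb for GRO
       * (bnad_cq_setup_skb_frags), fills in RX checksum and VLAN offload
       * results, and finally acks the interrupt block with the packet count
       * and replenishes both RxQs.
       */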
 738 
 739 static void
 740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 741 {
 742         struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
 743         struct napi_struct *napi = &rx_ctrl->napi;
 744 
 745         if (likely(napi_schedule_prep(napi))) {
 746                 __napi_schedule(napi);
 747                 rx_ctrl->rx_schedule++;
 748         }
 749 }
 750 
 751 /* MSIX Rx Path Handler */
 752 static irqreturn_t
 753 bnad_msix_rx(int irq, void *data)
 754 {
 755         struct bna_ccb *ccb = (struct bna_ccb *)data;
 756 
 757         if (ccb) {
 758                 ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
 759                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
 760         }
 761 
 762         return IRQ_HANDLED;
 763 }
 764 
 765 /* Interrupt handlers */
 766 
 767 /* Mbox Interrupt Handlers */
 768 static irqreturn_t
 769 bnad_msix_mbox_handler(int irq, void *data)
 770 {
 771         u32 intr_status;
 772         unsigned long flags;
 773         struct bnad *bnad = (struct bnad *)data;
 774 
 775         spin_lock_irqsave(&bnad->bna_lock, flags);
 776         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
 777                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
 778                 return IRQ_HANDLED;
 779         }
 780 
 781         bna_intr_status_get(&bnad->bna, intr_status);
 782 
 783         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 784                 bna_mbox_handler(&bnad->bna, intr_status);
 785 
 786         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 787 
 788         return IRQ_HANDLED;
 789 }
 790 
 791 static irqreturn_t
 792 bnad_isr(int irq, void *data)
 793 {
 794         int i, j;
 795         u32 intr_status;
 796         unsigned long flags;
 797         struct bnad *bnad = (struct bnad *)data;
 798         struct bnad_rx_info *rx_info;
 799         struct bnad_rx_ctrl *rx_ctrl;
 800         struct bna_tcb *tcb = NULL;
 801 
 802         spin_lock_irqsave(&bnad->bna_lock, flags);
 803         if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
 804                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
 805                 return IRQ_NONE;
 806         }
 807 
 808         bna_intr_status_get(&bnad->bna, intr_status);
 809 
 810         if (unlikely(!intr_status)) {
 811                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
 812                 return IRQ_NONE;
 813         }
 814 
 815         if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
 816                 bna_mbox_handler(&bnad->bna, intr_status);
 817 
 818         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 819 
 820         if (!BNA_IS_INTX_DATA_INTR(intr_status))
 821                 return IRQ_HANDLED;
 822 
 823         /* Process data interrupts */
 824         /* Tx processing */
 825         for (i = 0; i < bnad->num_tx; i++) {
 826                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
 827                         tcb = bnad->tx_info[i].tcb[j];
 828                         if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 829                                 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
 830                 }
 831         }
 832         /* Rx processing */
 833         for (i = 0; i < bnad->num_rx; i++) {
 834                 rx_info = &bnad->rx_info[i];
 835                 if (!rx_info->rx)
 836                         continue;
 837                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
 838                         rx_ctrl = &rx_info->rx_ctrl[j];
 839                         if (rx_ctrl->ccb)
 840                                 bnad_netif_rx_schedule_poll(bnad,
 841                                                             rx_ctrl->ccb);
 842                 }
 843         }
 844         return IRQ_HANDLED;
 845 }
 846 
 847 /*
 848  * Called in interrupt / callback context
 849  * with bna_lock held, so cfg_flags access is OK
 850  */
 851 static void
 852 bnad_enable_mbox_irq(struct bnad *bnad)
 853 {
 854         clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 855 
 856         BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 857 }
 858 
 859 /*
  860  * Called with bnad->bna_lock held because of
 861  * bnad->cfg_flags access.
 862  */
 863 static void
 864 bnad_disable_mbox_irq(struct bnad *bnad)
 865 {
 866         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 867 
 868         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
 869 }
 870 
 871 static void
 872 bnad_set_netdev_perm_addr(struct bnad *bnad)
 873 {
 874         struct net_device *netdev = bnad->netdev;
 875 
 876         ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
 877         if (is_zero_ether_addr(netdev->dev_addr))
 878                 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
 879 }
 880 
 881 /* Control Path Handlers */
 882 
 883 /* Callbacks */
 884 void
 885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
 886 {
 887         bnad_enable_mbox_irq(bnad);
 888 }
 889 
 890 void
 891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
 892 {
 893         bnad_disable_mbox_irq(bnad);
 894 }
 895 
 896 void
 897 bnad_cb_ioceth_ready(struct bnad *bnad)
 898 {
 899         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 900         complete(&bnad->bnad_completions.ioc_comp);
 901 }
 902 
 903 void
 904 bnad_cb_ioceth_failed(struct bnad *bnad)
 905 {
 906         bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
 907         complete(&bnad->bnad_completions.ioc_comp);
 908 }
 909 
 910 void
 911 bnad_cb_ioceth_disabled(struct bnad *bnad)
 912 {
 913         bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
 914         complete(&bnad->bnad_completions.ioc_comp);
 915 }
 916 
 917 static void
 918 bnad_cb_enet_disabled(void *arg)
 919 {
 920         struct bnad *bnad = (struct bnad *)arg;
 921 
 922         netif_carrier_off(bnad->netdev);
 923         complete(&bnad->bnad_completions.enet_comp);
 924 }
 925 
 926 void
 927 bnad_cb_ethport_link_status(struct bnad *bnad,
 928                         enum bna_link_status link_status)
 929 {
 930         bool link_up = false;
 931 
 932         link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
 933 
 934         if (link_status == BNA_CEE_UP) {
 935                 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
 936                         BNAD_UPDATE_CTR(bnad, cee_toggle);
 937                 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
 938         } else {
 939                 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
 940                         BNAD_UPDATE_CTR(bnad, cee_toggle);
 941                 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
 942         }
 943 
 944         if (link_up) {
 945                 if (!netif_carrier_ok(bnad->netdev)) {
 946                         uint tx_id, tcb_id;
 947                         netdev_info(bnad->netdev, "link up\n");
 948                         netif_carrier_on(bnad->netdev);
 949                         BNAD_UPDATE_CTR(bnad, link_toggle);
 950                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
 951                                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
 952                                       tcb_id++) {
 953                                         struct bna_tcb *tcb =
 954                                         bnad->tx_info[tx_id].tcb[tcb_id];
 955                                         u32 txq_id;
 956                                         if (!tcb)
 957                                                 continue;
 958 
 959                                         txq_id = tcb->id;
 960 
 961                                         if (test_bit(BNAD_TXQ_TX_STARTED,
 962                                                      &tcb->flags)) {
 963                                                 /*
 964                                                  * Force an immediate
 965                                                  * Transmit Schedule */
 966                                                 netif_wake_subqueue(
 967                                                                 bnad->netdev,
 968                                                                 txq_id);
 969                                                 BNAD_UPDATE_CTR(bnad,
 970                                                         netif_queue_wakeup);
 971                                         } else {
 972                                                 netif_stop_subqueue(
 973                                                                 bnad->netdev,
 974                                                                 txq_id);
 975                                                 BNAD_UPDATE_CTR(bnad,
 976                                                         netif_queue_stop);
 977                                         }
 978                                 }
 979                         }
 980                 }
 981         } else {
 982                 if (netif_carrier_ok(bnad->netdev)) {
 983                         netdev_info(bnad->netdev, "link down\n");
 984                         netif_carrier_off(bnad->netdev);
 985                         BNAD_UPDATE_CTR(bnad, link_toggle);
 986                 }
 987         }
 988 }
 989 
 990 static void
 991 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
 992 {
 993         struct bnad *bnad = (struct bnad *)arg;
 994 
 995         complete(&bnad->bnad_completions.tx_comp);
 996 }
 997 
 998 static void
 999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1000 {
1001         struct bnad_tx_info *tx_info =
1002                         (struct bnad_tx_info *)tcb->txq->tx->priv;
1003 
1004         tcb->priv = tcb;
1005         tx_info->tcb[tcb->id] = tcb;
1006 }
1007 
1008 static void
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1010 {
1011         struct bnad_tx_info *tx_info =
1012                         (struct bnad_tx_info *)tcb->txq->tx->priv;
1013 
1014         tx_info->tcb[tcb->id] = NULL;
1015         tcb->priv = NULL;
1016 }
1017 
1018 static void
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1020 {
1021         struct bnad_rx_info *rx_info =
1022                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1023 
1024         rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025         ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1026 }
1027 
1028 static void
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1030 {
1031         struct bnad_rx_info *rx_info =
1032                         (struct bnad_rx_info *)ccb->cq->rx->priv;
1033 
1034         rx_info->rx_ctrl[ccb->id].ccb = NULL;
1035 }
1036 
1037 static void
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1039 {
1040         struct bnad_tx_info *tx_info =
1041                         (struct bnad_tx_info *)tx->priv;
1042         struct bna_tcb *tcb;
1043         u32 txq_id;
1044         int i;
1045 
1046         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1047                 tcb = tx_info->tcb[i];
1048                 if (!tcb)
1049                         continue;
1050                 txq_id = tcb->id;
1051                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1052                 netif_stop_subqueue(bnad->netdev, txq_id);
1053         }
1054 }
1055 
1056 static void
1057 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1058 {
1059         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1060         struct bna_tcb *tcb;
1061         u32 txq_id;
1062         int i;
1063 
1064         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1065                 tcb = tx_info->tcb[i];
1066                 if (!tcb)
1067                         continue;
1068                 txq_id = tcb->id;
1069 
1070                 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1071                 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1072                 BUG_ON(*(tcb->hw_consumer_index) != 0);
1073 
1074                 if (netif_carrier_ok(bnad->netdev)) {
1075                         netif_wake_subqueue(bnad->netdev, txq_id);
1076                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1077                 }
1078         }
1079 
1080         /*
 1081          * Workaround: if the first ioceth enable failed, we may have
 1082          * been left with a zero MAC address, so try to get the MAC
 1083          * address again here.
1084          */
1085         if (is_zero_ether_addr(bnad->perm_addr)) {
1086                 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087                 bnad_set_netdev_perm_addr(bnad);
1088         }
1089 }
1090 
1091 /*
1092  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1093  */
1094 static void
1095 bnad_tx_cleanup(struct delayed_work *work)
1096 {
1097         struct bnad_tx_info *tx_info =
1098                 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1099         struct bnad *bnad = NULL;
1100         struct bna_tcb *tcb;
1101         unsigned long flags;
1102         u32 i, pending = 0;
1103 
1104         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1105                 tcb = tx_info->tcb[i];
1106                 if (!tcb)
1107                         continue;
1108 
1109                 bnad = tcb->bnad;
1110 
1111                 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1112                         pending++;
1113                         continue;
1114                 }
1115 
1116                 bnad_txq_cleanup(bnad, tcb);
1117 
1118                 smp_mb__before_atomic();
1119                 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1120         }
1121 
1122         if (pending) {
1123                 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1124                         msecs_to_jiffies(1));
1125                 return;
1126         }
1127 
1128         spin_lock_irqsave(&bnad->bna_lock, flags);
1129         bna_tx_cleanup_complete(tx_info->tx);
1130         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1131 }
1132 
1133 static void
1134 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1135 {
1136         struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1137         struct bna_tcb *tcb;
1138         int i;
1139 
1140         for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1141                 tcb = tx_info->tcb[i];
1142                 if (!tcb)
1143                         continue;
1144         }
1145 
1146         queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1147 }
1148 
1149 static void
1150 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1151 {
1152         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1153         struct bna_ccb *ccb;
1154         struct bnad_rx_ctrl *rx_ctrl;
1155         int i;
1156 
1157         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1158                 rx_ctrl = &rx_info->rx_ctrl[i];
1159                 ccb = rx_ctrl->ccb;
1160                 if (!ccb)
1161                         continue;
1162 
1163                 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1164 
1165                 if (ccb->rcb[1])
1166                         clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1167         }
1168 }
1169 
1170 /*
1171  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1172  */
1173 static void
1174 bnad_rx_cleanup(void *work)
1175 {
1176         struct bnad_rx_info *rx_info =
1177                 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1178         struct bnad_rx_ctrl *rx_ctrl;
1179         struct bnad *bnad = NULL;
1180         unsigned long flags;
1181         u32 i;
1182 
1183         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1184                 rx_ctrl = &rx_info->rx_ctrl[i];
1185 
1186                 if (!rx_ctrl->ccb)
1187                         continue;
1188 
1189                 bnad = rx_ctrl->ccb->bnad;
1190 
1191                 /*
1192                  * Wait till the poll handler has exited
1193                  * and nothing can be scheduled anymore
1194                  */
1195                 napi_disable(&rx_ctrl->napi);
1196 
1197                 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198                 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199                 if (rx_ctrl->ccb->rcb[1])
1200                         bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1201         }
1202 
1203         spin_lock_irqsave(&bnad->bna_lock, flags);
1204         bna_rx_cleanup_complete(rx_info->rx);
1205         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1206 }
1207 
1208 static void
1209 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1210 {
1211         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1212         struct bna_ccb *ccb;
1213         struct bnad_rx_ctrl *rx_ctrl;
1214         int i;
1215 
1216         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1217                 rx_ctrl = &rx_info->rx_ctrl[i];
1218                 ccb = rx_ctrl->ccb;
1219                 if (!ccb)
1220                         continue;
1221 
1222                 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1223 
1224                 if (ccb->rcb[1])
1225                         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1226         }
1227 
1228         queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1229 }
1230 
1231 static void
1232 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1233 {
1234         struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235         struct bna_ccb *ccb;
1236         struct bna_rcb *rcb;
1237         struct bnad_rx_ctrl *rx_ctrl;
1238         int i, j;
1239 
1240         for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1241                 rx_ctrl = &rx_info->rx_ctrl[i];
1242                 ccb = rx_ctrl->ccb;
1243                 if (!ccb)
1244                         continue;
1245 
1246                 napi_enable(&rx_ctrl->napi);
1247 
1248                 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1249                         rcb = ccb->rcb[j];
1250                         if (!rcb)
1251                                 continue;
1252 
1253                         bnad_rxq_alloc_init(bnad, rcb);
1254                         set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1255                         set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1256                         bnad_rxq_post(bnad, rcb);
1257                 }
1258         }
1259 }
1260 
1261 static void
1262 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1263 {
1264         struct bnad *bnad = (struct bnad *)arg;
1265 
1266         complete(&bnad->bnad_completions.rx_comp);
1267 }
1268 
1269 static void
1270 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1271 {
1272         bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273         complete(&bnad->bnad_completions.mcast_comp);
1274 }
1275 
1276 void
1277 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1278                        struct bna_stats *stats)
1279 {
1280         if (status == BNA_CB_SUCCESS)
1281                 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1282 
1283         if (!netif_running(bnad->netdev) ||
1284                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1285                 return;
1286 
1287         mod_timer(&bnad->stats_timer,
1288                   jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1289 }
1290 
1291 static void
1292 bnad_cb_enet_mtu_set(struct bnad *bnad)
1293 {
1294         bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295         complete(&bnad->bnad_completions.mtu_comp);
1296 }
1297 
1298 void
1299 bnad_cb_completion(void *arg, enum bfa_status status)
1300 {
1301         struct bnad_iocmd_comp *iocmd_comp =
1302                         (struct bnad_iocmd_comp *)arg;
1303 
1304         iocmd_comp->comp_status = (u32) status;
1305         complete(&iocmd_comp->comp);
1306 }
1307 
1308 /* Resource allocation, free functions */
1309 
1310 static void
1311 bnad_mem_free(struct bnad *bnad,
1312               struct bna_mem_info *mem_info)
1313 {
1314         int i;
1315         dma_addr_t dma_pa;
1316 
1317         if (mem_info->mdl == NULL)
1318                 return;
1319 
1320         for (i = 0; i < mem_info->num; i++) {
1321                 if (mem_info->mdl[i].kva != NULL) {
1322                         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1323                                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1324                                                 dma_pa);
1325                                 dma_free_coherent(&bnad->pcidev->dev,
1326                                                   mem_info->mdl[i].len,
1327                                                   mem_info->mdl[i].kva, dma_pa);
1328                         } else
1329                                 kfree(mem_info->mdl[i].kva);
1330                 }
1331         }
1332         kfree(mem_info->mdl);
1333         mem_info->mdl = NULL;
1334 }
1335 
1336 static int
1337 bnad_mem_alloc(struct bnad *bnad,
1338                struct bna_mem_info *mem_info)
1339 {
1340         int i;
1341         dma_addr_t dma_pa;
1342 
1343         if ((mem_info->num == 0) || (mem_info->len == 0)) {
1344                 mem_info->mdl = NULL;
1345                 return 0;
1346         }
1347 
1348         mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1349                                 GFP_KERNEL);
1350         if (mem_info->mdl == NULL)
1351                 return -ENOMEM;
1352 
1353         if (mem_info->mem_type == BNA_MEM_T_DMA) {
1354                 for (i = 0; i < mem_info->num; i++) {
1355                         mem_info->mdl[i].len = mem_info->len;
1356                         mem_info->mdl[i].kva =
1357                                 dma_alloc_coherent(&bnad->pcidev->dev,
1358                                                    mem_info->len, &dma_pa,
1359                                                    GFP_KERNEL);
1360                         if (mem_info->mdl[i].kva == NULL)
1361                                 goto err_return;
1362 
1363                         BNA_SET_DMA_ADDR(dma_pa,
1364                                          &(mem_info->mdl[i].dma));
1365                 }
1366         } else {
1367                 for (i = 0; i < mem_info->num; i++) {
1368                         mem_info->mdl[i].len = mem_info->len;
1369                         mem_info->mdl[i].kva = kzalloc(mem_info->len,
1370                                                         GFP_KERNEL);
1371                         if (mem_info->mdl[i].kva == NULL)
1372                                 goto err_return;
1373                 }
1374         }
1375 
1376         return 0;
1377 
1378 err_return:
1379         bnad_mem_free(bnad, mem_info);
1380         return -ENOMEM;
1381 }
1382 
1383 /* Free IRQ for Mailbox */
1384 static void
1385 bnad_mbox_irq_free(struct bnad *bnad)
1386 {
1387         int irq;
1388         unsigned long flags;
1389 
1390         spin_lock_irqsave(&bnad->bna_lock, flags);
1391         bnad_disable_mbox_irq(bnad);
1392         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1393 
1394         irq = BNAD_GET_MBOX_IRQ(bnad);
1395         free_irq(irq, bnad);
1396 }
1397 
1398 /*
 1399  * Allocates the IRQ for the mailbox, but keeps it disabled.
 1400  * It will be enabled once we get the mbox enable callback
 1401  * from bna.
1402  */
1403 static int
1404 bnad_mbox_irq_alloc(struct bnad *bnad)
1405 {
1406         int             err = 0;
1407         unsigned long   irq_flags, flags;
1408         u32     irq;
1409         irq_handler_t   irq_handler;
1410 
1411         spin_lock_irqsave(&bnad->bna_lock, flags);
1412         if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413                 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1414                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1415                 irq_flags = 0;
1416         } else {
1417                 irq_handler = (irq_handler_t)bnad_isr;
1418                 irq = bnad->pcidev->irq;
1419                 irq_flags = IRQF_SHARED;
1420         }
1421 
1422         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423         sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1424 
1425         /*
1426          * Set the Mbox IRQ disable flag, so that the IRQ handler
 1427          * called from request_irq() for SHARED IRQs does not execute
1428          */
1429         set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1430 
1431         BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1432 
1433         err = request_irq(irq, irq_handler, irq_flags,
1434                           bnad->mbox_irq_name, bnad);
1435 
1436         return err;
1437 }
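
      /*
       * The mailbox IRQ is requested with BNAD_RF_MBOX_IRQ_DISABLED already
       * set, so a shared INTx handler that fires during request_irq() returns
       * without touching the mailbox; the flag is cleared later through
       * bnad_cb_mbox_intr_enable() -> bnad_enable_mbox_irq() once bna is
       * ready.
       */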
1438 
1439 static void
1440 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1441 {
1442         kfree(intr_info->idl);
1443         intr_info->idl = NULL;
1444 }
1445 
1446 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1447 static int
1448 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1449                     u32 txrx_id, struct bna_intr_info *intr_info)
1450 {
1451         int i, vector_start = 0;
1452         u32 cfg_flags;
1453         unsigned long flags;
1454 
1455         spin_lock_irqsave(&bnad->bna_lock, flags);
1456         cfg_flags = bnad->cfg_flags;
1457         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1458 
1459         if (cfg_flags & BNAD_CF_MSIX) {
1460                 intr_info->intr_type = BNA_INTR_T_MSIX;
1461                 intr_info->idl = kcalloc(intr_info->num,
1462                                         sizeof(struct bna_intr_descr),
1463                                         GFP_KERNEL);
1464                 if (!intr_info->idl)
1465                         return -ENOMEM;
1466 
1467                 switch (src) {
1468                 case BNAD_INTR_TX:
1469                         vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1470                         break;
1471 
1472                 case BNAD_INTR_RX:
1473                         vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1474                                         (bnad->num_tx * bnad->num_txq_per_tx) +
1475                                         txrx_id;
1476                         break;
1477 
1478                 default:
1479                         BUG();
1480                 }
1481 
1482                 for (i = 0; i < intr_info->num; i++)
1483                         intr_info->idl[i].vector = vector_start + i;
1484         } else {
1485                 intr_info->intr_type = BNA_INTR_T_INTX;
1486                 intr_info->num = 1;
1487                 intr_info->idl = kcalloc(intr_info->num,
1488                                         sizeof(struct bna_intr_descr),
1489                                         GFP_KERNEL);
1490                 if (!intr_info->idl)
1491                         return -ENOMEM;
1492 
1493                 switch (src) {
1494                 case BNAD_INTR_TX:
1495                         intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1496                         break;
1497 
1498                 case BNAD_INTR_RX:
1499                         intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1500                         break;
1501                 }
1502         }
1503         return 0;
1504 }
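     /*
      * Resulting MSI-X vector layout, as a sketch (assuming a single
      * mailbox vector, i.e. BNAD_MAILBOX_MSIX_VECTORS == 1, and one Tx
      * object with one TxQ):
      *      vector 0 -> mailbox
      *      vector 1 -> TxQ 0       (BNAD_MAILBOX_MSIX_VECTORS + txrx_id)
      *      vector 2 -> Rx path 0   (mailbox + num_tx * num_txq_per_tx + txrx_id)
      *      vector 3 -> Rx path 1, and so on for the remaining Rx paths.
      * In INTx mode there is a single interrupt and idl[0].vector holds
      * the Tx/Rx IB bitmask instead of a vector number.
      */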
1505 
1506 /* NOTE: Should be called for MSIX only
1507  * Unregisters Tx MSIX vector(s) from the kernel
1508  */
1509 static void
1510 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1511                         int num_txqs)
1512 {
1513         int i;
1514         int vector_num;
1515 
1516         for (i = 0; i < num_txqs; i++) {
1517                 if (tx_info->tcb[i] == NULL)
1518                         continue;
1519 
1520                 vector_num = tx_info->tcb[i]->intr_vector;
1521                 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1522         }
1523 }
1524 
1525 /* NOTE: Should be called for MSIX only
1526  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1527  */
1528 static int
1529 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1530                         u32 tx_id, int num_txqs)
1531 {
1532         int i;
1533         int err;
1534         int vector_num;
1535 
1536         for (i = 0; i < num_txqs; i++) {
1537                 vector_num = tx_info->tcb[i]->intr_vector;
1538                 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539                                 tx_id + tx_info->tcb[i]->id);
1540                 err = request_irq(bnad->msix_table[vector_num].vector,
1541                                   (irq_handler_t)bnad_msix_tx, 0,
1542                                   tx_info->tcb[i]->name,
1543                                   tx_info->tcb[i]);
1544                 if (err)
1545                         goto err_return;
1546         }
1547 
1548         return 0;
1549 
1550 err_return:
1551         if (i > 0)
1552                 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1553         return -1;
1554 }
1555 
1556 /* NOTE: Should be called for MSIX only
1557  * Unregisters Rx MSIX vector(s) from the kernel
1558  */
1559 static void
1560 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1561                         int num_rxps)
1562 {
1563         int i;
1564         int vector_num;
1565 
1566         for (i = 0; i < num_rxps; i++) {
1567                 if (rx_info->rx_ctrl[i].ccb == NULL)
1568                         continue;
1569 
1570                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571                 free_irq(bnad->msix_table[vector_num].vector,
1572                          rx_info->rx_ctrl[i].ccb);
1573         }
1574 }
1575 
1576 /* NOTE: Should be called for MSIX only
1577  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1578  */
1579 static int
1580 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581                         u32 rx_id, int num_rxps)
1582 {
1583         int i;
1584         int err;
1585         int vector_num;
1586 
1587         for (i = 0; i < num_rxps; i++) {
1588                 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589                 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1590                         bnad->netdev->name,
1591                         rx_id + rx_info->rx_ctrl[i].ccb->id);
1592                 err = request_irq(bnad->msix_table[vector_num].vector,
1593                                   (irq_handler_t)bnad_msix_rx, 0,
1594                                   rx_info->rx_ctrl[i].ccb->name,
1595                                   rx_info->rx_ctrl[i].ccb);
1596                 if (err)
1597                         goto err_return;
1598         }
1599 
1600         return 0;
1601 
1602 err_return:
1603         if (i > 0)
1604                 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1605         return -1;
1606 }
1607 
1608 /* Free Tx object Resources */
1609 static void
1610 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1611 {
1612         int i;
1613 
1614         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615                 if (res_info[i].res_type == BNA_RES_T_MEM)
1616                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1618                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1619         }
1620 }
1621 
1622 /* Allocates memory and interrupt resources for Tx object */
1623 static int
1624 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1625                   u32 tx_id)
1626 {
1627         int i, err = 0;
1628 
1629         for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630                 if (res_info[i].res_type == BNA_RES_T_MEM)
1631                         err = bnad_mem_alloc(bnad,
1632                                         &res_info[i].res_u.mem_info);
1633                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1634                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635                                         &res_info[i].res_u.intr_info);
1636                 if (err)
1637                         goto err_return;
1638         }
1639         return 0;
1640 
1641 err_return:
1642         bnad_tx_res_free(bnad, res_info);
1643         return err;
1644 }
1645 
1646 /* Free Rx object Resources */
1647 static void
1648 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1649 {
1650         int i;
1651 
1652         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653                 if (res_info[i].res_type == BNA_RES_T_MEM)
1654                         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1656                         bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1657         }
1658 }
1659 
1660 /* Allocates memory and interrupt resources for Rx object */
1661 static int
1662 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1663                   uint rx_id)
1664 {
1665         int i, err = 0;
1666 
1667         /* All memory needs to be allocated before setup_ccbs */
1668         for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669                 if (res_info[i].res_type == BNA_RES_T_MEM)
1670                         err = bnad_mem_alloc(bnad,
1671                                         &res_info[i].res_u.mem_info);
1672                 else if (res_info[i].res_type == BNA_RES_T_INTR)
1673                         err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674                                         &res_info[i].res_u.intr_info);
1675                 if (err)
1676                         goto err_return;
1677         }
1678         return 0;
1679 
1680 err_return:
1681         bnad_rx_res_free(bnad, res_info);
1682         return err;
1683 }
1684 
1685 /* Timer callbacks */
1686 /* a) IOC timer */
1687 static void
1688 bnad_ioc_timeout(struct timer_list *t)
1689 {
1690         struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691         unsigned long flags;
1692 
1693         spin_lock_irqsave(&bnad->bna_lock, flags);
1694         bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1696 }
1697 
1698 static void
1699 bnad_ioc_hb_check(struct timer_list *t)
1700 {
1701         struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702         unsigned long flags;
1703 
1704         spin_lock_irqsave(&bnad->bna_lock, flags);
1705         bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707 }
1708 
1709 static void
1710 bnad_iocpf_timeout(struct timer_list *t)
1711 {
1712         struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713         unsigned long flags;
1714 
1715         spin_lock_irqsave(&bnad->bna_lock, flags);
1716         bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1718 }
1719 
1720 static void
1721 bnad_iocpf_sem_timeout(struct timer_list *t)
1722 {
1723         struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724         unsigned long flags;
1725 
1726         spin_lock_irqsave(&bnad->bna_lock, flags);
1727         bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729 }
1730 
1731 /*
1732  * All timer routines use bnad->bna_lock to protect against
1733  * the following race, which may occur if no locking is used:
1734  *      Time    CPU m   CPU n
1735  *      0       1 = test_bit
1736  *      1                       clear_bit
1737  *      2                       del_timer_sync
1738  *      3       mod_timer
1739  */
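     /*
      * The stop side therefore clears the *_TIMER_RUNNING bit under
      * bna_lock and only calls del_timer_sync() after dropping the lock,
      * as bnad_stats_timer_stop() and bnad_destroy_rx() do below.
      */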
1740 
1741 /* b) Dynamic Interrupt Moderation Timer */
1742 static void
1743 bnad_dim_timeout(struct timer_list *t)
1744 {
1745         struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746         struct bnad_rx_info *rx_info;
1747         struct bnad_rx_ctrl *rx_ctrl;
1748         int i, j;
1749         unsigned long flags;
1750 
1751         if (!netif_carrier_ok(bnad->netdev))
1752                 return;
1753 
1754         spin_lock_irqsave(&bnad->bna_lock, flags);
1755         for (i = 0; i < bnad->num_rx; i++) {
1756                 rx_info = &bnad->rx_info[i];
1757                 if (!rx_info->rx)
1758                         continue;
1759                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760                         rx_ctrl = &rx_info->rx_ctrl[j];
1761                         if (!rx_ctrl->ccb)
1762                                 continue;
1763                         bna_rx_dim_update(rx_ctrl->ccb);
1764                 }
1765         }
1766 
1767         /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1768         if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769                 mod_timer(&bnad->dim_timer,
1770                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1772 }
1773 
1774 /* c)  Statistics Timer */
1775 static void
1776 bnad_stats_timeout(struct timer_list *t)
1777 {
1778         struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779         unsigned long flags;
1780 
1781         if (!netif_running(bnad->netdev) ||
1782                 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1783                 return;
1784 
1785         spin_lock_irqsave(&bnad->bna_lock, flags);
1786         bna_hw_stats_get(&bnad->bna);
1787         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788 }
1789 
1790 /*
1791  * Set up timer for DIM
1792  * Called with bnad->bna_lock held
1793  */
1794 void
1795 bnad_dim_timer_start(struct bnad *bnad)
1796 {
1797         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798             !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799                 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800                 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801                 mod_timer(&bnad->dim_timer,
1802                           jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1803         }
1804 }
1805 
1806 /*
1807  * Set up timer for statistics
1808  * Called with mutex_lock(&bnad->conf_mutex) held
1809  */
1810 static void
1811 bnad_stats_timer_start(struct bnad *bnad)
1812 {
1813         unsigned long flags;
1814 
1815         spin_lock_irqsave(&bnad->bna_lock, flags);
1816         if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817                 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818                 mod_timer(&bnad->stats_timer,
1819                           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1820         }
1821         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1822 }
1823 
1824 /*
1825  * Stops the stats timer
1826  * Called with mutex_lock(&bnad->conf_mutex) held
1827  */
1828 static void
1829 bnad_stats_timer_stop(struct bnad *bnad)
1830 {
1831         int to_del = 0;
1832         unsigned long flags;
1833 
1834         spin_lock_irqsave(&bnad->bna_lock, flags);
1835         if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836                 to_del = 1;
1837         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838         if (to_del)
1839                 del_timer_sync(&bnad->stats_timer);
1840 }
1841 
1842 /* Utilities */
1843 
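     /*
      * Copy the netdev multicast list into a flat array of MAC addresses.
      * Index 0 is reserved for the broadcast address, which the caller is
      * expected to have filled in already.
      */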
1844 static void
1845 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1846 {
1847         int i = 1; /* Index 0 has broadcast address */
1848         struct netdev_hw_addr *mc_addr;
1849 
1850         netdev_for_each_mc_addr(mc_addr, netdev) {
1851                 ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1852                 i++;
1853         }
1854 }
1855 
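     /*
      * NAPI poll callback for one Rx path: processes up to @budget
      * completions and re-enables the Rx IRQ once the CQ is drained
      * (i.e. when fewer than @budget packets were received).
      */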
1856 static int
1857 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1858 {
1859         struct bnad_rx_ctrl *rx_ctrl =
1860                 container_of(napi, struct bnad_rx_ctrl, napi);
1861         struct bnad *bnad = rx_ctrl->bnad;
1862         int rcvd = 0;
1863 
1864         rx_ctrl->rx_poll_ctr++;
1865 
1866         if (!netif_carrier_ok(bnad->netdev))
1867                 goto poll_exit;
1868 
1869         rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1870         if (rcvd >= budget)
1871                 return rcvd;
1872 
1873 poll_exit:
1874         napi_complete_done(napi, rcvd);
1875 
1876         rx_ctrl->rx_complete++;
1877 
1878         if (rx_ctrl->ccb)
1879                 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1880 
1881         return rcvd;
1882 }
1883 
1884 #define BNAD_NAPI_POLL_QUOTA            64
1885 static void
1886 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1887 {
1888         struct bnad_rx_ctrl *rx_ctrl;
1889         int i;
1890 
1891         /* Initialize & enable NAPI */
1892         for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1893                 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1894                 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1895                                bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1896         }
1897 }
1898 
1899 static void
1900 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1901 {
1902         int i;
1903 
1904         /* First disable and then clean up */
1905         for (i = 0; i < bnad->num_rxp_per_rx; i++)
1906                 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1907 }
1908 
1909 /* Should be called with conf_lock held */
1910 void
1911 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1912 {
1913         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1914         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1915         unsigned long flags;
1916 
1917         if (!tx_info->tx)
1918                 return;
1919 
1920         init_completion(&bnad->bnad_completions.tx_comp);
1921         spin_lock_irqsave(&bnad->bna_lock, flags);
1922         bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1923         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924         wait_for_completion(&bnad->bnad_completions.tx_comp);
1925 
1926         if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1927                 bnad_tx_msix_unregister(bnad, tx_info,
1928                         bnad->num_txq_per_tx);
1929 
1930         spin_lock_irqsave(&bnad->bna_lock, flags);
1931         bna_tx_destroy(tx_info->tx);
1932         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1933 
1934         tx_info->tx = NULL;
1935         tx_info->tx_id = 0;
1936 
1937         bnad_tx_res_free(bnad, res_info);
1938 }
1939 
1940 /* Should be called with conf_lock held */
1941 int
1942 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1943 {
1944         int err;
1945         struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1946         struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1947         struct bna_intr_info *intr_info =
1948                         &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1949         struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1950         static const struct bna_tx_event_cbfn tx_cbfn = {
1951                 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1952                 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1953                 .tx_stall_cbfn = bnad_cb_tx_stall,
1954                 .tx_resume_cbfn = bnad_cb_tx_resume,
1955                 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1956         };
1957 
1958         struct bna_tx *tx;
1959         unsigned long flags;
1960 
1961         tx_info->tx_id = tx_id;
1962 
1963         /* Initialize the Tx object configuration */
1964         tx_config->num_txq = bnad->num_txq_per_tx;
1965         tx_config->txq_depth = bnad->txq_depth;
1966         tx_config->tx_type = BNA_TX_T_REGULAR;
1967         tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1968 
1969         /* Get BNA's resource requirement for one tx object */
1970         spin_lock_irqsave(&bnad->bna_lock, flags);
1971         bna_tx_res_req(bnad->num_txq_per_tx,
1972                 bnad->txq_depth, res_info);
1973         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1974 
1975         /* Fill Unmap Q memory requirements */
1976         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1977                         bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1978                         bnad->txq_depth));
1979 
1980         /* Allocate resources */
1981         err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1982         if (err)
1983                 return err;
1984 
1985         /* Ask BNA to create one Tx object, supplying required resources */
1986         spin_lock_irqsave(&bnad->bna_lock, flags);
1987         tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1988                         tx_info);
1989         spin_unlock_irqrestore(&bnad->bna_lock, flags);
1990         if (!tx) {
1991                 err = -ENOMEM;
1992                 goto err_return;
1993         }
1994         tx_info->tx = tx;
1995 
1996         INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1997                         (work_func_t)bnad_tx_cleanup);
1998 
1999         /* Register ISR for the Tx object */
2000         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2001                 err = bnad_tx_msix_register(bnad, tx_info,
2002                         tx_id, bnad->num_txq_per_tx);
2003                 if (err)
2004                         goto cleanup_tx;
2005         }
2006 
2007         spin_lock_irqsave(&bnad->bna_lock, flags);
2008         bna_tx_enable(tx);
2009         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2010 
2011         return 0;
2012 
2013 cleanup_tx:
2014         spin_lock_irqsave(&bnad->bna_lock, flags);
2015         bna_tx_destroy(tx_info->tx);
2016         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2017         tx_info->tx = NULL;
2018         tx_info->tx_id = 0;
2019 err_return:
2020         bnad_tx_res_free(bnad, res_info);
2021         return err;
2022 }
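     /*
      * bnad_setup_tx() above thus follows the sequence: fill tx_config ->
      * query resource requirements (bna_tx_res_req) -> allocate them ->
      * bna_tx_create() -> register per-TxQ MSI-X handlers (if applicable)
      * -> bna_tx_enable().  bnad_setup_rx() below mirrors this for Rx.
      */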
2023 
2024 /* Setup the rx config for bna_rx_create */
2025 /* bnad decides the configuration */
2026 static void
2027 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2028 {
2029         memset(rx_config, 0, sizeof(*rx_config));
2030         rx_config->rx_type = BNA_RX_T_REGULAR;
2031         rx_config->num_paths = bnad->num_rxp_per_rx;
2032         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2033 
2034         if (bnad->num_rxp_per_rx > 1) {
2035                 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2036                 rx_config->rss_config.hash_type =
2037                                 (BFI_ENET_RSS_IPV6 |
2038                                  BFI_ENET_RSS_IPV6_TCP |
2039                                  BFI_ENET_RSS_IPV4 |
2040                                  BFI_ENET_RSS_IPV4_TCP);
2041                 rx_config->rss_config.hash_mask =
2042                                 bnad->num_rxp_per_rx - 1;
2043                 netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2044                         sizeof(rx_config->rss_config.toeplitz_hash_key));
2045         } else {
2046                 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2047                 memset(&rx_config->rss_config, 0,
2048                        sizeof(rx_config->rss_config));
2049         }
2050 
2051         rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2052         rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2053 
2054         /* BNA_RXP_SINGLE - one data-buffer queue
2055          * BNA_RXP_SLR - one small-buffer and one large-buffer queue
2056          * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2057          */
2058         /* TODO: configurable param for queue type */
2059         rx_config->rxp_type = BNA_RXP_SLR;
2060 
2061         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2062             rx_config->frame_size > 4096) {
2063                 /* though size_routing_enable is set in SLR,
2064                  * small packets may get routed to the same rxq.
2065                  * set buf_size to 2048 instead of PAGE_SIZE.
2066                  */
2067                 rx_config->q0_buf_size = 2048;
2068                 /* this should be a multiple of 2 */
2069                 rx_config->q0_num_vecs = 4;
2070                 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2071                 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2072         } else {
2073                 rx_config->q0_buf_size = rx_config->frame_size;
2074                 rx_config->q0_num_vecs = 1;
2075                 rx_config->q0_depth = bnad->rxq_depth;
2076         }
2077 
2078         /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2079         if (rx_config->rxp_type == BNA_RXP_SLR) {
2080                 rx_config->q1_depth = bnad->rxq_depth;
2081                 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2082         }
2083 
2084         rx_config->vlan_strip_status =
2085                 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2086                 BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2087 }
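     /*
      * Example, as a sketch (assuming BNAD_FRAME_SIZE() adds the Ethernet
      * header/FCS overhead to the MTU): on a CAT2 device with a 9000-byte
      * MTU, q0 uses 2048-byte buffers, 4 vectors per frame and
      * multi-buffer receive; with a 1500-byte MTU, q0 uses a single
      * frame-sized buffer per packet and multi-buffer stays disabled.
      */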
2088 
2089 static void
2090 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2091 {
2092         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2093         int i;
2094 
2095         for (i = 0; i < bnad->num_rxp_per_rx; i++)
2096                 rx_info->rx_ctrl[i].bnad = bnad;
2097 }
2098 
2099 /* Called with mutex_lock(&bnad->conf_mutex) held */
2100 static u32
2101 bnad_reinit_rx(struct bnad *bnad)
2102 {
2103         struct net_device *netdev = bnad->netdev;
2104         u32 err = 0, current_err = 0;
2105         u32 rx_id = 0, count = 0;
2106         unsigned long flags;
2107 
2108         /* destroy and create new rx objects */
2109         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2110                 if (!bnad->rx_info[rx_id].rx)
2111                         continue;
2112                 bnad_destroy_rx(bnad, rx_id);
2113         }
2114 
2115         spin_lock_irqsave(&bnad->bna_lock, flags);
2116         bna_enet_mtu_set(&bnad->bna.enet,
2117                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2118         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2119 
2120         for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2121                 count++;
2122                 current_err = bnad_setup_rx(bnad, rx_id);
2123                 if (current_err && !err) {
2124                         err = current_err;
2125                         netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2126                 }
2127         }
2128 
2129         /* restore rx configuration */
2130         if (bnad->rx_info[0].rx && !err) {
2131                 bnad_restore_vlans(bnad, 0);
2132                 bnad_enable_default_bcast(bnad);
2133                 spin_lock_irqsave(&bnad->bna_lock, flags);
2134                 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2135                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2136                 bnad_set_rx_mode(netdev);
2137         }
2138 
2139         return count;
2140 }
2141 
2142 /* Called with bnad_conf_lock() held */
2143 void
2144 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2145 {
2146         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2147         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2148         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2149         unsigned long flags;
2150         int to_del = 0;
2151 
2152         if (!rx_info->rx)
2153                 return;
2154 
2155         if (0 == rx_id) {
2156                 spin_lock_irqsave(&bnad->bna_lock, flags);
2157                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2158                     test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2159                         clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2160                         to_del = 1;
2161                 }
2162                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2163                 if (to_del)
2164                         del_timer_sync(&bnad->dim_timer);
2165         }
2166 
2167         init_completion(&bnad->bnad_completions.rx_comp);
2168         spin_lock_irqsave(&bnad->bna_lock, flags);
2169         bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2170         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171         wait_for_completion(&bnad->bnad_completions.rx_comp);
2172 
2173         if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2174                 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2175 
2176         bnad_napi_delete(bnad, rx_id);
2177 
2178         spin_lock_irqsave(&bnad->bna_lock, flags);
2179         bna_rx_destroy(rx_info->rx);
2180 
2181         rx_info->rx = NULL;
2182         rx_info->rx_id = 0;
2183         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2184 
2185         bnad_rx_res_free(bnad, res_info);
2186 }
2187 
2188 /* Called with mutex_lock(&bnad->conf_mutex) held */
2189 int
2190 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2191 {
2192         int err;
2193         struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2194         struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2195         struct bna_intr_info *intr_info =
2196                         &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2197         struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2198         static const struct bna_rx_event_cbfn rx_cbfn = {
2199                 .rcb_setup_cbfn = NULL,
2200                 .rcb_destroy_cbfn = NULL,
2201                 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2202                 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2203                 .rx_stall_cbfn = bnad_cb_rx_stall,
2204                 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2205                 .rx_post_cbfn = bnad_cb_rx_post,
2206         };
2207         struct bna_rx *rx;
2208         unsigned long flags;
2209 
2210         rx_info->rx_id = rx_id;
2211 
2212         /* Initialize the Rx object configuration */
2213         bnad_init_rx_config(bnad, rx_config);
2214 
2215         /* Get BNA's resource requirement for one Rx object */
2216         spin_lock_irqsave(&bnad->bna_lock, flags);
2217         bna_rx_res_req(rx_config, res_info);
2218         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2219 
2220         /* Fill Unmap Q memory requirements */
2221         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2222                                  rx_config->num_paths,
2223                         (rx_config->q0_depth *
2224                          sizeof(struct bnad_rx_unmap)) +
2225                          sizeof(struct bnad_rx_unmap_q));
2226 
2227         if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2228                 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2229                                          rx_config->num_paths,
2230                                 (rx_config->q1_depth *
2231                                  sizeof(struct bnad_rx_unmap) +
2232                                  sizeof(struct bnad_rx_unmap_q)));
2233         }
2234         /* Allocate resource */
2235         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2236         if (err)
2237                 return err;
2238 
2239         bnad_rx_ctrl_init(bnad, rx_id);
2240 
2241         /* Ask BNA to create one Rx object, supplying required resources */
2242         spin_lock_irqsave(&bnad->bna_lock, flags);
2243         rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2244                         rx_info);
2245         if (!rx) {
2246                 err = -ENOMEM;
2247                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2248                 goto err_return;
2249         }
2250         rx_info->rx = rx;
2251         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2252 
2253         INIT_WORK(&rx_info->rx_cleanup_work,
2254                         (work_func_t)(bnad_rx_cleanup));
2255 
2256         /*
2257          * Init NAPI, so that state is set to NAPI_STATE_SCHED and
2258          * the IRQ handler cannot schedule NAPI at this point.
2259          */
2260         bnad_napi_add(bnad, rx_id);
2261 
2262         /* Register ISR for the Rx object */
2263         if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2264                 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2265                                                 rx_config->num_paths);
2266                 if (err)
2267                         goto err_return;
2268         }
2269 
2270         spin_lock_irqsave(&bnad->bna_lock, flags);
2271         if (0 == rx_id) {
2272                 /* Set up Dynamic Interrupt Moderation Vector */
2273                 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2274                         bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2275 
2276                 /* Enable VLAN filtering only on the default Rx */
2277                 bna_rx_vlanfilter_enable(rx);
2278 
2279                 /* Start the DIM timer */
2280                 bnad_dim_timer_start(bnad);
2281         }
2282 
2283         bna_rx_enable(rx);
2284         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2285 
2286         return 0;
2287 
2288 err_return:
2289         bnad_destroy_rx(bnad, rx_id);
2290         return err;
2291 }
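     /*
      * Note the ordering in bnad_setup_rx() above: NAPI contexts are
      * added (in the scheduled state) before the MSI-X handlers are
      * registered, so an interrupt firing during setup cannot schedule
      * NAPI on a half-initialized Rx path.  DIM, VLAN filtering and the
      * DIM timer are set up only for the default Rx (rx_id 0).
      */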
2292 
2293 /* Called with conf_lock & bnad->bna_lock held */
2294 void
2295 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2296 {
2297         struct bnad_tx_info *tx_info;
2298 
2299         tx_info = &bnad->tx_info[0];
2300         if (!tx_info->tx)
2301                 return;
2302 
2303         bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2304 }
2305 
2306 /* Called with conf_lock & bnad->bna_lock held */
2307 void
2308 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2309 {
2310         struct bnad_rx_info *rx_info;
2311         int     i;
2312 
2313         for (i = 0; i < bnad->num_rx; i++) {
2314                 rx_info = &bnad->rx_info[i];
2315                 if (!rx_info->rx)
2316                         continue;
2317                 bna_rx_coalescing_timeo_set(rx_info->rx,
2318                                 bnad->rx_coalescing_timeo);
2319         }
2320 }
2321 
2322 /*
2323  * Called with bnad->bna_lock held
2324  */
2325 int
2326 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2327 {
2328         int ret;
2329 
2330         if (!is_valid_ether_addr(mac_addr))
2331                 return -EADDRNOTAVAIL;
2332 
2333         /* If datapath is down, pretend everything went through */
2334         if (!bnad->rx_info[0].rx)
2335                 return 0;
2336 
2337         ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2338         if (ret != BNA_CB_SUCCESS)
2339                 return -EADDRNOTAVAIL;
2340 
2341         return 0;
2342 }
2343 
2344 /* Should be called with conf_lock held */
2345 int
2346 bnad_enable_default_bcast(struct bnad *bnad)
2347 {
2348         struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2349         int ret;
2350         unsigned long flags;
2351 
2352         init_completion(&bnad->bnad_completions.mcast_comp);
2353 
2354         spin_lock_irqsave(&bnad->bna_lock, flags);
2355         ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2356                                bnad_cb_rx_mcast_add);
2357         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358 
2359         if (ret == BNA_CB_SUCCESS)
2360                 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2361         else
2362                 return -ENODEV;
2363 
2364         if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2365                 return -ENODEV;
2366 
2367         return 0;
2368 }
2369 
2370 /* Called with mutex_lock(&bnad->conf_mutex) held */
2371 void
2372 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2373 {
2374         u16 vid;
2375         unsigned long flags;
2376 
2377         for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2378                 spin_lock_irqsave(&bnad->bna_lock, flags);
2379                 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2380                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2381         }
2382 }
2383 
2384 /* Statistics utilities */
2385 void
2386 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2387 {
2388         int i, j;
2389 
2390         for (i = 0; i < bnad->num_rx; i++) {
2391                 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2392                         if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2393                                 stats->rx_packets += bnad->rx_info[i].
2394                                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2395                                 stats->rx_bytes += bnad->rx_info[i].
2396                                         rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2397                                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2398                                         bnad->rx_info[i].rx_ctrl[j].ccb->
2399                                         rcb[1]->rxq) {
2400                                         stats->rx_packets +=
2401                                                 bnad->rx_info[i].rx_ctrl[j].
2402                                                 ccb->rcb[1]->rxq->rx_packets;
2403                                         stats->rx_bytes +=
2404                                                 bnad->rx_info[i].rx_ctrl[j].
2405                                                 ccb->rcb[1]->rxq->rx_bytes;
2406                                 }
2407                         }
2408                 }
2409         }
2410         for (i = 0; i < bnad->num_tx; i++) {
2411                 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2412                         if (bnad->tx_info[i].tcb[j]) {
2413                                 stats->tx_packets +=
2414                                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2415                                 stats->tx_bytes +=
2416                                         bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2417                         }
2418                 }
2419         }
2420 }
2421 
2422 /*
2423  * Must be called with the bna_lock held.
2424  */
2425 void
2426 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2427 {
2428         struct bfi_enet_stats_mac *mac_stats;
2429         u32 bmap;
2430         int i;
2431 
2432         mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2433         stats->rx_errors =
2434                 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2435                 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2436                 mac_stats->rx_undersize;
2437         stats->tx_errors = mac_stats->tx_fcs_error +
2438                                         mac_stats->tx_undersize;
2439         stats->rx_dropped = mac_stats->rx_drop;
2440         stats->tx_dropped = mac_stats->tx_drop;
2441         stats->multicast = mac_stats->rx_multicast;
2442         stats->collisions = mac_stats->tx_total_collision;
2443 
2444         stats->rx_length_errors = mac_stats->rx_frame_length_error;
2445 
2446         /* receive ring buffer overflow  ?? */
2447 
2448         stats->rx_crc_errors = mac_stats->rx_fcs_error;
2449         stats->rx_frame_errors = mac_stats->rx_alignment_error;
2450         /* recv'r fifo overrun */
2451         bmap = bna_rx_rid_mask(&bnad->bna);
2452         for (i = 0; bmap; i++) {
2453                 if (bmap & 1) {
2454                         stats->rx_fifo_errors +=
2455                                 bnad->stats.bna_stats->
2456                                         hw_stats.rxf_stats[i].frame_drops;
2457                         break;
2458                 }
2459                 bmap >>= 1;
2460         }
2461 }
2462 
2463 static void
2464 bnad_mbox_irq_sync(struct bnad *bnad)
2465 {
2466         u32 irq;
2467         unsigned long flags;
2468 
2469         spin_lock_irqsave(&bnad->bna_lock, flags);
2470         if (bnad->cfg_flags & BNAD_CF_MSIX)
2471                 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2472         else
2473                 irq = bnad->pcidev->irq;
2474         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2475 
2476         synchronize_irq(irq);
2477 }
2478 
2479 /* Utility used by bnad_start_xmit, for doing TSO */
2480 static int
2481 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2482 {
2483         int err;
2484 
2485         err = skb_cow_head(skb, 0);
2486         if (err < 0) {
2487                 BNAD_UPDATE_CTR(bnad, tso_err);
2488                 return err;
2489         }
2490 
2491         /*
2492          * For TSO, the TCP checksum field is seeded with pseudo-header sum
2493          * excluding the length field.
2494          */
2495         if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2496                 struct iphdr *iph = ip_hdr(skb);
2497 
2498                 /* Do we really need these? */
2499                 iph->tot_len = 0;
2500                 iph->check = 0;
2501 
2502                 tcp_hdr(skb)->check =
2503                         ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2504                                            IPPROTO_TCP, 0);
2505                 BNAD_UPDATE_CTR(bnad, tso4);
2506         } else {
2507                 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2508 
2509                 ipv6h->payload_len = 0;
2510                 tcp_hdr(skb)->check =
2511                         ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2512                                          IPPROTO_TCP, 0);
2513                 BNAD_UPDATE_CTR(bnad, tso6);
2514         }
2515 
2516         return 0;
2517 }
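     /*
      * Seeding the TCP checksum with the pseudo-header sum (and zeroing
      * the IP length fields) is the usual TSO contract: presumably the
      * adapter fills in the per-segment lengths and completes the
      * checksum for each segment it generates.
      */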
2518 
2519 /*
2520  * Initialize Q numbers depending on Rx Paths
2521  * Called with bnad->bna_lock held, because of cfg_flags
2522  * access.
2523  */
2524 static void
2525 bnad_q_num_init(struct bnad *bnad)
2526 {
2527         int rxps;
2528 
2529         rxps = min((uint)num_online_cpus(),
2530                         (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2531 
2532         if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2533                 rxps = 1;       /* INTx */
2534 
2535         bnad->num_rx = 1;
2536         bnad->num_tx = 1;
2537         bnad->num_rxp_per_rx = rxps;
2538         bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2539 }
2540 
2541 /*
2542  * Adjusts the Q numbers, given a number of MSI-X vectors.
2543  * Preference is given to RSS over Tx priority queues;
2544  * in that case, just use 1 Tx Q.
2545  * Called with bnad->bna_lock held because of cfg_flags access.
2546  */
2547 static void
2548 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2549 {
2550         bnad->num_txq_per_tx = 1;
2551         if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2552              bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2553             (bnad->cfg_flags & BNAD_CF_MSIX)) {
2554                 bnad->num_rxp_per_rx = msix_vectors -
2555                         (bnad->num_tx * bnad->num_txq_per_tx) -
2556                         BNAD_MAILBOX_MSIX_VECTORS;
2557         } else
2558                 bnad->num_rxp_per_rx = 1;
2559 }
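     /*
      * Worked example (assuming BNAD_MAILBOX_MSIX_VECTORS == 1 and
      * bnad_rxqs_per_cq == 2): with 8 MSI-X vectors granted,
      * num_txq_per_tx is forced to 1 and num_rxp_per_rx becomes
      * 8 - 1 - 1 = 6; with fewer than 1 + 2 + 1 vectors available, the
      * driver falls back to a single Rx path.
      */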
2560 
2561 /* Enable / disable ioceth */
2562 static int
2563 bnad_ioceth_disable(struct bnad *bnad)
2564 {
2565         unsigned long flags;
2566         int err = 0;
2567 
2568         spin_lock_irqsave(&bnad->bna_lock, flags);
2569         init_completion(&bnad->bnad_completions.ioc_comp);
2570         bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2571         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2572 
2573         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2574                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2575 
2576         err = bnad->bnad_completions.ioc_comp_status;
2577         return err;
2578 }
2579 
2580 static int
2581 bnad_ioceth_enable(struct bnad *bnad)
2582 {
2583         int err = 0;
2584         unsigned long flags;
2585 
2586         spin_lock_irqsave(&bnad->bna_lock, flags);
2587         init_completion(&bnad->bnad_completions.ioc_comp);
2588         bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2589         bna_ioceth_enable(&bnad->bna.ioceth);
2590         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2591 
2592         wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2593                 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2594 
2595         err = bnad->bnad_completions.ioc_comp_status;
2596 
2597         return err;
2598 }
2599 
2600 /* Free BNA resources */
2601 static void
2602 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2603                 u32 res_val_max)
2604 {
2605         int i;
2606 
2607         for (i = 0; i < res_val_max; i++)
2608                 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2609 }
2610 
2611 /* Allocates memory and interrupt resources for BNA */
2612 static int
2613 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2614                 u32 res_val_max)
2615 {
2616         int i, err;
2617 
2618         for (i = 0; i < res_val_max; i++) {
2619                 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2620                 if (err)
2621                         goto err_return;
2622         }
2623         return 0;
2624 
2625 err_return:
2626         bnad_res_free(bnad, res_info, res_val_max);
2627         return err;
2628 }
2629 
2630 /* Interrupt enable / disable */
2631 static void
2632 bnad_enable_msix(struct bnad *bnad)
2633 {
2634         int i, ret;
2635         unsigned long flags;
2636 
2637         spin_lock_irqsave(&bnad->bna_lock, flags);
2638         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2639                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2640                 return;
2641         }
2642         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2643 
2644         if (bnad->msix_table)
2645                 return;
2646 
2647         bnad->msix_table =
2648                 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2649 
2650         if (!bnad->msix_table)
2651                 goto intx_mode;
2652 
2653         for (i = 0; i < bnad->msix_num; i++)
2654                 bnad->msix_table[i].entry = i;
2655 
2656         ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2657                                     1, bnad->msix_num);
2658         if (ret < 0) {
2659                 goto intx_mode;
2660         } else if (ret < bnad->msix_num) {
2661                 dev_warn(&bnad->pcidev->dev,
2662                          "%d MSI-X vectors allocated < %d requested\n",
2663                          ret, bnad->msix_num);
2664 
2665                 spin_lock_irqsave(&bnad->bna_lock, flags);
2666                 /* ret = #of vectors that we got */
2667                 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2668                         (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2669                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2670 
2671                 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2672                          BNAD_MAILBOX_MSIX_VECTORS;
2673 
2674                 if (bnad->msix_num > ret) {
2675                         pci_disable_msix(bnad->pcidev);
2676                         goto intx_mode;
2677                 }
2678         }
2679 
2680         pci_intx(bnad->pcidev, 0);
2681 
2682         return;
2683 
2684 intx_mode:
2685         dev_warn(&bnad->pcidev->dev,
2686                  "MSI-X enable failed - operating in INTx mode\n");
2687 
2688         kfree(bnad->msix_table);
2689         bnad->msix_table = NULL;
2690         bnad->msix_num = 0;
2691         spin_lock_irqsave(&bnad->bna_lock, flags);
2692         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2693         bnad_q_num_init(bnad);
2694         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2695 }
2696 
2697 static void
2698 bnad_disable_msix(struct bnad *bnad)
2699 {
2700         u32 cfg_flags;
2701         unsigned long flags;
2702 
2703         spin_lock_irqsave(&bnad->bna_lock, flags);
2704         cfg_flags = bnad->cfg_flags;
2705         if (bnad->cfg_flags & BNAD_CF_MSIX)
2706                 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2707         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2708 
2709         if (cfg_flags & BNAD_CF_MSIX) {
2710                 pci_disable_msix(bnad->pcidev);
2711                 kfree(bnad->msix_table);
2712                 bnad->msix_table = NULL;
2713         }
2714 }
2715 
2716 /* Netdev entry points */
2717 static int
2718 bnad_open(struct net_device *netdev)
2719 {
2720         int err;
2721         struct bnad *bnad = netdev_priv(netdev);
2722         struct bna_pause_config pause_config;
2723         unsigned long flags;
2724 
2725         mutex_lock(&bnad->conf_mutex);
2726 
2727         /* Tx */
2728         err = bnad_setup_tx(bnad, 0);
2729         if (err)
2730                 goto err_return;
2731 
2732         /* Rx */
2733         err = bnad_setup_rx(bnad, 0);
2734         if (err)
2735                 goto cleanup_tx;
2736 
2737         /* Port */
2738         pause_config.tx_pause = 0;
2739         pause_config.rx_pause = 0;
2740 
2741         spin_lock_irqsave(&bnad->bna_lock, flags);
2742         bna_enet_mtu_set(&bnad->bna.enet,
2743                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2744         bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2745         bna_enet_enable(&bnad->bna.enet);
2746         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2747 
2748         /* Enable broadcast */
2749         bnad_enable_default_bcast(bnad);
2750 
2751         /* Restore VLANs, if any */
2752         bnad_restore_vlans(bnad, 0);
2753 
2754         /* Set the UCAST address */
2755         spin_lock_irqsave(&bnad->bna_lock, flags);
2756         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2757         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2758 
2759         /* Start the stats timer */
2760         bnad_stats_timer_start(bnad);
2761 
2762         mutex_unlock(&bnad->conf_mutex);
2763 
2764         return 0;
2765 
2766 cleanup_tx:
2767         bnad_destroy_tx(bnad, 0);
2768 
2769 err_return:
2770         mutex_unlock(&bnad->conf_mutex);
2771         return err;
2772 }
2773 
2774 static int
2775 bnad_stop(struct net_device *netdev)
2776 {
2777         struct bnad *bnad = netdev_priv(netdev);
2778         unsigned long flags;
2779 
2780         mutex_lock(&bnad->conf_mutex);
2781 
2782         /* Stop the stats timer */
2783         bnad_stats_timer_stop(bnad);
2784 
2785         init_completion(&bnad->bnad_completions.enet_comp);
2786 
2787         spin_lock_irqsave(&bnad->bna_lock, flags);
2788         bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2789                         bnad_cb_enet_disabled);
2790         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2791 
2792         wait_for_completion(&bnad->bnad_completions.enet_comp);
2793 
2794         bnad_destroy_tx(bnad, 0);
2795         bnad_destroy_rx(bnad, 0);
2796 
2797         /* Synchronize mailbox IRQ */
2798         bnad_mbox_irq_sync(bnad);
2799 
2800         mutex_unlock(&bnad->conf_mutex);
2801 
2802         return 0;
2803 }
2804 
2805 /* TX */
2806 /* Returns 0 for success */
2807 static int
2808 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2809                     struct sk_buff *skb, struct bna_txq_entry *txqent)
2810 {
2811         u16 flags = 0;
2812         u32 gso_size;
2813         u16 vlan_tag = 0;
2814 
2815         if (skb_vlan_tag_present(skb)) {
2816                 vlan_tag = (u16)skb_vlan_tag_get(skb);
2817                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2818         }
2819         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2820                 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2821                                 | (vlan_tag & 0x1fff);
2822                 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2823         }
2824         txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2825 
2826         if (skb_is_gso(skb)) {
2827                 gso_size = skb_shinfo(skb)->gso_size;
2828                 if (unlikely(gso_size > bnad->netdev->mtu)) {
2829                         BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2830                         return -EINVAL;
2831                 }
2832                 if (unlikely((gso_size + skb_transport_offset(skb) +
2833                               tcp_hdrlen(skb)) >= skb->len)) {
2834                         txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2835                         txqent->hdr.wi.lso_mss = 0;
2836                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2837                 } else {
2838                         txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2839                         txqent->hdr.wi.lso_mss = htons(gso_size);
2840                 }
2841 
2842                 if (bnad_tso_prepare(bnad, skb)) {
2843                         BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2844                         return -EINVAL;
2845                 }
2846 
2847                 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2848                 txqent->hdr.wi.l4_hdr_size_n_offset =
2849                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2850                         tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2851         } else  {
2852                 txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2853                 txqent->hdr.wi.lso_mss = 0;
2854 
2855                 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2856                         BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2857                         return -EINVAL;
2858                 }
2859 
2860                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2861                         __be16 net_proto = vlan_get_protocol(skb);
2862                         u8 proto = 0;
2863 
2864                         if (net_proto == htons(ETH_P_IP))
2865                                 proto = ip_hdr(skb)->protocol;
2866 #ifdef NETIF_F_IPV6_CSUM
2867                         else if (net_proto == htons(ETH_P_IPV6)) {
2868                                 /* nexthdr may not be TCP immediately. */
2869                                 proto = ipv6_hdr(skb)->nexthdr;
2870                         }
2871 #endif
2872                         if (proto == IPPROTO_TCP) {
2873                                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2874                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2875                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2876                                               (0, skb_transport_offset(skb)));
2877 
2878                                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2879 
2880                                 if (unlikely(skb_headlen(skb) <
2881                                             skb_transport_offset(skb) +
2882                                     tcp_hdrlen(skb))) {
2883                                         BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2884                                         return -EINVAL;
2885                                 }
2886                         } else if (proto == IPPROTO_UDP) {
2887                                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2888                                 txqent->hdr.wi.l4_hdr_size_n_offset =
2889                                         htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2890                                               (0, skb_transport_offset(skb)));
2891 
2892                                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2893                                 if (unlikely(skb_headlen(skb) <
2894                                             skb_transport_offset(skb) +
2895                                     sizeof(struct udphdr))) {
2896                                         BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2897                                         return -EINVAL;
2898                                 }
2899                         } else {
2900 
2901                                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2902                                 return -EINVAL;
2903                         }
2904                 } else
2905                         txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2906         }
2907 
2908         txqent->hdr.wi.flags = htons(flags);
2909         txqent->hdr.wi.frame_length = htonl(skb->len);
2910 
2911         return 0;
2912 }
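     /*
      * Example of the resulting work item header for a common case (a
      * non-TSO TCP/IPv4 frame with CHECKSUM_PARTIAL and no VLAN tag):
      * opcode = BNA_TXQ_WI_SEND, lso_mss = 0, vlan_tag = 0,
      * flags = BNA_TXQ_WI_CF_TCP_CKSUM, and l4_hdr_size_n_offset encodes
      * only the transport header offset (the header-size field is 0).
      */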
2913 
2914 /*
2915  * bnad_start_xmit : Netdev entry point for Transmit
2916  *                   Called under lock held by net_device
2917  */
2918 static netdev_tx_t
2919 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2920 {
2921         struct bnad *bnad = netdev_priv(netdev);
2922         u32 txq_id = 0;
2923         struct bna_tcb *tcb = NULL;
2924         struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2925         u32             prod, q_depth, vect_id;
2926         u32             wis, vectors, len;
2927         int             i;
2928         dma_addr_t              dma_addr;
2929         struct bna_txq_entry *txqent;
2930 
2931         len = skb_headlen(skb);
2932 
2933         /* Sanity checks for the skb */
2934 
2935         if (unlikely(skb->len <= ETH_HLEN)) {
2936                 dev_kfree_skb_any(skb);
2937                 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2938                 return NETDEV_TX_OK;
2939         }
2940         if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2941                 dev_kfree_skb_any(skb);
2942                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2943                 return NETDEV_TX_OK;
2944         }
2945         if (unlikely(len == 0)) {
2946                 dev_kfree_skb_any(skb);
2947                 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2948                 return NETDEV_TX_OK;
2949         }
2950 
2951         tcb = bnad->tx_info[0].tcb[txq_id];
2952 
2953         /*
2954          * Takes care of the Tx that is scheduled between clearing the flag
2955          * and the netif_tx_stop_all_queues() call.
2956          */
2957         if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2958                 dev_kfree_skb_any(skb);
2959                 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2960                 return NETDEV_TX_OK;
2961         }
2962 
2963         q_depth = tcb->q_depth;
2964         prod = tcb->producer_index;
2965         unmap_q = tcb->unmap_q;
2966 
2967         vectors = 1 + skb_shinfo(skb)->nr_frags;
2968         wis = BNA_TXQ_WI_NEEDED(vectors);       /* 4 vectors per work item */
2969 
2970         if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2971                 dev_kfree_skb_any(skb);
2972                 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2973                 return NETDEV_TX_OK;
2974         }
2975 
2976         /* Check for available TxQ resources */
2977         if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2978                 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2979                     !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2980                         u32 sent;
2981                         sent = bnad_txcmpl_process(bnad, tcb);
2982                         if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2983                                 bna_ib_ack(tcb->i_dbell, sent);
2984                         smp_mb__before_atomic();
2985                         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2986                 } else {
2987                         netif_stop_queue(netdev);
2988                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2989                 }
2990 
2991                 smp_mb();
2992                 /*
2993                  * Check again to deal with the race between
2994                  * netif_stop_queue() here and netif_wake_queue() in the
2995                  * interrupt handler, which does not run under the netif tx lock.
2996                  */
2997                 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2998                         BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2999                         return NETDEV_TX_BUSY;
3000                 } else {
3001                         netif_wake_queue(netdev);
3002                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3003                 }
3004         }
3005 
3006         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3007         head_unmap = &unmap_q[prod];
3008 
3009         /* Program the opcode, flags, frame_len, num_vectors in WI */
3010         if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3011                 dev_kfree_skb_any(skb);
3012                 return NETDEV_TX_OK;
3013         }
3014         txqent->hdr.wi.reserved = 0;
3015         txqent->hdr.wi.num_vectors = vectors;
3016 
3017         head_unmap->skb = skb;
3018         head_unmap->nvecs = 0;
3019 
3020         /* Program the vectors */
3021         unmap = head_unmap;
3022         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3023                                   len, DMA_TO_DEVICE);
3024         if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3025                 dev_kfree_skb_any(skb);
3026                 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3027                 return NETDEV_TX_OK;
3028         }
3029         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3030         txqent->vector[0].length = htons(len);
3031         dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3032         head_unmap->nvecs++;
3033 
3034         for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3035                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3036                 u32             size = skb_frag_size(frag);
3037 
3038                 if (unlikely(size == 0)) {
3039                         /* Undo the changes starting at tcb->producer_index */
3040                         bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3041                                 tcb->producer_index);
3042                         dev_kfree_skb_any(skb);
3043                         BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3044                         return NETDEV_TX_OK;
3045                 }
3046 
3047                 len += size;
3048 
3049                 vect_id++;
3050                 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3051                         vect_id = 0;
3052                         BNA_QE_INDX_INC(prod, q_depth);
3053                         txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3054                         txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3055                         unmap = &unmap_q[prod];
3056                 }
3057 
3058                 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3059                                             0, size, DMA_TO_DEVICE);
3060                 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3061                         /* Undo the changes starting at tcb->producer_index */
3062                         bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3063                                            tcb->producer_index);
3064                         dev_kfree_skb_any(skb);
3065                         BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3066                         return NETDEV_TX_OK;
3067                 }
3068 
3069                 dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3070                 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3071                 txqent->vector[vect_id].length = htons(size);
3072                 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3073                                    dma_addr);
3074                 head_unmap->nvecs++;
3075         }
3076 
3077         if (unlikely(len != skb->len)) {
3078                 /* Undo the changes starting at tcb->producer_index */
3079                 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3080                 dev_kfree_skb_any(skb);
3081                 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3082                 return NETDEV_TX_OK;
3083         }
3084 
3085         BNA_QE_INDX_INC(prod, q_depth);
3086         tcb->producer_index = prod;
3087 
3088         wmb();
3089 
3090         if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3091                 return NETDEV_TX_OK;
3092 
3093         skb_tx_timestamp(skb);
3094 
3095         bna_txq_prod_indx_doorbell(tcb);
3096 
3097         return NETDEV_TX_OK;
3098 }
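/*
 * Worked example (a sketch, assuming BNA_TXQ_WI_NEEDED() simply rounds the
 * vector count up to multiples of BFI_TX_MAX_VECTORS_PER_WI, i.e. the
 * "4 vectors per work item" noted above): an skb with a linear area plus
 * 8 page frags needs 9 vectors, hence 3 work items - the base WI carrying
 * vectors 0-3 and two BNA_TXQ_WI_EXTENSION WIs carrying the rest.
 */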
3099 
3100 /*
3101  * Uses spin_lock to synchronize reading of the stats structures, which
3102  * are written by BNA under the same lock.
3103  */
3104 static void
3105 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3106 {
3107         struct bnad *bnad = netdev_priv(netdev);
3108         unsigned long flags;
3109 
3110         spin_lock_irqsave(&bnad->bna_lock, flags);
3111 
3112         bnad_netdev_qstats_fill(bnad, stats);
3113         bnad_netdev_hwstats_fill(bnad, stats);
3114 
3115         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3116 }
3117 
3118 static void
3119 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3120 {
3121         struct net_device *netdev = bnad->netdev;
3122         int uc_count = netdev_uc_count(netdev);
3123         enum bna_cb_status ret;
3124         u8 *mac_list;
3125         struct netdev_hw_addr *ha;
3126         int entry;
3127 
3128         if (netdev_uc_empty(bnad->netdev)) {
3129                 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3130                 return;
3131         }
3132 
3133         if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3134                 goto mode_default;
3135 
3136         mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3137         if (mac_list == NULL)
3138                 goto mode_default;
3139 
3140         entry = 0;
3141         netdev_for_each_uc_addr(ha, netdev) {
3142                 ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3143                 entry++;
3144         }
3145 
3146         ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3147         kfree(mac_list);
3148 
3149         if (ret != BNA_CB_SUCCESS)
3150                 goto mode_default;
3151 
3152         return;
3153 
3154         /* ucast packets not in UCAM are routed to default function */
3155 mode_default:
3156         bnad->cfg_flags |= BNAD_CF_DEFAULT;
3157         bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3158 }
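/*
 * The unicast list handed to bna_rx_ucast_listset() is a flat byte array,
 * one ETH_ALEN-sized entry per address; entry i starts at
 * &mac_list[i * ETH_ALEN].  On overflow or allocation failure the function
 * falls back to BNAD_CF_DEFAULT so unicast frames not in the UCAM still
 * reach the default function.
 */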
3159 
3160 static void
3161 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3162 {
3163         struct net_device *netdev = bnad->netdev;
3164         int mc_count = netdev_mc_count(netdev);
3165         enum bna_cb_status ret;
3166         u8 *mac_list;
3167 
3168         if (netdev->flags & IFF_ALLMULTI)
3169                 goto mode_allmulti;
3170 
3171         if (netdev_mc_empty(netdev))
3172                 return;
3173 
3174         if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3175                 goto mode_allmulti;
3176 
3177         mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3178 
3179         if (mac_list == NULL)
3180                 goto mode_allmulti;
3181 
3182         ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3183 
3184         /* copy rest of the MCAST addresses */
3185         bnad_netdev_mc_list_get(netdev, mac_list);
3186         ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3187         kfree(mac_list);
3188 
3189         if (ret != BNA_CB_SUCCESS)
3190                 goto mode_allmulti;
3191 
3192         return;
3193 
3194 mode_allmulti:
3195         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3196         bna_rx_mcast_delall(bnad->rx_info[0].rx);
3197 }
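/*
 * Layout of the list passed to bna_rx_mcast_listset() above: slot 0 is the
 * broadcast address (bnad_bcast_addr), slots 1..mc_count are copied from
 * the netdev multicast list by bnad_netdev_mc_list_get(), hence the
 * mc_count + 1 entries allocated and programmed.
 */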
3198 
3199 void
3200 bnad_set_rx_mode(struct net_device *netdev)
3201 {
3202         struct bnad *bnad = netdev_priv(netdev);
3203         enum bna_rxmode new_mode, mode_mask;
3204         unsigned long flags;
3205 
3206         spin_lock_irqsave(&bnad->bna_lock, flags);
3207 
3208         if (bnad->rx_info[0].rx == NULL) {
3209                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3210                 return;
3211         }
3212 
3213         /* clear bnad flags to update them with the new settings */
3214         bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3215                         BNAD_CF_ALLMULTI);
3216 
3217         new_mode = 0;
3218         if (netdev->flags & IFF_PROMISC) {
3219                 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3220                 bnad->cfg_flags |= BNAD_CF_PROMISC;
3221         } else {
3222                 bnad_set_rx_mcast_fltr(bnad);
3223 
3224                 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3225                         new_mode |= BNA_RXMODE_ALLMULTI;
3226 
3227                 bnad_set_rx_ucast_fltr(bnad);
3228 
3229                 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3230                         new_mode |= BNA_RXMODE_DEFAULT;
3231         }
3232 
3233         mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3234                         BNA_RXMODE_ALLMULTI;
3235         bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3236 
3237         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3238 }
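/*
 * Summary of the flag mapping above: IFF_PROMISC selects
 * BNAD_RXMODE_PROMISC_DEFAULT outright; otherwise a multicast overflow (or
 * IFF_ALLMULTI) yields BNA_RXMODE_ALLMULTI and a unicast overflow yields
 * BNA_RXMODE_DEFAULT, both via the BNAD_CF_* flags set by the filter
 * helpers.
 */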
3239 
3240 /*
3241  * bna_lock is used to sync writes to netdev->addr
3242  * conf_lock cannot be used since this call may be made
3243  * in a non-blocking context.
3244  */
3245 static int
3246 bnad_set_mac_address(struct net_device *netdev, void *addr)
3247 {
3248         int err;
3249         struct bnad *bnad = netdev_priv(netdev);
3250         struct sockaddr *sa = (struct sockaddr *)addr;
3251         unsigned long flags;
3252 
3253         spin_lock_irqsave(&bnad->bna_lock, flags);
3254 
3255         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3256         if (!err)
3257                 ether_addr_copy(netdev->dev_addr, sa->sa_data);
3258 
3259         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3260 
3261         return err;
3262 }
3263 
3264 static int
3265 bnad_mtu_set(struct bnad *bnad, int frame_size)
3266 {
3267         unsigned long flags;
3268 
3269         init_completion(&bnad->bnad_completions.mtu_comp);
3270 
3271         spin_lock_irqsave(&bnad->bna_lock, flags);
3272         bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3273         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3274 
3275         wait_for_completion(&bnad->bnad_completions.mtu_comp);
3276 
3277         return bnad->bnad_completions.mtu_comp_status;
3278 }
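/*
 * bnad_mtu_set() is a synchronous wrapper around an asynchronous bna call:
 * presumably bnad_cb_enet_mtu_set() records the status in
 * bnad_completions.mtu_comp_status and completes mtu_comp, the usual
 * completion pattern, roughly:
 *
 *	init_completion(&comp);
 *	start_async_op(..., callback);	// callback does complete(&comp)
 *	wait_for_completion(&comp);
 *	return status_recorded_by_callback;
 */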
3279 
3280 static int
3281 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3282 {
3283         int err, mtu;
3284         struct bnad *bnad = netdev_priv(netdev);
3285         u32 rx_count = 0, frame, new_frame;
3286 
3287         mutex_lock(&bnad->conf_mutex);
3288 
3289         mtu = netdev->mtu;
3290         netdev->mtu = new_mtu;
3291 
3292         frame = BNAD_FRAME_SIZE(mtu);
3293         new_frame = BNAD_FRAME_SIZE(new_mtu);
3294 
3295         /* check if multi-buffer needs to be enabled */
3296         if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3297             netif_running(bnad->netdev)) {
3298                 /* only when transition is over 4K */
3299                 if ((frame <= 4096 && new_frame > 4096) ||
3300                     (frame > 4096 && new_frame <= 4096))
3301                         rx_count = bnad_reinit_rx(bnad);
3302         }
3303 
3304         /* rx_count > 0 means the Rx path was re-created above;
3305          * program the new MTU in hardware and report 0 to the stack
3306          * on success.
3307          */
3307         err = bnad_mtu_set(bnad, new_frame);
3308         if (err)
3309                 err = -EBUSY;
3310 
3311         mutex_unlock(&bnad->conf_mutex);
3312         return err;
3313 }
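/*
 * Example of the multi-buffer transition above (assuming BNAD_FRAME_SIZE()
 * adds the Ethernet header and FCS to the MTU): changing the MTU from 3900
 * to 4500 moves the frame size across the 4096-byte boundary, so
 * bnad_reinit_rx() is called; changing it from 1500 to 3000 stays below
 * 4096 and the Rx path is left alone.
 */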
3314 
3315 static int
3316 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3317 {
3318         struct bnad *bnad = netdev_priv(netdev);
3319         unsigned long flags;
3320 
3321         if (!bnad->rx_info[0].rx)
3322                 return 0;
3323 
3324         mutex_lock(&bnad->conf_mutex);
3325 
3326         spin_lock_irqsave(&bnad->bna_lock, flags);
3327         bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3328         set_bit(vid, bnad->active_vlans);
3329         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3330 
3331         mutex_unlock(&bnad->conf_mutex);
3332 
3333         return 0;
3334 }
3335 
3336 static int
3337 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3338 {
3339         struct bnad *bnad = netdev_priv(netdev);
3340         unsigned long flags;
3341 
3342         if (!bnad->rx_info[0].rx)
3343                 return 0;
3344 
3345         mutex_lock(&bnad->conf_mutex);
3346 
3347         spin_lock_irqsave(&bnad->bna_lock, flags);
3348         clear_bit(vid, bnad->active_vlans);
3349         bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3350         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3351 
3352         mutex_unlock(&bnad->conf_mutex);
3353 
3354         return 0;
3355 }
3356 
3357 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3358 {
3359         struct bnad *bnad = netdev_priv(dev);
3360         netdev_features_t changed = features ^ dev->features;
3361 
3362         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3363                 unsigned long flags;
3364 
3365                 spin_lock_irqsave(&bnad->bna_lock, flags);
3366 
3367                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3368                         bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3369                 else
3370                         bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3371 
3372                 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3373         }
3374 
3375         return 0;
3376 }
3377 
3378 #ifdef CONFIG_NET_POLL_CONTROLLER
3379 static void
3380 bnad_netpoll(struct net_device *netdev)
3381 {
3382         struct bnad *bnad = netdev_priv(netdev);
3383         struct bnad_rx_info *rx_info;
3384         struct bnad_rx_ctrl *rx_ctrl;
3385         u32 curr_mask;
3386         int i, j;
3387 
3388         if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3389                 bna_intx_disable(&bnad->bna, curr_mask);
3390                 bnad_isr(bnad->pcidev->irq, netdev);
3391                 bna_intx_enable(&bnad->bna, curr_mask);
3392         } else {
3393                 /*
3394                  * Tx processing may happen in sending context, so no need
3395                  * to explicitly process completions here
3396                  */
3397 
3398                 /* Rx processing */
3399                 for (i = 0; i < bnad->num_rx; i++) {
3400                         rx_info = &bnad->rx_info[i];
3401                         if (!rx_info->rx)
3402                                 continue;
3403                         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3404                                 rx_ctrl = &rx_info->rx_ctrl[j];
3405                                 if (rx_ctrl->ccb)
3406                                         bnad_netif_rx_schedule_poll(bnad,
3407                                                             rx_ctrl->ccb);
3408                         }
3409                 }
3410         }
3411 }
3412 #endif
3413 
3414 static const struct net_device_ops bnad_netdev_ops = {
3415         .ndo_open               = bnad_open,
3416         .ndo_stop               = bnad_stop,
3417         .ndo_start_xmit         = bnad_start_xmit,
3418         .ndo_get_stats64        = bnad_get_stats64,
3419         .ndo_set_rx_mode        = bnad_set_rx_mode,
3420         .ndo_validate_addr      = eth_validate_addr,
3421         .ndo_set_mac_address    = bnad_set_mac_address,
3422         .ndo_change_mtu         = bnad_change_mtu,
3423         .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3424         .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3425         .ndo_set_features       = bnad_set_features,
3426 #ifdef CONFIG_NET_POLL_CONTROLLER
3427         .ndo_poll_controller    = bnad_netpoll
3428 #endif
3429 };
3430 
3431 static void
3432 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3433 {
3434         struct net_device *netdev = bnad->netdev;
3435 
3436         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3437                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3438                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3439                 NETIF_F_HW_VLAN_CTAG_RX;
3440 
3441         netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3442                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3443                 NETIF_F_TSO | NETIF_F_TSO6;
3444 
3445         netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3446 
3447         if (using_dac)
3448                 netdev->features |= NETIF_F_HIGHDMA;
3449 
3450         netdev->mem_start = bnad->mmio_start;
3451         netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3452 
3453         /* MTU range: 46 - 9000 */
3454         netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3455         netdev->max_mtu = BNAD_JUMBO_MTU;
3456 
3457         netdev->netdev_ops = &bnad_netdev_ops;
3458         bnad_set_ethtool_ops(netdev);
3459 }
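/*
 * Note on the MTU bounds above: ETH_ZLEN (60) - ETH_HLEN (14) gives the
 * minimum MTU of 46 quoted in the comment, and BNAD_JUMBO_MTU corresponds
 * to the 9000-byte jumbo maximum.
 */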
3460 
3461 /*
3462  * 1. Initialize the bnad structure
3463  * 2. Setup netdev pointer in pci_dev
3464  * 3. Initialize no. of TxQ & CQs & MSIX vectors
3465  * 4. Initialize work queue.
3466  */
3467 static int
3468 bnad_init(struct bnad *bnad,
3469           struct pci_dev *pdev, struct net_device *netdev)
3470 {
3471         unsigned long flags;
3472 
3473         SET_NETDEV_DEV(netdev, &pdev->dev);
3474         pci_set_drvdata(pdev, netdev);
3475 
3476         bnad->netdev = netdev;
3477         bnad->pcidev = pdev;
3478         bnad->mmio_start = pci_resource_start(pdev, 0);
3479         bnad->mmio_len = pci_resource_len(pdev, 0);
3480         bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3481         if (!bnad->bar0) {
3482                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3483                 return -ENOMEM;
3484         }
3485         dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3486                  (unsigned long long) bnad->mmio_len);
3487 
3488         spin_lock_irqsave(&bnad->bna_lock, flags);
3489         if (!bnad_msix_disable)
3490                 bnad->cfg_flags = BNAD_CF_MSIX;
3491 
3492         bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3493 
3494         bnad_q_num_init(bnad);
3495         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3496 
3497         bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3498                 (bnad->num_rx * bnad->num_rxp_per_rx) +
3499                          BNAD_MAILBOX_MSIX_VECTORS;
3500 
3501         bnad->txq_depth = BNAD_TXQ_DEPTH;
3502         bnad->rxq_depth = BNAD_RXQ_DEPTH;
3503 
3504         bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3505         bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3506 
3507         sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3508         bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3509         if (!bnad->work_q) {
3510                 iounmap(bnad->bar0);
3511                 return -ENOMEM;
3512         }
3513 
3514         return 0;
3515 }
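/*
 * Example of the msix_num arithmetic above (hypothetical values: one Tx
 * object with one TxQ, one Rx object with two RxPs, and one mailbox
 * vector): msix_num = (1 * 1) + (1 * 2) + 1 = 4 MSI-X vectors requested.
 */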
3516 
3517 /*
3518  * Must be called after bnad_pci_uninit()
3519  * so that iounmap() and pci_set_drvdata(NULL)
3520  * happen only after PCI uninitialization.
3521  */
3522 static void
3523 bnad_uninit(struct bnad *bnad)
3524 {
3525         if (bnad->work_q) {
3526                 flush_workqueue(bnad->work_q);
3527                 destroy_workqueue(bnad->work_q);
3528                 bnad->work_q = NULL;
3529         }
3530 
3531         if (bnad->bar0)
3532                 iounmap(bnad->bar0);
3533 }
3534 
3535 /*
3536  * Initialize locks
3537  *      a) Per-ioceth mutex used for serializing configuration
3538  *         changes from the OS interface
3539  *      b) spin lock used to protect the bna state machine
3540  */
3541 static void
3542 bnad_lock_init(struct bnad *bnad)
3543 {
3544         spin_lock_init(&bnad->bna_lock);
3545         mutex_init(&bnad->conf_mutex);
3546 }
3547 
3548 static void
3549 bnad_lock_uninit(struct bnad *bnad)
3550 {
3551         mutex_destroy(&bnad->conf_mutex);
3552 }
3553 
3554 /* PCI Initialization */
3555 static int
3556 bnad_pci_init(struct bnad *bnad,
3557               struct pci_dev *pdev, bool *using_dac)
3558 {
3559         int err;
3560 
3561         err = pci_enable_device(pdev);
3562         if (err)
3563                 return err;
3564         err = pci_request_regions(pdev, BNAD_NAME);
3565         if (err)
3566                 goto disable_device;
3567         if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3568                 *using_dac = true;
3569         } else {
3570                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3571                 if (err)
3572                         goto release_regions;
3573                 *using_dac = false;
3574         }
3575         pci_set_master(pdev);
3576         return 0;
3577 
3578 release_regions:
3579         pci_release_regions(pdev);
3580 disable_device:
3581         pci_disable_device(pdev);
3582 
3583         return err;
3584 }
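/*
 * DMA mask selection above: a 64-bit mask is tried first and, failing
 * that, the driver falls back to a 32-bit mask; *using_dac records which
 * one succeeded and later gates NETIF_F_HIGHDMA in bnad_netdev_init().
 */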
3585 
3586 static void
3587 bnad_pci_uninit(struct pci_dev *pdev)
3588 {
3589         pci_release_regions(pdev);
3590         pci_disable_device(pdev);
3591 }
3592 
3593 static int
3594 bnad_pci_probe(struct pci_dev *pdev,
3595                 const struct pci_device_id *pcidev_id)
3596 {
3597         bool    using_dac;
3598         int     err;
3599         struct bnad *bnad;
3600         struct bna *bna;
3601         struct net_device *netdev;
3602         struct bfa_pcidev pcidev_info;
3603         unsigned long flags;
3604 
3605         mutex_lock(&bnad_fwimg_mutex);
3606         if (!cna_get_firmware_buf(pdev)) {
3607                 mutex_unlock(&bnad_fwimg_mutex);
3608                 dev_err(&pdev->dev, "failed to load firmware image!\n");
3609                 return -ENODEV;
3610         }
3611         mutex_unlock(&bnad_fwimg_mutex);
3612 
3613         /*
3614          * Allocate sizeof(struct net_device) + sizeof(struct bnad);
3615          * bnad = netdev_priv(netdev)
3616          */
3617         netdev = alloc_etherdev(sizeof(struct bnad));
3618         if (!netdev) {
3619                 err = -ENOMEM;
3620                 return err;
3621         }
3622         bnad = netdev_priv(netdev);
3623         bnad_lock_init(bnad);
3624         bnad->id = atomic_inc_return(&bna_id) - 1;
3625 
3626         mutex_lock(&bnad->conf_mutex);
3627         /*
3628          * PCI initialization
3629          *      Output : using_dac = true for 64-bit DMA
3630          *                         = false for 32-bit DMA
3631          */
3632         using_dac = false;
3633         err = bnad_pci_init(bnad, pdev, &using_dac);
3634         if (err)
3635                 goto unlock_mutex;
3636 
3637         /*
3638          * Initialize bnad structure
3639          * Setup relation between pci_dev & netdev
3640          */
3641         err = bnad_init(bnad, pdev, netdev);
3642         if (err)
3643                 goto pci_uninit;
3644 
3645         /* Initialize netdev structure, set up ethtool ops */
3646         bnad_netdev_init(bnad, using_dac);
3647 
3648         /* Set link to down state */
3649         netif_carrier_off(netdev);
3650 
3651         /* Set up the debugfs node for this bnad */
3652         if (bna_debugfs_enable)
3653                 bnad_debugfs_init(bnad);
3654 
3655         /* Get resource requirement from bna */
3656         spin_lock_irqsave(&bnad->bna_lock, flags);
3657         bna_res_req(&bnad->res_info[0]);
3658         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3659 
3660         /* Allocate resources from bna */
3661         err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3662         if (err)
3663                 goto drv_uninit;
3664 
3665         bna = &bnad->bna;
3666 
3667         /* Setup pcidev_info for bna_init() */
3668         pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3669         pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3670         pcidev_info.device_id = bnad->pcidev->device;
3671         pcidev_info.pci_bar_kva = bnad->bar0;
3672 
3673         spin_lock_irqsave(&bnad->bna_lock, flags);
3674         bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3675         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3676 
3677         bnad->stats.bna_stats = &bna->stats;
3678 
3679         bnad_enable_msix(bnad);
3680         err = bnad_mbox_irq_alloc(bnad);
3681         if (err)
3682                 goto res_free;
3683 
3684         /* Set up timers */
3685         timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3686         timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3687         timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3688         timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3689                     0);
3690 
3691         /*
3692          * Start the chip
3693          * If the callback comes back with an error, we bail out;
3694          * this is a catastrophic error.
3695          */
3696         err = bnad_ioceth_enable(bnad);
3697         if (err) {
3698                 dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3699                 goto probe_success;
3700         }
3701 
3702         spin_lock_irqsave(&bnad->bna_lock, flags);
3703         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3704                 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3705                 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3706                         bna_attr(bna)->num_rxp - 1);
3707                 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3708                         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3709                         err = -EIO;
3710         }
3711         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3712         if (err)
3713                 goto disable_ioceth;
3714 
3715         spin_lock_irqsave(&bnad->bna_lock, flags);
3716         bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3717         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3718 
3719         err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3720         if (err) {
3721                 err = -EIO;
3722                 goto disable_ioceth;
3723         }
3724 
3725         spin_lock_irqsave(&bnad->bna_lock, flags);
3726         bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3727         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3728 
3729         /* Get the burnt-in mac */
3730         spin_lock_irqsave(&bnad->bna_lock, flags);
3731         bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3732         bnad_set_netdev_perm_addr(bnad);
3733         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3734 
3735         mutex_unlock(&bnad->conf_mutex);
3736 
3737         /* Finally, register with the net_device layer */
3738         err = register_netdev(netdev);
3739         if (err) {
3740                 dev_err(&pdev->dev, "registering net device failed\n");
3741                 goto probe_uninit;
3742         }
3743         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3744 
3745         return 0;
3746 
3747 probe_success:
3748         mutex_unlock(&bnad->conf_mutex);
3749         return 0;
3750 
3751 probe_uninit:
3752         mutex_lock(&bnad->conf_mutex);
3753         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3754 disable_ioceth:
3755         bnad_ioceth_disable(bnad);
3756         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3757         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3758         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3759         spin_lock_irqsave(&bnad->bna_lock, flags);
3760         bna_uninit(bna);
3761         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3762         bnad_mbox_irq_free(bnad);
3763         bnad_disable_msix(bnad);
3764 res_free:
3765         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3766 drv_uninit:
3767         /* Remove the debugfs node for this bnad */
3768         kfree(bnad->regdata);
3769         bnad_debugfs_uninit(bnad);
3770         bnad_uninit(bnad);
3771 pci_uninit:
3772         bnad_pci_uninit(pdev);
3773 unlock_mutex:
3774         mutex_unlock(&bnad->conf_mutex);
3775         bnad_lock_uninit(bnad);
3776         free_netdev(netdev);
3777         return err;
3778 }
3779 
3780 static void
3781 bnad_pci_remove(struct pci_dev *pdev)
3782 {
3783         struct net_device *netdev = pci_get_drvdata(pdev);
3784         struct bnad *bnad;
3785         struct bna *bna;
3786         unsigned long flags;
3787 
3788         if (!netdev)
3789                 return;
3790 
3791         bnad = netdev_priv(netdev);
3792         bna = &bnad->bna;
3793 
3794         if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3795                 unregister_netdev(netdev);
3796 
3797         mutex_lock(&bnad->conf_mutex);
3798         bnad_ioceth_disable(bnad);
3799         del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3800         del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3801         del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3802         spin_lock_irqsave(&bnad->bna_lock, flags);
3803         bna_uninit(bna);
3804         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3805 
3806         bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3807         bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3808         bnad_mbox_irq_free(bnad);
3809         bnad_disable_msix(bnad);
3810         bnad_pci_uninit(pdev);
3811         mutex_unlock(&bnad->conf_mutex);
3812         bnad_lock_uninit(bnad);
3813         /* Remove the debugfs node for this bnad */
3814         kfree(bnad->regdata);
3815         bnad_debugfs_uninit(bnad);
3816         bnad_uninit(bnad);
3817         free_netdev(netdev);
3818 }
3819 
3820 static const struct pci_device_id bnad_pci_id_table[] = {
3821         {
3822                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3823                         PCI_DEVICE_ID_BROCADE_CT),
3824                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3825                 .class_mask =  0xffff00
3826         },
3827         {
3828                 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3829                         BFA_PCI_DEVICE_ID_CT2),
3830                 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3831                 .class_mask =  0xffff00
3832         },
3833         {0,  },
3834 };
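/*
 * With .class = PCI_CLASS_NETWORK_ETHERNET << 8 and .class_mask = 0xffff00,
 * these entries match only PCI functions whose base-class/sub-class pair is
 * Ethernet, regardless of the programming-interface byte.
 */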
3835 
3836 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3837 
3838 static struct pci_driver bnad_pci_driver = {
3839         .name = BNAD_NAME,
3840         .id_table = bnad_pci_id_table,
3841         .probe = bnad_pci_probe,
3842         .remove = bnad_pci_remove,
3843 };
3844 
3845 static int __init
3846 bnad_module_init(void)
3847 {
3848         int err;
3849 
3850         pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
3851                 BNAD_VERSION);
3852 
3853         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3854 
3855         err = pci_register_driver(&bnad_pci_driver);
3856         if (err < 0) {
3857                 pr_err("bna: PCI driver registration failed err=%d\n", err);
3858                 return err;
3859         }
3860 
3861         return 0;
3862 }
3863 
3864 static void __exit
3865 bnad_module_exit(void)
3866 {
3867         pci_unregister_driver(&bnad_pci_driver);
3868         release_firmware(bfi_fw);
3869 }
3870 
3871 module_init(bnad_module_init);
3872 module_exit(bnad_module_exit);
3873 
3874 MODULE_AUTHOR("Brocade");
3875 MODULE_LICENSE("GPL");
3876 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3877 MODULE_VERSION(BNAD_VERSION);
3878 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3879 MODULE_FIRMWARE(CNA_FW_FILE_CT2);
