root/drivers/net/ethernet/pensando/ionic/ionic_lif.c


DEFINITIONS

This source file includes the following definitions.
  1. ionic_lif_deferred_work
  2. ionic_lif_deferred_enqueue
  3. ionic_link_status_check
  4. ionic_link_status_check_request
  5. ionic_isr
  6. ionic_request_irq
  7. ionic_intr_alloc
  8. ionic_intr_free
  9. ionic_qcq_enable
  10. ionic_qcq_disable
  11. ionic_lif_qcq_deinit
  12. ionic_qcq_free
  13. ionic_qcqs_free
  14. ionic_link_qcq_interrupts
  15. ionic_qcq_alloc
  16. ionic_qcqs_alloc
  17. ionic_lif_txq_init
  18. ionic_lif_rxq_init
  19. ionic_notifyq_service
  20. ionic_notifyq_clean
  21. ionic_adminq_service
  22. ionic_adminq_napi
  23. ionic_get_stats64
  24. ionic_lif_addr_add
  25. ionic_lif_addr_del
  26. ionic_lif_addr
  27. ionic_addr_add
  28. ionic_addr_del
  29. ionic_lif_rx_mode
  30. _ionic_lif_rx_mode
  31. ionic_set_rx_mode
  32. ionic_netdev_features_to_nic
  33. ionic_set_nic_features
  34. ionic_init_nic_features
  35. ionic_set_features
  36. ionic_set_mac_address
  37. ionic_change_mtu
  38. ionic_tx_timeout_work
  39. ionic_tx_timeout
  40. ionic_vlan_rx_add_vid
  41. ionic_vlan_rx_kill_vid
  42. ionic_lif_rss_config
  43. ionic_lif_rss_init
  44. ionic_lif_rss_deinit
  45. ionic_txrx_disable
  46. ionic_txrx_deinit
  47. ionic_txrx_free
  48. ionic_txrx_alloc
  49. ionic_txrx_init
  50. ionic_txrx_enable
  51. ionic_open
  52. ionic_stop
  53. ionic_reset_queues
  54. ionic_lif_alloc
  55. ionic_lifs_alloc
  56. ionic_lif_reset
  57. ionic_lif_free
  58. ionic_lifs_free
  59. ionic_lif_deinit
  60. ionic_lifs_deinit
  61. ionic_lif_adminq_init
  62. ionic_lif_notifyq_init
  63. ionic_station_set
  64. ionic_lif_init
  65. ionic_lifs_init
  66. ionic_lif_notify_work
  67. ionic_lif_set_netdev_info
  68. ionic_netdev_lif
  69. ionic_lif_notify
  70. ionic_lifs_register
  71. ionic_lifs_unregister
  72. ionic_lif_identify
  73. ionic_lifs_size

   1 // SPDX-License-Identifier: GPL-2.0
   2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
   3 
   4 #include <linux/printk.h>
   5 #include <linux/dynamic_debug.h>
   6 #include <linux/netdevice.h>
   7 #include <linux/etherdevice.h>
   8 #include <linux/rtnetlink.h>
   9 #include <linux/interrupt.h>
  10 #include <linux/pci.h>
  11 #include <linux/cpumask.h>
  12 
  13 #include "ionic.h"
  14 #include "ionic_bus.h"
  15 #include "ionic_lif.h"
  16 #include "ionic_txrx.h"
  17 #include "ionic_ethtool.h"
  18 #include "ionic_debugfs.h"
  19 
  20 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
  21 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
  22 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
  23 static void ionic_link_status_check(struct ionic_lif *lif);
  24 
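      /* Deferred-work handler: pop one item off lif->deferred.list,
       * dispatch it by type, free it, and reschedule the work so any
       * remaining items are processed one at a time in process context.
       */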
  25 static void ionic_lif_deferred_work(struct work_struct *work)
  26 {
  27         struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
  28         struct ionic_deferred *def = &lif->deferred;
  29         struct ionic_deferred_work *w = NULL;
  30 
  31         spin_lock_bh(&def->lock);
  32         if (!list_empty(&def->list)) {
  33                 w = list_first_entry(&def->list,
  34                                      struct ionic_deferred_work, list);
  35                 list_del(&w->list);
  36         }
  37         spin_unlock_bh(&def->lock);
  38 
  39         if (w) {
  40                 switch (w->type) {
  41                 case IONIC_DW_TYPE_RX_MODE:
  42                         ionic_lif_rx_mode(lif, w->rx_mode);
  43                         break;
  44                 case IONIC_DW_TYPE_RX_ADDR_ADD:
  45                         ionic_lif_addr_add(lif, w->addr);
  46                         break;
  47                 case IONIC_DW_TYPE_RX_ADDR_DEL:
  48                         ionic_lif_addr_del(lif, w->addr);
  49                         break;
  50                 case IONIC_DW_TYPE_LINK_STATUS:
  51                         ionic_link_status_check(lif);
  52                         break;
  53                 default:
  54                         break;
  55                 }
  56                 kfree(w);
  57                 schedule_work(&def->work);
  58         }
  59 }
  60 
  61 static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
  62                                        struct ionic_deferred_work *work)
  63 {
  64         spin_lock_bh(&def->lock);
  65         list_add_tail(&work->list, &def->list);
  66         spin_unlock_bh(&def->lock);
  67         schedule_work(&def->work);
  68 }
  69 
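      /* Reconcile the netdev carrier state with the link status reported
       * in lif->info, waking or stopping the tx queues as needed, then
       * clear the LINK_CHECK_REQUESTED flag.
       */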
  70 static void ionic_link_status_check(struct ionic_lif *lif)
  71 {
  72         struct net_device *netdev = lif->netdev;
  73         u16 link_status;
  74         bool link_up;
  75 
  76         link_status = le16_to_cpu(lif->info->status.link_status);
  77         link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
  78 
  79         /* filter out the no-change cases */
  80         if (link_up == netif_carrier_ok(netdev))
  81                 goto link_out;
  82 
  83         if (link_up) {
  84                 netdev_info(netdev, "Link up - %d Gbps\n",
  85                             le32_to_cpu(lif->info->status.link_speed) / 1000);
  86 
  87                 if (test_bit(IONIC_LIF_UP, lif->state)) {
  88                         netif_tx_wake_all_queues(lif->netdev);
  89                         netif_carrier_on(netdev);
  90                 }
  91         } else {
  92                 netdev_info(netdev, "Link down\n");
  93 
  94                 /* carrier off first to avoid watchdog timeout */
  95                 netif_carrier_off(netdev);
  96                 if (test_bit(IONIC_LIF_UP, lif->state))
  97                         netif_tx_stop_all_queues(netdev);
  98         }
  99 
 100 link_out:
 101         clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
 102 }
 103 
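      /* Request a link status check, keeping at most one request
       * outstanding; from interrupt context the check is deferred to the
       * work queue, otherwise it runs inline.
       */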
 104 static void ionic_link_status_check_request(struct ionic_lif *lif)
 105 {
 106         struct ionic_deferred_work *work;
 107 
 108         /* we only need one request outstanding at a time */
 109         if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
 110                 return;
 111 
 112         if (in_interrupt()) {
 113                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
 114                 if (!work)
 115                         return;
 116 
 117                 work->type = IONIC_DW_TYPE_LINK_STATUS;
 118                 ionic_lif_deferred_enqueue(&lif->deferred, work);
 119         } else {
 120                 ionic_link_status_check(lif);
 121         }
 122 }
 123 
 124 static irqreturn_t ionic_isr(int irq, void *data)
 125 {
 126         struct napi_struct *napi = data;
 127 
 128         napi_schedule_irqoff(napi);
 129 
 130         return IRQ_HANDLED;
 131 }
 132 
 133 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
 134 {
 135         struct ionic_intr_info *intr = &qcq->intr;
 136         struct device *dev = lif->ionic->dev;
 137         struct ionic_queue *q = &qcq->q;
 138         const char *name;
 139 
 140         if (lif->registered)
 141                 name = lif->netdev->name;
 142         else
 143                 name = dev_name(dev);
 144 
 145         snprintf(intr->name, sizeof(intr->name),
 146                  "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
 147 
 148         return devm_request_irq(dev, intr->vector, ionic_isr,
 149                                 0, intr->name, &qcq->napi);
 150 }
 151 
 152 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
 153 {
 154         struct ionic *ionic = lif->ionic;
 155         int index;
 156 
 157         index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
 158         if (index == ionic->nintrs) {
 159                 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
 160                             __func__, index, ionic->nintrs);
 161                 return -ENOSPC;
 162         }
 163 
 164         set_bit(index, ionic->intrs);
 165         ionic_intr_init(&ionic->idev, intr, index);
 166 
 167         return 0;
 168 }
 169 
 170 static void ionic_intr_free(struct ionic_lif *lif, int index)
 171 {
 172         if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
 173                 clear_bit(index, lif->ionic->intrs);
 174 }
 175 
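      /* Enable a queue: if it owns an interrupt, set its affinity, enable
       * napi and unmask the interrupt, then post a Q_CONTROL/ENABLE admin
       * command for the queue.
       */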
 176 static int ionic_qcq_enable(struct ionic_qcq *qcq)
 177 {
 178         struct ionic_queue *q = &qcq->q;
 179         struct ionic_lif *lif = q->lif;
 180         struct ionic_dev *idev;
 181         struct device *dev;
 182 
 183         struct ionic_admin_ctx ctx = {
 184                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 185                 .cmd.q_control = {
 186                         .opcode = IONIC_CMD_Q_CONTROL,
 187                         .lif_index = cpu_to_le16(lif->index),
 188                         .type = q->type,
 189                         .index = cpu_to_le32(q->index),
 190                         .oper = IONIC_Q_ENABLE,
 191                 },
 192         };
 193 
 194         idev = &lif->ionic->idev;
 195         dev = lif->ionic->dev;
 196 
 197         dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
 198                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
 199 
 200         if (qcq->flags & IONIC_QCQ_F_INTR) {
 201                 irq_set_affinity_hint(qcq->intr.vector,
 202                                       &qcq->intr.affinity_mask);
 203                 napi_enable(&qcq->napi);
 204                 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
 205                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
 206                                 IONIC_INTR_MASK_CLEAR);
 207         }
 208 
 209         return ionic_adminq_post_wait(lif, &ctx);
 210 }
 211 
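      /* Disable a queue, reversing ionic_qcq_enable(): mask the interrupt,
       * wait out any in-flight handler, drop the affinity hint, stop napi,
       * then post a Q_CONTROL/DISABLE admin command.
       */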
 212 static int ionic_qcq_disable(struct ionic_qcq *qcq)
 213 {
 214         struct ionic_queue *q = &qcq->q;
 215         struct ionic_lif *lif = q->lif;
 216         struct ionic_dev *idev;
 217         struct device *dev;
 218 
 219         struct ionic_admin_ctx ctx = {
 220                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 221                 .cmd.q_control = {
 222                         .opcode = IONIC_CMD_Q_CONTROL,
 223                         .lif_index = cpu_to_le16(lif->index),
 224                         .type = q->type,
 225                         .index = cpu_to_le32(q->index),
 226                         .oper = IONIC_Q_DISABLE,
 227                 },
 228         };
 229 
 230         idev = &lif->ionic->idev;
 231         dev = lif->ionic->dev;
 232 
 233         dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
 234                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
 235 
 236         if (qcq->flags & IONIC_QCQ_F_INTR) {
 237                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
 238                                 IONIC_INTR_MASK_SET);
 239                 synchronize_irq(qcq->intr.vector);
 240                 irq_set_affinity_hint(qcq->intr.vector, NULL);
 241                 napi_disable(&qcq->napi);
 242         }
 243 
 244         return ionic_adminq_post_wait(lif, &ctx);
 245 }
 246 
 247 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
 248 {
 249         struct ionic_dev *idev = &lif->ionic->idev;
 250         struct device *dev = lif->ionic->dev;
 251 
 252         if (!qcq)
 253                 return;
 254 
 255         ionic_debugfs_del_qcq(qcq);
 256 
 257         if (!(qcq->flags & IONIC_QCQ_F_INITED))
 258                 return;
 259 
 260         if (qcq->flags & IONIC_QCQ_F_INTR) {
 261                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
 262                                 IONIC_INTR_MASK_SET);
 263                 devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
 264                 netif_napi_del(&qcq->napi);
 265         }
 266 
 267         qcq->flags &= ~IONIC_QCQ_F_INITED;
 268 }
 269 
 270 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
 271 {
 272         struct device *dev = lif->ionic->dev;
 273 
 274         if (!qcq)
 275                 return;
 276 
 277         dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
 278         qcq->base = NULL;
 279         qcq->base_pa = 0;
 280 
 281         if (qcq->flags & IONIC_QCQ_F_INTR)
 282                 ionic_intr_free(lif, qcq->intr.index);
 283 
 284         devm_kfree(dev, qcq->cq.info);
 285         qcq->cq.info = NULL;
 286         devm_kfree(dev, qcq->q.info);
 287         qcq->q.info = NULL;
 288         devm_kfree(dev, qcq);
 289 }
 290 
 291 static void ionic_qcqs_free(struct ionic_lif *lif)
 292 {
 293         struct device *dev = lif->ionic->dev;
 294         unsigned int i;
 295 
 296         if (lif->notifyqcq) {
 297                 ionic_qcq_free(lif, lif->notifyqcq);
 298                 lif->notifyqcq = NULL;
 299         }
 300 
 301         if (lif->adminqcq) {
 302                 ionic_qcq_free(lif, lif->adminqcq);
 303                 lif->adminqcq = NULL;
 304         }
 305 
 306         for (i = 0; i < lif->nxqs; i++)
 307                 if (lif->rxqcqs[i].stats)
 308                         devm_kfree(dev, lif->rxqcqs[i].stats);
 309 
 310         devm_kfree(dev, lif->rxqcqs);
 311         lif->rxqcqs = NULL;
 312 
 313         for (i = 0; i < lif->nxqs; i++)
 314                 if (lif->txqcqs[i].stats)
 315                         devm_kfree(dev, lif->txqcqs[i].stats);
 316 
 317         devm_kfree(dev, lif->txqcqs);
 318         lif->txqcqs = NULL;
 319 }
 320 
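      /* Make n_qcq share src_qcq's interrupt vector and index instead of
       * owning its own (used below to let the notifyq ride the adminq
       * interrupt).
       */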
 321 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
 322                                       struct ionic_qcq *n_qcq)
 323 {
 324         if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
 325                 ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
 326                 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
 327         }
 328 
 329         n_qcq->intr.vector = src_qcq->intr.vector;
 330         n_qcq->intr.index = src_qcq->intr.index;
 331 }
 332 
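      /* Allocate a queue/completion-queue pair: the q and cq info arrays,
       * an optional interrupt, and one coherent DMA block laid out as the
       * descriptor ring, a page-aligned completion ring, and (with
       * IONIC_QCQ_F_SG) a page-aligned scatter-gather ring.
       */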
 333 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
 334                            unsigned int index,
 335                            const char *name, unsigned int flags,
 336                            unsigned int num_descs, unsigned int desc_size,
 337                            unsigned int cq_desc_size,
 338                            unsigned int sg_desc_size,
 339                            unsigned int pid, struct ionic_qcq **qcq)
 340 {
 341         struct ionic_dev *idev = &lif->ionic->idev;
 342         u32 q_size, cq_size, sg_size, total_size;
 343         struct device *dev = lif->ionic->dev;
 344         void *q_base, *cq_base, *sg_base;
 345         dma_addr_t cq_base_pa = 0;
 346         dma_addr_t sg_base_pa = 0;
 347         dma_addr_t q_base_pa = 0;
 348         struct ionic_qcq *new;
 349         int err;
 350 
 351         *qcq = NULL;
 352 
 353         q_size  = num_descs * desc_size;
 354         cq_size = num_descs * cq_desc_size;
 355         sg_size = num_descs * sg_desc_size;
 356 
 357         total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
  358         /* Note: aligning q_size/cq_size is not enough; cq_base gets
  359          * rounded up to a page boundary from q_base + q_size, which
  360          * can cost up to an extra page if q_base is not page aligned.
  361          */
 362         total_size += PAGE_SIZE;
 363         if (flags & IONIC_QCQ_F_SG) {
 364                 total_size += ALIGN(sg_size, PAGE_SIZE);
 365                 total_size += PAGE_SIZE;
 366         }
 367 
 368         new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
 369         if (!new) {
 370                 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
 371                 err = -ENOMEM;
 372                 goto err_out;
 373         }
 374 
 375         new->flags = flags;
 376 
 377         new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
 378                                    GFP_KERNEL);
 379         if (!new->q.info) {
 380                 netdev_err(lif->netdev, "Cannot allocate queue info\n");
 381                 err = -ENOMEM;
 382                 goto err_out;
 383         }
 384 
 385         new->q.type = type;
 386 
 387         err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
 388                            desc_size, sg_desc_size, pid);
 389         if (err) {
 390                 netdev_err(lif->netdev, "Cannot initialize queue\n");
 391                 goto err_out;
 392         }
 393 
 394         if (flags & IONIC_QCQ_F_INTR) {
 395                 err = ionic_intr_alloc(lif, &new->intr);
 396                 if (err) {
 397                         netdev_warn(lif->netdev, "no intr for %s: %d\n",
 398                                     name, err);
 399                         goto err_out;
 400                 }
 401 
 402                 err = ionic_bus_get_irq(lif->ionic, new->intr.index);
 403                 if (err < 0) {
 404                         netdev_warn(lif->netdev, "no vector for %s: %d\n",
 405                                     name, err);
 406                         goto err_out_free_intr;
 407                 }
 408                 new->intr.vector = err;
 409                 ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
 410                                        IONIC_INTR_MASK_SET);
 411 
 412                 new->intr.cpu = new->intr.index % num_online_cpus();
 413                 if (cpu_online(new->intr.cpu))
 414                         cpumask_set_cpu(new->intr.cpu,
 415                                         &new->intr.affinity_mask);
 416         } else {
 417                 new->intr.index = INTR_INDEX_NOT_ASSIGNED;
 418         }
 419 
 420         new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
 421                                     GFP_KERNEL);
 422         if (!new->cq.info) {
 423                 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
 424                 err = -ENOMEM;
 425                 goto err_out_free_intr;
 426         }
 427 
 428         err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
 429         if (err) {
 430                 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
 431                 goto err_out_free_intr;
 432         }
 433 
 434         new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
 435                                        GFP_KERNEL);
 436         if (!new->base) {
 437                 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
 438                 err = -ENOMEM;
 439                 goto err_out_free_intr;
 440         }
 441 
 442         new->total_size = total_size;
 443 
 444         q_base = new->base;
 445         q_base_pa = new->base_pa;
 446 
 447         cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
 448         cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
 449 
 450         if (flags & IONIC_QCQ_F_SG) {
 451                 sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
 452                                         PAGE_SIZE);
 453                 sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
 454                 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
 455         }
 456 
 457         ionic_q_map(&new->q, q_base, q_base_pa);
 458         ionic_cq_map(&new->cq, cq_base, cq_base_pa);
 459         ionic_cq_bind(&new->cq, &new->q);
 460 
 461         *qcq = new;
 462 
 463         return 0;
 464 
 465 err_out_free_intr:
 466         ionic_intr_free(lif, new->intr.index);
 467 err_out:
 468         dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
 469         return err;
 470 }
 471 
 472 static int ionic_qcqs_alloc(struct ionic_lif *lif)
 473 {
 474         struct device *dev = lif->ionic->dev;
 475         unsigned int q_list_size;
 476         unsigned int flags;
 477         int err;
 478         int i;
 479 
 480         flags = IONIC_QCQ_F_INTR;
 481         err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
 482                               IONIC_ADMINQ_LENGTH,
 483                               sizeof(struct ionic_admin_cmd),
 484                               sizeof(struct ionic_admin_comp),
 485                               0, lif->kern_pid, &lif->adminqcq);
 486         if (err)
 487                 return err;
 488 
 489         if (lif->ionic->nnqs_per_lif) {
 490                 flags = IONIC_QCQ_F_NOTIFYQ;
 491                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
 492                                       flags, IONIC_NOTIFYQ_LENGTH,
 493                                       sizeof(struct ionic_notifyq_cmd),
 494                                       sizeof(union ionic_notifyq_comp),
 495                                       0, lif->kern_pid, &lif->notifyqcq);
 496                 if (err)
 497                         goto err_out_free_adminqcq;
 498 
 499                 /* Let the notifyq ride on the adminq interrupt */
 500                 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
 501         }
 502 
 503         q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
 504         err = -ENOMEM;
 505         lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
 506         if (!lif->txqcqs)
 507                 goto err_out_free_notifyqcq;
 508         for (i = 0; i < lif->nxqs; i++) {
 509                 lif->txqcqs[i].stats = devm_kzalloc(dev,
 510                                                     sizeof(struct ionic_q_stats),
 511                                                     GFP_KERNEL);
 512                 if (!lif->txqcqs[i].stats)
 513                         goto err_out_free_tx_stats;
 514         }
 515 
 516         lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
 517         if (!lif->rxqcqs)
 518                 goto err_out_free_tx_stats;
 519         for (i = 0; i < lif->nxqs; i++) {
 520                 lif->rxqcqs[i].stats = devm_kzalloc(dev,
 521                                                     sizeof(struct ionic_q_stats),
 522                                                     GFP_KERNEL);
 523                 if (!lif->rxqcqs[i].stats)
 524                         goto err_out_free_rx_stats;
 525         }
 526 
 527         return 0;
 528 
 529 err_out_free_rx_stats:
 530         for (i = 0; i < lif->nxqs; i++)
 531                 if (lif->rxqcqs[i].stats)
 532                         devm_kfree(dev, lif->rxqcqs[i].stats);
 533         devm_kfree(dev, lif->rxqcqs);
 534         lif->rxqcqs = NULL;
 535 err_out_free_tx_stats:
 536         for (i = 0; i < lif->nxqs; i++)
 537                 if (lif->txqcqs[i].stats)
 538                         devm_kfree(dev, lif->txqcqs[i].stats);
 539         devm_kfree(dev, lif->txqcqs);
 540         lif->txqcqs = NULL;
 541 err_out_free_notifyqcq:
 542         if (lif->notifyqcq) {
 543                 ionic_qcq_free(lif, lif->notifyqcq);
 544                 lif->notifyqcq = NULL;
 545         }
 546 err_out_free_adminqcq:
 547         ionic_qcq_free(lif, lif->adminqcq);
 548         lif->adminqcq = NULL;
 549 
 550         return err;
 551 }
 552 
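      /* Init a tx queue on the device with a Q_INIT admin command.  The
       * intr_index is taken from the paired rx queue's interrupt, so a
       * tx/rx pair shares one interrupt; hw_type and hw_index from the
       * completion are saved for doorbell writes.
       */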
 553 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
 554 {
 555         struct device *dev = lif->ionic->dev;
 556         struct ionic_queue *q = &qcq->q;
 557         struct ionic_cq *cq = &qcq->cq;
 558         struct ionic_admin_ctx ctx = {
 559                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 560                 .cmd.q_init = {
 561                         .opcode = IONIC_CMD_Q_INIT,
 562                         .lif_index = cpu_to_le16(lif->index),
 563                         .type = q->type,
 564                         .index = cpu_to_le32(q->index),
 565                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
 566                                              IONIC_QINIT_F_SG),
 567                         .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
 568                         .pid = cpu_to_le16(q->pid),
 569                         .ring_size = ilog2(q->num_descs),
 570                         .ring_base = cpu_to_le64(q->base_pa),
 571                         .cq_ring_base = cpu_to_le64(cq->base_pa),
 572                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
 573                 },
 574         };
 575         int err;
 576 
 577         dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
 578         dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
 579         dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
 580         dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
 581 
 582         err = ionic_adminq_post_wait(lif, &ctx);
 583         if (err)
 584                 return err;
 585 
 586         q->hw_type = ctx.comp.q_init.hw_type;
 587         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
 588         q->dbval = IONIC_DBELL_QID(q->hw_index);
 589 
 590         dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
 591         dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
 592 
 593         qcq->flags |= IONIC_QCQ_F_INITED;
 594 
 595         ionic_debugfs_add_qcq(lif, qcq);
 596 
 597         return 0;
 598 }
 599 
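      /* Init an rx queue on the device with a Q_INIT admin command, then
       * register ionic_rx_napi and request the queue's IRQ.
       */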
 600 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
 601 {
 602         struct device *dev = lif->ionic->dev;
 603         struct ionic_queue *q = &qcq->q;
 604         struct ionic_cq *cq = &qcq->cq;
 605         struct ionic_admin_ctx ctx = {
 606                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 607                 .cmd.q_init = {
 608                         .opcode = IONIC_CMD_Q_INIT,
 609                         .lif_index = cpu_to_le16(lif->index),
 610                         .type = q->type,
 611                         .index = cpu_to_le32(q->index),
 612                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
 613                         .intr_index = cpu_to_le16(cq->bound_intr->index),
 614                         .pid = cpu_to_le16(q->pid),
 615                         .ring_size = ilog2(q->num_descs),
 616                         .ring_base = cpu_to_le64(q->base_pa),
 617                         .cq_ring_base = cpu_to_le64(cq->base_pa),
 618                 },
 619         };
 620         int err;
 621 
 622         dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
 623         dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
 624         dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
 625         dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
 626 
 627         err = ionic_adminq_post_wait(lif, &ctx);
 628         if (err)
 629                 return err;
 630 
 631         q->hw_type = ctx.comp.q_init.hw_type;
 632         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
 633         q->dbval = IONIC_DBELL_QID(q->hw_index);
 634 
 635         dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
 636         dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
 637 
 638         netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
 639                        NAPI_POLL_WEIGHT);
 640 
 641         err = ionic_request_irq(lif, qcq);
 642         if (err) {
 643                 netif_napi_del(&qcq->napi);
 644                 return err;
 645         }
 646 
 647         qcq->flags |= IONIC_QCQ_F_INITED;
 648 
 649         ionic_debugfs_add_qcq(lif, qcq);
 650 
 651         return 0;
 652 }
 653 
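      /* Handle one notifyq completion.  Event IDs only move forward, so
       * an eid at or below the last one seen means there is nothing new
       * to process.
       */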
 654 static bool ionic_notifyq_service(struct ionic_cq *cq,
 655                                   struct ionic_cq_info *cq_info)
 656 {
 657         union ionic_notifyq_comp *comp = cq_info->cq_desc;
 658         struct net_device *netdev;
 659         struct ionic_queue *q;
 660         struct ionic_lif *lif;
 661         u64 eid;
 662 
 663         q = cq->bound_q;
 664         lif = q->info[0].cb_arg;
 665         netdev = lif->netdev;
 666         eid = le64_to_cpu(comp->event.eid);
 667 
 668         /* Have we run out of new completions to process? */
 669         if (eid <= lif->last_eid)
 670                 return false;
 671 
 672         lif->last_eid = eid;
 673 
 674         dev_dbg(lif->ionic->dev, "notifyq event:\n");
 675         dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
 676                          comp, sizeof(*comp), true);
 677 
 678         switch (le16_to_cpu(comp->event.ecode)) {
 679         case IONIC_EVENT_LINK_CHANGE:
 680                 ionic_link_status_check_request(lif);
 681                 break;
 682         case IONIC_EVENT_RESET:
 683                 netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
 684                             eid);
 685                 netdev_info(netdev, "  reset_code=%d state=%d\n",
 686                             comp->reset.reset_code,
 687                             comp->reset.state);
 688                 break;
 689         default:
 690                 netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
 691                             comp->event.ecode, eid);
 692                 break;
 693         }
 694 
 695         return true;
 696 }
 697 
 698 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
 699 {
 700         struct ionic_dev *idev = &lif->ionic->idev;
 701         struct ionic_cq *cq = &lif->notifyqcq->cq;
 702         u32 work_done;
 703 
 704         work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
 705                                      NULL, NULL);
 706         if (work_done)
 707                 ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
 708                                    work_done, IONIC_INTR_CRED_RESET_COALESCE);
 709 
 710         return work_done;
 711 }
 712 
 713 static bool ionic_adminq_service(struct ionic_cq *cq,
 714                                  struct ionic_cq_info *cq_info)
 715 {
 716         struct ionic_admin_comp *comp = cq_info->cq_desc;
 717 
 718         if (!color_match(comp->color, cq->done_color))
 719                 return false;
 720 
 721         ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
 722 
 723         return true;
 724 }
 725 
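      /* NAPI poll shared by the adminq and the notifyq (which rides the
       * adminq interrupt); returns the larger of the two work counts.
       */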
 726 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
 727 {
 728         struct ionic_lif *lif = napi_to_cq(napi)->lif;
 729         int n_work = 0;
 730         int a_work = 0;
 731 
 732         if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
 733                 n_work = ionic_notifyq_clean(lif, budget);
 734         a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
 735 
 736         return max(n_work, a_work);
 737 }
 738 
 739 static void ionic_get_stats64(struct net_device *netdev,
 740                               struct rtnl_link_stats64 *ns)
 741 {
 742         struct ionic_lif *lif = netdev_priv(netdev);
 743         struct ionic_lif_stats *ls;
 744 
 745         memset(ns, 0, sizeof(*ns));
 746         ls = &lif->info->stats;
 747 
 748         ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
 749                          le64_to_cpu(ls->rx_mcast_packets) +
 750                          le64_to_cpu(ls->rx_bcast_packets);
 751 
 752         ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
 753                          le64_to_cpu(ls->tx_mcast_packets) +
 754                          le64_to_cpu(ls->tx_bcast_packets);
 755 
 756         ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
 757                        le64_to_cpu(ls->rx_mcast_bytes) +
 758                        le64_to_cpu(ls->rx_bcast_bytes);
 759 
 760         ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
 761                        le64_to_cpu(ls->tx_mcast_bytes) +
 762                        le64_to_cpu(ls->tx_bcast_bytes);
 763 
 764         ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
 765                          le64_to_cpu(ls->rx_mcast_drop_packets) +
 766                          le64_to_cpu(ls->rx_bcast_drop_packets);
 767 
 768         ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
 769                          le64_to_cpu(ls->tx_mcast_drop_packets) +
 770                          le64_to_cpu(ls->tx_bcast_drop_packets);
 771 
 772         ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
 773 
 774         ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
 775 
 776         ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
 777                                le64_to_cpu(ls->rx_queue_disabled) +
 778                                le64_to_cpu(ls->rx_desc_fetch_error) +
 779                                le64_to_cpu(ls->rx_desc_data_error);
 780 
 781         ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
 782                                 le64_to_cpu(ls->tx_queue_disabled) +
 783                                 le64_to_cpu(ls->tx_desc_fetch_error) +
 784                                 le64_to_cpu(ls->tx_desc_data_error);
 785 
 786         ns->rx_errors = ns->rx_over_errors +
 787                         ns->rx_missed_errors;
 788 
 789         ns->tx_errors = ns->tx_aborted_errors;
 790 }
 791 
 792 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
 793 {
 794         struct ionic_admin_ctx ctx = {
 795                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 796                 .cmd.rx_filter_add = {
 797                         .opcode = IONIC_CMD_RX_FILTER_ADD,
 798                         .lif_index = cpu_to_le16(lif->index),
 799                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
 800                 },
 801         };
 802         struct ionic_rx_filter *f;
 803         int err;
 804 
 805         /* don't bother if we already have it */
 806         spin_lock_bh(&lif->rx_filters.lock);
 807         f = ionic_rx_filter_by_addr(lif, addr);
 808         spin_unlock_bh(&lif->rx_filters.lock);
 809         if (f)
 810                 return 0;
 811 
 812         netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
 813                    ctx.comp.rx_filter_add.filter_id);
 814 
 815         memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
 816         err = ionic_adminq_post_wait(lif, &ctx);
 817         if (err)
 818                 return err;
 819 
 820         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
 821 }
 822 
 823 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
 824 {
 825         struct ionic_admin_ctx ctx = {
 826                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 827                 .cmd.rx_filter_del = {
 828                         .opcode = IONIC_CMD_RX_FILTER_DEL,
 829                         .lif_index = cpu_to_le16(lif->index),
 830                 },
 831         };
 832         struct ionic_rx_filter *f;
 833         int err;
 834 
 835         spin_lock_bh(&lif->rx_filters.lock);
 836         f = ionic_rx_filter_by_addr(lif, addr);
 837         if (!f) {
 838                 spin_unlock_bh(&lif->rx_filters.lock);
 839                 return -ENOENT;
 840         }
 841 
 842         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
 843         ionic_rx_filter_free(lif, f);
 844         spin_unlock_bh(&lif->rx_filters.lock);
 845 
 846         err = ionic_adminq_post_wait(lif, &ctx);
 847         if (err)
 848                 return err;
 849 
 850         netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
 851                    ctx.cmd.rx_filter_del.filter_id);
 852 
 853         return 0;
 854 }
 855 
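      /* Add or delete a MAC filter, counting it against the device's
       * unicast/multicast filter limits.  From interrupt context the
       * adminq call is deferred to the work queue.
       */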
 856 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
 857 {
 858         struct ionic *ionic = lif->ionic;
 859         struct ionic_deferred_work *work;
 860         unsigned int nmfilters;
 861         unsigned int nufilters;
 862 
 863         if (add) {
 864                 /* Do we have space for this filter?  We test the counters
 865                  * here before checking the need for deferral so that we
 866                  * can return an overflow error to the stack.
 867                  */
 868                 nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
 869                 nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
 870 
 871                 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
 872                         lif->nmcast++;
 873                 else if (!is_multicast_ether_addr(addr) &&
 874                          lif->nucast < nufilters)
 875                         lif->nucast++;
 876                 else
 877                         return -ENOSPC;
 878         } else {
 879                 if (is_multicast_ether_addr(addr) && lif->nmcast)
 880                         lif->nmcast--;
 881                 else if (!is_multicast_ether_addr(addr) && lif->nucast)
 882                         lif->nucast--;
 883         }
 884 
 885         if (in_interrupt()) {
 886                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
 887                 if (!work) {
 888                         netdev_err(lif->netdev, "%s OOM\n", __func__);
 889                         return -ENOMEM;
 890                 }
 891                 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
 892                                    IONIC_DW_TYPE_RX_ADDR_DEL;
 893                 memcpy(work->addr, addr, ETH_ALEN);
 894                 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
 895                            add ? "add" : "del", addr);
 896                 ionic_lif_deferred_enqueue(&lif->deferred, work);
 897         } else {
 898                 netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
 899                            add ? "add" : "del", addr);
 900                 if (add)
 901                         return ionic_lif_addr_add(lif, addr);
 902                 else
 903                         return ionic_lif_addr_del(lif, addr);
 904         }
 905 
 906         return 0;
 907 }
 908 
 909 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
 910 {
 911         return ionic_lif_addr(netdev_priv(netdev), addr, true);
 912 }
 913 
 914 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
 915 {
 916         return ionic_lif_addr(netdev_priv(netdev), addr, false);
 917 }
 918 
 919 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
 920 {
 921         struct ionic_admin_ctx ctx = {
 922                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
 923                 .cmd.rx_mode_set = {
 924                         .opcode = IONIC_CMD_RX_MODE_SET,
 925                         .lif_index = cpu_to_le16(lif->index),
 926                         .rx_mode = cpu_to_le16(rx_mode),
 927                 },
 928         };
 929         char buf[128];
 930         int err;
 931         int i;
 932 #define REMAIN(__x) (sizeof(buf) - (__x))
 933 
 934         i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
 935                      lif->rx_mode, rx_mode);
 936         if (rx_mode & IONIC_RX_MODE_F_UNICAST)
 937                 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
 938         if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
 939                 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
 940         if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
 941                 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
 942         if (rx_mode & IONIC_RX_MODE_F_PROMISC)
 943                 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
 944         if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
 945                 i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
 946         netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
 947 
 948         err = ionic_adminq_post_wait(lif, &ctx);
 949         if (err)
 950                 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
 951                             rx_mode, err);
 952         else
 953                 lif->rx_mode = rx_mode;
 954 }
 955 
 956 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
 957 {
 958         struct ionic_deferred_work *work;
 959 
 960         if (in_interrupt()) {
 961                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
 962                 if (!work) {
 963                         netdev_err(lif->netdev, "%s OOM\n", __func__);
 964                         return;
 965                 }
 966                 work->type = IONIC_DW_TYPE_RX_MODE;
 967                 work->rx_mode = rx_mode;
 968                 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
 969                 ionic_lif_deferred_enqueue(&lif->deferred, work);
 970         } else {
 971                 ionic_lif_rx_mode(lif, rx_mode);
 972         }
 973 }
 974 
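      /* Build the wanted rx_mode from the netdev flags and the
       * unicast/multicast overflow state, and push it to the device only
       * when it changes.
       */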
 975 static void ionic_set_rx_mode(struct net_device *netdev)
 976 {
 977         struct ionic_lif *lif = netdev_priv(netdev);
 978         struct ionic_identity *ident;
 979         unsigned int nfilters;
 980         unsigned int rx_mode;
 981 
 982         ident = &lif->ionic->ident;
 983 
 984         rx_mode = IONIC_RX_MODE_F_UNICAST;
 985         rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
 986         rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
 987         rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
 988         rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
 989 
  990         /* Sync the unicast address list, then check for overflow:
  991          * if there are more addresses than the device can filter,
  992          * note the overflow and turn on NIC PROMISC; otherwise, if
  993          * the overflow flag was set but is no longer needed, clear
  994          * it and check the netdev flags to see whether NIC PROMISC
  995          * can be turned off again.
  996          */
 997         __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
 998         nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
 999         if (netdev_uc_count(netdev) + 1 > nfilters) {
1000                 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1001                 lif->uc_overflow = true;
1002         } else if (lif->uc_overflow) {
1003                 lif->uc_overflow = false;
1004                 if (!(netdev->flags & IFF_PROMISC))
1005                         rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1006         }
1007 
1008         /* same for multicast */
1009         __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1010         nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1011         if (netdev_mc_count(netdev) > nfilters) {
1012                 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1013                 lif->mc_overflow = true;
1014         } else if (lif->mc_overflow) {
1015                 lif->mc_overflow = false;
1016                 if (!(netdev->flags & IFF_ALLMULTI))
1017                         rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1018         }
1019 
1020         if (lif->rx_mode != rx_mode)
1021                 _ionic_lif_rx_mode(lif, rx_mode);
1022 }
1023 
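      /* Translate netdev feature flags into the device's feature bits. */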
1024 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1025 {
1026         u64 wanted = 0;
1027 
1028         if (features & NETIF_F_HW_VLAN_CTAG_TX)
1029                 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1030         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1031                 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1032         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1033                 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1034         if (features & NETIF_F_RXHASH)
1035                 wanted |= IONIC_ETH_HW_RX_HASH;
1036         if (features & NETIF_F_RXCSUM)
1037                 wanted |= IONIC_ETH_HW_RX_CSUM;
1038         if (features & NETIF_F_SG)
1039                 wanted |= IONIC_ETH_HW_TX_SG;
1040         if (features & NETIF_F_HW_CSUM)
1041                 wanted |= IONIC_ETH_HW_TX_CSUM;
1042         if (features & NETIF_F_TSO)
1043                 wanted |= IONIC_ETH_HW_TSO;
1044         if (features & NETIF_F_TSO6)
1045                 wanted |= IONIC_ETH_HW_TSO_IPV6;
1046         if (features & NETIF_F_TSO_ECN)
1047                 wanted |= IONIC_ETH_HW_TSO_ECN;
1048         if (features & NETIF_F_GSO_GRE)
1049                 wanted |= IONIC_ETH_HW_TSO_GRE;
1050         if (features & NETIF_F_GSO_GRE_CSUM)
1051                 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1052         if (features & NETIF_F_GSO_IPXIP4)
1053                 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1054         if (features & NETIF_F_GSO_IPXIP6)
1055                 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1056         if (features & NETIF_F_GSO_UDP_TUNNEL)
1057                 wanted |= IONIC_ETH_HW_TSO_UDP;
1058         if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1059                 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1060 
1061         return cpu_to_le64(wanted);
1062 }
1063 
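      /* Push the requested features with LIF_SETATTR; hw_features ends up
       * as the intersection of what was requested and what the device
       * reports back in the completion.
       */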
1064 static int ionic_set_nic_features(struct ionic_lif *lif,
1065                                   netdev_features_t features)
1066 {
1067         struct device *dev = lif->ionic->dev;
1068         struct ionic_admin_ctx ctx = {
1069                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1070                 .cmd.lif_setattr = {
1071                         .opcode = IONIC_CMD_LIF_SETATTR,
1072                         .index = cpu_to_le16(lif->index),
1073                         .attr = IONIC_LIF_ATTR_FEATURES,
1074                 },
1075         };
1076         u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1077                          IONIC_ETH_HW_VLAN_RX_STRIP |
1078                          IONIC_ETH_HW_VLAN_RX_FILTER;
1079         int err;
1080 
1081         ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1082         err = ionic_adminq_post_wait(lif, &ctx);
1083         if (err)
1084                 return err;
1085 
1086         lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1087                                        ctx.comp.lif_setattr.features);
1088 
1089         if ((vlan_flags & features) &&
1090             !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
 1091                 dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");
1092 
1093         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1094                 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1095         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1096                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1097         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1098                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1099         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1100                 dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1101         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1102                 dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1103         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1104                 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1105         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1106                 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1107         if (lif->hw_features & IONIC_ETH_HW_TSO)
1108                 dev_dbg(dev, "feature ETH_HW_TSO\n");
1109         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1110                 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1111         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1112                 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1113         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1114                 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1115         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1116                 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1117         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1118                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1119         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1120                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1121         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1122                 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1123         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1124                 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1125 
1126         return 0;
1127 }
1128 
1129 static int ionic_init_nic_features(struct ionic_lif *lif)
1130 {
1131         struct net_device *netdev = lif->netdev;
1132         netdev_features_t features;
1133         int err;
1134 
1135         /* set up what we expect to support by default */
1136         features = NETIF_F_HW_VLAN_CTAG_TX |
1137                    NETIF_F_HW_VLAN_CTAG_RX |
1138                    NETIF_F_HW_VLAN_CTAG_FILTER |
1139                    NETIF_F_RXHASH |
1140                    NETIF_F_SG |
1141                    NETIF_F_HW_CSUM |
1142                    NETIF_F_RXCSUM |
1143                    NETIF_F_TSO |
1144                    NETIF_F_TSO6 |
1145                    NETIF_F_TSO_ECN;
1146 
1147         err = ionic_set_nic_features(lif, features);
1148         if (err)
1149                 return err;
1150 
1151         /* tell the netdev what we actually can support */
1152         netdev->features |= NETIF_F_HIGHDMA;
1153 
1154         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1155                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1156         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1157                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1158         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1159                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1160         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1161                 netdev->hw_features |= NETIF_F_RXHASH;
1162         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1163                 netdev->hw_features |= NETIF_F_SG;
1164 
1165         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1166                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1167         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1168                 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1169         if (lif->hw_features & IONIC_ETH_HW_TSO)
1170                 netdev->hw_enc_features |= NETIF_F_TSO;
1171         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1172                 netdev->hw_enc_features |= NETIF_F_TSO6;
1173         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1174                 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1175         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1176                 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1177         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1178                 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1179         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1180                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1181         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1182                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1183         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1184                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1185         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1186                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1187 
1188         netdev->hw_features |= netdev->hw_enc_features;
1189         netdev->features |= netdev->hw_features;
1190 
1191         netdev->priv_flags |= IFF_UNICAST_FLT;
1192 
1193         return 0;
1194 }
1195 
1196 static int ionic_set_features(struct net_device *netdev,
1197                               netdev_features_t features)
1198 {
1199         struct ionic_lif *lif = netdev_priv(netdev);
1200         int err;
1201 
1202         netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1203                    __func__, (u64)lif->netdev->features, (u64)features);
1204 
1205         err = ionic_set_nic_features(lif, features);
1206 
1207         return err;
1208 }
1209 
1210 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1211 {
1212         struct sockaddr *addr = sa;
1213         u8 *mac;
1214         int err;
1215 
1216         mac = (u8 *)addr->sa_data;
1217         if (ether_addr_equal(netdev->dev_addr, mac))
1218                 return 0;
1219 
1220         err = eth_prepare_mac_addr_change(netdev, addr);
1221         if (err)
1222                 return err;
1223 
1224         if (!is_zero_ether_addr(netdev->dev_addr)) {
1225                 netdev_info(netdev, "deleting mac addr %pM\n",
1226                             netdev->dev_addr);
1227                 ionic_addr_del(netdev, netdev->dev_addr);
1228         }
1229 
1230         eth_commit_mac_addr_change(netdev, addr);
1231         netdev_info(netdev, "updating mac addr %pM\n", mac);
1232 
1233         return ionic_addr_add(netdev, mac);
1234 }
1235 
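      /* Set the new MTU on the device with LIF_SETATTR, then rebuild the
       * queues via ionic_reset_queues().
       */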
1236 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1237 {
1238         struct ionic_lif *lif = netdev_priv(netdev);
1239         struct ionic_admin_ctx ctx = {
1240                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1241                 .cmd.lif_setattr = {
1242                         .opcode = IONIC_CMD_LIF_SETATTR,
1243                         .index = cpu_to_le16(lif->index),
1244                         .attr = IONIC_LIF_ATTR_MTU,
1245                         .mtu = cpu_to_le32(new_mtu),
1246                 },
1247         };
1248         int err;
1249 
1250         err = ionic_adminq_post_wait(lif, &ctx);
1251         if (err)
1252                 return err;
1253 
1254         netdev->mtu = new_mtu;
1255         err = ionic_reset_queues(lif);
1256 
1257         return err;
1258 }
1259 
1260 static void ionic_tx_timeout_work(struct work_struct *ws)
1261 {
1262         struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1263 
1264         netdev_info(lif->netdev, "Tx Timeout recovery\n");
1265 
1266         rtnl_lock();
1267         ionic_reset_queues(lif);
1268         rtnl_unlock();
1269 }
1270 
1271 static void ionic_tx_timeout(struct net_device *netdev)
1272 {
1273         struct ionic_lif *lif = netdev_priv(netdev);
1274 
1275         schedule_work(&lif->tx_timeout_work);
1276 }
1277 
1278 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1279                                  u16 vid)
1280 {
1281         struct ionic_lif *lif = netdev_priv(netdev);
1282         struct ionic_admin_ctx ctx = {
1283                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1284                 .cmd.rx_filter_add = {
1285                         .opcode = IONIC_CMD_RX_FILTER_ADD,
1286                         .lif_index = cpu_to_le16(lif->index),
1287                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1288                         .vlan.vlan = cpu_to_le16(vid),
1289                 },
1290         };
1291         int err;
1292 
1293         err = ionic_adminq_post_wait(lif, &ctx);
1294         if (err)
1295                 return err;
1296 
1297         netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1298                    ctx.comp.rx_filter_add.filter_id);
1299 
1300         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1301 }
1302 
1303 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1304                                   u16 vid)
1305 {
1306         struct ionic_lif *lif = netdev_priv(netdev);
1307         struct ionic_admin_ctx ctx = {
1308                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1309                 .cmd.rx_filter_del = {
1310                         .opcode = IONIC_CMD_RX_FILTER_DEL,
1311                         .lif_index = cpu_to_le16(lif->index),
1312                 },
1313         };
1314         struct ionic_rx_filter *f;
1315 
1316         spin_lock_bh(&lif->rx_filters.lock);
1317 
1318         f = ionic_rx_filter_by_vlan(lif, vid);
1319         if (!f) {
1320                 spin_unlock_bh(&lif->rx_filters.lock);
1321                 return -ENOENT;
1322         }
1323 
1324         netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1325                    le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1326 
1327         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1328         ionic_rx_filter_free(lif, f);
1329         spin_unlock_bh(&lif->rx_filters.lock);
1330 
1331         return ionic_adminq_post_wait(lif, &ctx);
1332 }
1333 
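      /* Update the local copies of the RSS types, hash key, and
       * indirection table, then push them with LIF_SETATTR(RSS); the
       * device reads the table itself from rss_ind_tbl_pa.
       */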
1334 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1335                          const u8 *key, const u32 *indir)
1336 {
1337         struct ionic_admin_ctx ctx = {
1338                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1339                 .cmd.lif_setattr = {
1340                         .opcode = IONIC_CMD_LIF_SETATTR,
1341                         .attr = IONIC_LIF_ATTR_RSS,
1342                         .rss.types = cpu_to_le16(types),
1343                         .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1344                 },
1345         };
1346         unsigned int i, tbl_sz;
1347 
1348         lif->rss_types = types;
1349 
1350         if (key)
1351                 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1352 
1353         if (indir) {
1354                 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1355                 for (i = 0; i < tbl_sz; i++)
1356                         lif->rss_ind_tbl[i] = indir[i];
1357         }
1358 
1359         memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1360                IONIC_RSS_HASH_KEY_SIZE);
1361 
1362         return ionic_adminq_post_wait(lif, &ctx);
1363 }
1364 
1365 static int ionic_lif_rss_init(struct ionic_lif *lif)
1366 {
1367         unsigned int tbl_sz;
1368         unsigned int i;
1369 
1370         lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1371                          IONIC_RSS_TYPE_IPV4_TCP |
1372                          IONIC_RSS_TYPE_IPV4_UDP |
1373                          IONIC_RSS_TYPE_IPV6     |
1374                          IONIC_RSS_TYPE_IPV6_TCP |
1375                          IONIC_RSS_TYPE_IPV6_UDP;
1376 
1377         /* Fill indirection table with 'default' values */
1378         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1379         for (i = 0; i < tbl_sz; i++)
1380                 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1381 
1382         return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1383 }
1384 
1385 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1386 {
1387         unsigned int tbl_sz;
1388 
1389         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1390         memset(lif->rss_ind_tbl, 0, tbl_sz);
1391         memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1392 
1393         ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1394 }
1395 
1396 static void ionic_txrx_disable(struct ionic_lif *lif)
1397 {
1398         unsigned int i;
1399 
1400         for (i = 0; i < lif->nxqs; i++) {
1401                 ionic_qcq_disable(lif->txqcqs[i].qcq);
1402                 ionic_qcq_disable(lif->rxqcqs[i].qcq);
1403         }
1404 }
1405 
1406 static void ionic_txrx_deinit(struct ionic_lif *lif)
1407 {
1408         unsigned int i;
1409 
1410         for (i = 0; i < lif->nxqs; i++) {
1411                 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1412                 ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1413 
1414                 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1415                 ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1416                 ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1417         }
1418 }
1419 
1420 static void ionic_txrx_free(struct ionic_lif *lif)
1421 {
1422         unsigned int i;
1423 
1424         for (i = 0; i < lif->nxqs; i++) {
1425                 ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1426                 lif->txqcqs[i].qcq = NULL;
1427 
1428                 ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1429                 lif->rxqcqs[i].qcq = NULL;
1430         }
1431 }
1432 
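     /* Allocate the Tx (with SG) and Rx queue pairs; each Rx
      * interrupt is shared with its partner Tx queue
      */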
1433 static int ionic_txrx_alloc(struct ionic_lif *lif)
1434 {
1435         unsigned int flags;
1436         unsigned int i;
1437         int err = 0;
1438         u32 coal;
1439 
1440         flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1441         for (i = 0; i < lif->nxqs; i++) {
1442                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1443                                       lif->ntxq_descs,
1444                                       sizeof(struct ionic_txq_desc),
1445                                       sizeof(struct ionic_txq_comp),
1446                                       sizeof(struct ionic_txq_sg_desc),
1447                                       lif->kern_pid, &lif->txqcqs[i].qcq);
1448                 if (err)
1449                         goto err_out;
1450 
1451                 lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1452         }
1453 
1454         flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
1455         coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs);
1456         for (i = 0; i < lif->nxqs; i++) {
1457                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1458                                       lif->nrxq_descs,
1459                                       sizeof(struct ionic_rxq_desc),
1460                                       sizeof(struct ionic_rxq_comp),
1461                                       0, lif->kern_pid, &lif->rxqcqs[i].qcq);
1462                 if (err)
1463                         goto err_out;
1464 
1465                 lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1466 
1467                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1468                                      lif->rxqcqs[i].qcq->intr.index, coal);
1469                 ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1470                                           lif->txqcqs[i].qcq);
1471         }
1472 
1473         return 0;
1474 
1475 err_out:
1476         ionic_txrx_free(lif);
1477 
1478         return err;
1479 }
1480 
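     /* Init each Tx/Rx queue on the NIC, then set up RSS and the Rx mode */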
1481 static int ionic_txrx_init(struct ionic_lif *lif)
1482 {
1483         unsigned int i;
1484         int err;
1485 
1486         for (i = 0; i < lif->nxqs; i++) {
1487                 err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1488                 if (err)
1489                         goto err_out;
1490 
1491                 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1492                 if (err) {
1493                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1494                         goto err_out;
1495                 }
1496         }
1497 
1498         if (lif->netdev->features & NETIF_F_RXHASH)
1499                 ionic_lif_rss_init(lif);
1500 
1501         ionic_set_rx_mode(lif->netdev);
1502 
1503         return 0;
1504 
1505 err_out:
1506         while (i--) {
1507                 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1508                 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1509         }
1510 
1511         return err;
1512 }
1513 
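     /* Enable the Tx queues, then fill and enable the Rx queues */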
1514 static int ionic_txrx_enable(struct ionic_lif *lif)
1515 {
1516         int i, err;
1517 
1518         for (i = 0; i < lif->nxqs; i++) {
1519                 err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1520                 if (err)
1521                         goto err_out;
1522 
1523                 ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1524                 err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1525                 if (err) {
1526                         ionic_qcq_disable(lif->txqcqs[i].qcq);
1527                         goto err_out;
1528                 }
1529         }
1530 
1531         return 0;
1532 
1533 err_out:
1534         while (i--) {
1535                 ionic_qcq_disable(lif->rxqcqs[i].qcq);
1536                 ionic_qcq_disable(lif->txqcqs[i].qcq);
1537         }
1538 
1539         return err;
1540 }
1541 
1542 int ionic_open(struct net_device *netdev)
1543 {
1544         struct ionic_lif *lif = netdev_priv(netdev);
1545         int err;
1546 
1547         netif_carrier_off(netdev);
1548 
1549         err = ionic_txrx_alloc(lif);
1550         if (err)
1551                 return err;
1552 
1553         err = ionic_txrx_init(lif);
1554         if (err)
1555                 goto err_txrx_free;
1556 
1557         err = ionic_txrx_enable(lif);
1558         if (err)
1559                 goto err_txrx_deinit;
1560 
1561         netif_set_real_num_tx_queues(netdev, lif->nxqs);
1562         netif_set_real_num_rx_queues(netdev, lif->nxqs);
1563 
1564         set_bit(IONIC_LIF_UP, lif->state);
1565 
1566         ionic_link_status_check_request(lif);
1567         if (netif_carrier_ok(netdev))
1568                 netif_tx_wake_all_queues(netdev);
1569 
1570         return 0;
1571 
1572 err_txrx_deinit:
1573         ionic_txrx_deinit(lif);
1574 err_txrx_free:
1575         ionic_txrx_free(lif);
1576         return err;
1577 }
1578 
1579 int ionic_stop(struct net_device *netdev)
1580 {
1581         struct ionic_lif *lif = netdev_priv(netdev);
1582         int err = 0;
1583 
1584         if (!test_bit(IONIC_LIF_UP, lif->state)) {
1585                 dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
1586                         __func__, lif->name);
1587                 return 0;
1588         }
1589         dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
1590         clear_bit(IONIC_LIF_UP, lif->state);
1591 
1592         /* carrier off before disabling queues to avoid watchdog timeout */
1593         netif_carrier_off(netdev);
1594         netif_tx_stop_all_queues(netdev);
1595         netif_tx_disable(netdev);
1596 
1597         ionic_txrx_disable(lif);
1598         ionic_txrx_deinit(lif);
1599         ionic_txrx_free(lif);
1600 
1601         return err;
1602 }
1603 
1604 static const struct net_device_ops ionic_netdev_ops = {
1605         .ndo_open               = ionic_open,
1606         .ndo_stop               = ionic_stop,
1607         .ndo_start_xmit         = ionic_start_xmit,
1608         .ndo_get_stats64        = ionic_get_stats64,
1609         .ndo_set_rx_mode        = ionic_set_rx_mode,
1610         .ndo_set_features       = ionic_set_features,
1611         .ndo_set_mac_address    = ionic_set_mac_address,
1612         .ndo_validate_addr      = eth_validate_addr,
1613         .ndo_tx_timeout         = ionic_tx_timeout,
1614         .ndo_change_mtu         = ionic_change_mtu,
1615         .ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1616         .ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1617 };
1618 
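     /* Stop and restart the queues, serialized by the QUEUE_RESET bit */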
1619 int ionic_reset_queues(struct ionic_lif *lif)
1620 {
1621         bool running;
1622         int err = 0;
1623 
1624         /* Put off the next watchdog timeout */
1625         netif_trans_update(lif->netdev);
1626 
1627         if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
1628                 return -EBUSY;
1629 
1630         running = netif_running(lif->netdev);
1631         if (running)
1632                 err = ionic_stop(lif->netdev);
1633         if (!err && running)
1634                 ionic_open(lif->netdev);
1635 
1636         clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);
1637 
1638         return err;
1639 }
1640 
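     /* Allocate the netdev and lif, the DMA lif info block, the queues,
      * and the RSS indirection table
      */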
1641 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
1642 {
1643         struct device *dev = ionic->dev;
1644         struct net_device *netdev;
1645         struct ionic_lif *lif;
1646         int tbl_sz;
1647         u32 coal;
1648         int err;
1649 
1650         netdev = alloc_etherdev_mqs(sizeof(*lif),
1651                                     ionic->ntxqs_per_lif, ionic->nrxqs_per_lif);
1652         if (!netdev) {
1653                 dev_err(dev, "Cannot allocate netdev, aborting\n");
1654                 return ERR_PTR(-ENOMEM);
1655         }
1656 
1657         SET_NETDEV_DEV(netdev, dev);
1658 
1659         lif = netdev_priv(netdev);
1660         lif->netdev = netdev;
1661         ionic->master_lif = lif;
1662         netdev->netdev_ops = &ionic_netdev_ops;
1663         ionic_ethtool_set_ops(netdev);
1664 
1665         netdev->watchdog_timeo = 2 * HZ;
1666         netdev->min_mtu = IONIC_MIN_MTU;
1667         netdev->max_mtu = IONIC_MAX_MTU;
1668 
1669         lif->neqs = ionic->neqs_per_lif;
1670         lif->nxqs = ionic->ntxqs_per_lif;
1671 
1672         lif->ionic = ionic;
1673         lif->index = index;
1674         lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
1675         lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
1676 
1677         /* Convert the default coalesce value to actual hw resolution */
1678         coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
1679         lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);
1680 
1681         snprintf(lif->name, sizeof(lif->name), "lif%u", index);
1682 
1683         spin_lock_init(&lif->adminq_lock);
1684 
1685         spin_lock_init(&lif->deferred.lock);
1686         INIT_LIST_HEAD(&lif->deferred.list);
1687         INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
1688 
1689         /* allocate lif info */
1690         lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
1691         lif->info = dma_alloc_coherent(dev, lif->info_sz,
1692                                        &lif->info_pa, GFP_KERNEL);
1693         if (!lif->info) {
1694                 dev_err(dev, "Failed to allocate lif info, aborting\n");
1695                 err = -ENOMEM;
1696                 goto err_out_free_netdev;
1697         }
1698 
1699         /* allocate queues */
1700         err = ionic_qcqs_alloc(lif);
1701         if (err)
1702                 goto err_out_free_lif_info;
1703 
1704         /* allocate rss indirection table */
1705         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1706         lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
1707         lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
1708                                               &lif->rss_ind_tbl_pa,
1709                                               GFP_KERNEL);
1710 
1711         if (!lif->rss_ind_tbl) {
1712                 err = -ENOMEM;
1713                 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
1714                 goto err_out_free_qcqs;
1715         }
1716         netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
1717 
1718         list_add_tail(&lif->list, &ionic->lifs);
1719 
1720         return lif;
1721 
1722 err_out_free_qcqs:
1723         ionic_qcqs_free(lif);
1724 err_out_free_lif_info:
1725         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
1726         lif->info = NULL;
1727         lif->info_pa = 0;
1728 err_out_free_netdev:
1729         free_netdev(lif->netdev);
1730         lif = NULL;
1731 
1732         return ERR_PTR(err);
1733 }
1734 
1735 int ionic_lifs_alloc(struct ionic *ionic)
1736 {
1737         struct ionic_lif *lif;
1738 
1739         INIT_LIST_HEAD(&ionic->lifs);
1740 
1741         /* only build the first lif, others are for later features */
1742         set_bit(0, ionic->lifbits);
1743         lif = ionic_lif_alloc(ionic, 0);
1744 
1745         return PTR_ERR_OR_ZERO(lif);
1746 }
1747 
1748 static void ionic_lif_reset(struct ionic_lif *lif)
1749 {
1750         struct ionic_dev *idev = &lif->ionic->idev;
1751 
1752         mutex_lock(&lif->ionic->dev_cmd_lock);
1753         ionic_dev_cmd_lif_reset(idev, lif->index);
1754         ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
1755         mutex_unlock(&lif->ionic->dev_cmd_lock);
1756 }
1757 
1758 static void ionic_lif_free(struct ionic_lif *lif)
1759 {
1760         struct device *dev = lif->ionic->dev;
1761 
1762         /* free rss indirection table */
1763         dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
1764                           lif->rss_ind_tbl_pa);
1765         lif->rss_ind_tbl = NULL;
1766         lif->rss_ind_tbl_pa = 0;
1767 
1768         /* free queues */
1769         ionic_qcqs_free(lif);
1770         ionic_lif_reset(lif);
1771 
1772         /* free lif info */
1773         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
1774         lif->info = NULL;
1775         lif->info_pa = 0;
1776 
1777         /* unmap doorbell page */
1778         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
1779         lif->kern_dbpage = NULL;
1780         kfree(lif->dbid_inuse);
1781         lif->dbid_inuse = NULL;
1782 
1783         /* free netdev & lif */
1784         ionic_debugfs_del_lif(lif);
1785         list_del(&lif->list);
1786         free_netdev(lif->netdev);
1787 }
1788 
1789 void ionic_lifs_free(struct ionic *ionic)
1790 {
1791         struct list_head *cur, *tmp;
1792         struct ionic_lif *lif;
1793 
1794         list_for_each_safe(cur, tmp, &ionic->lifs) {
1795                 lif = list_entry(cur, struct ionic_lif, list);
1796 
1797                 ionic_lif_free(lif);
1798         }
1799 }
1800 
1801 static void ionic_lif_deinit(struct ionic_lif *lif)
1802 {
1803         if (!test_bit(IONIC_LIF_INITED, lif->state))
1804                 return;
1805 
1806         clear_bit(IONIC_LIF_INITED, lif->state);
1807 
1808         ionic_rx_filters_deinit(lif);
1809         ionic_lif_rss_deinit(lif);
1810 
1811         napi_disable(&lif->adminqcq->napi);
1812         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
1813         ionic_lif_qcq_deinit(lif, lif->adminqcq);
1814 
1815         ionic_lif_reset(lif);
1816 }
1817 
1818 void ionic_lifs_deinit(struct ionic *ionic)
1819 {
1820         struct list_head *cur, *tmp;
1821         struct ionic_lif *lif;
1822 
1823         list_for_each_safe(cur, tmp, &ionic->lifs) {
1824                 lif = list_entry(cur, struct ionic_lif, list);
1825                 ionic_lif_deinit(lif);
1826         }
1827 }
1828 
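     /* Set up the AdminQ through the dev_cmd interface and
      * attach its NAPI context and interrupt
      */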
1829 static int ionic_lif_adminq_init(struct ionic_lif *lif)
1830 {
1831         struct device *dev = lif->ionic->dev;
1832         struct ionic_q_init_comp comp;
1833         struct ionic_dev *idev;
1834         struct ionic_qcq *qcq;
1835         struct ionic_queue *q;
1836         int err;
1837 
1838         idev = &lif->ionic->idev;
1839         qcq = lif->adminqcq;
1840         q = &qcq->q;
1841 
1842         mutex_lock(&lif->ionic->dev_cmd_lock);
1843         ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
1844         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
1845         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
1846         mutex_unlock(&lif->ionic->dev_cmd_lock);
1847         if (err) {
1848                 netdev_err(lif->netdev, "adminq init failed %d\n", err);
1849                 return err;
1850         }
1851 
1852         q->hw_type = comp.hw_type;
1853         q->hw_index = le32_to_cpu(comp.hw_index);
1854         q->dbval = IONIC_DBELL_QID(q->hw_index);
1855 
1856         dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
1857         dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
1858 
1859         netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
1860                        NAPI_POLL_WEIGHT);
1861 
1862         err = ionic_request_irq(lif, qcq);
1863         if (err) {
1864                 netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
1865                 netif_napi_del(&qcq->napi);
1866                 return err;
1867         }
1868 
1869         napi_enable(&qcq->napi);
1870 
1871         if (qcq->flags & IONIC_QCQ_F_INTR)
1872                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
1873                                 IONIC_INTR_MASK_CLEAR);
1874 
1875         qcq->flags |= IONIC_QCQ_F_INITED;
1876 
1877         ionic_debugfs_add_qcq(lif, qcq);
1878 
1879         return 0;
1880 }
1881 
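     /* Set up the NotifyQ through the AdminQ; it shares the AdminQ interrupt */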
1882 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
1883 {
1884         struct ionic_qcq *qcq = lif->notifyqcq;
1885         struct device *dev = lif->ionic->dev;
1886         struct ionic_queue *q = &qcq->q;
1887         int err;
1888 
1889         struct ionic_admin_ctx ctx = {
1890                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1891                 .cmd.q_init = {
1892                         .opcode = IONIC_CMD_Q_INIT,
1893                         .lif_index = cpu_to_le16(lif->index),
1894                         .type = q->type,
1895                         .index = cpu_to_le32(q->index),
1896                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
1897                                              IONIC_QINIT_F_ENA),
1898                         .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
1899                         .pid = cpu_to_le16(q->pid),
1900                         .ring_size = ilog2(q->num_descs),
1901                         .ring_base = cpu_to_le64(q->base_pa),
1902                 }
1903         };
1904 
1905         dev_dbg(dev, "notifyq_init.pid %d\n", le16_to_cpu(ctx.cmd.q_init.pid));
1906         dev_dbg(dev, "notifyq_init.index %d\n", le32_to_cpu(ctx.cmd.q_init.index));
1907         dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", le64_to_cpu(ctx.cmd.q_init.ring_base));
1908         dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
1909 
1910         err = ionic_adminq_post_wait(lif, &ctx);
1911         if (err)
1912                 return err;
1913 
1914         q->hw_type = ctx.comp.q_init.hw_type;
1915         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
1916         q->dbval = IONIC_DBELL_QID(q->hw_index);
1917 
1918         dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
1919         dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
1920 
1921         /* preset the callback info */
1922         q->info[0].cb_arg = lif;
1923 
1924         qcq->flags |= IONIC_QCQ_F_INITED;
1925 
1926         ionic_debugfs_add_qcq(lif, qcq);
1927 
1928         return 0;
1929 }
1930 
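     /* Read the LIF's MAC from the NIC and make it the station address */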
1931 static int ionic_station_set(struct ionic_lif *lif)
1932 {
1933         struct net_device *netdev = lif->netdev;
1934         struct ionic_admin_ctx ctx = {
1935                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1936                 .cmd.lif_getattr = {
1937                         .opcode = IONIC_CMD_LIF_GETATTR,
1938                         .index = cpu_to_le16(lif->index),
1939                         .attr = IONIC_LIF_ATTR_MAC,
1940                 },
1941         };
1942         struct sockaddr addr;
1943         int err;
1944 
1945         err = ionic_adminq_post_wait(lif, &ctx);
1946         if (err)
1947                 return err;
1948 
1949         memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
1950         addr.sa_family = AF_INET;
1951         err = eth_prepare_mac_addr_change(netdev, &addr);
1952         if (err)
1953                 return err;
1954 
1955         if (!is_zero_ether_addr(netdev->dev_addr)) {
1956                 netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
1957                            netdev->dev_addr);
1958                 ionic_lif_addr(lif, netdev->dev_addr, false);
1959         }
1960 
1961         eth_commit_mac_addr_change(netdev, &addr);
1962         netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
1963                    netdev->dev_addr);
1964         ionic_lif_addr(lif, netdev->dev_addr, true);
1965 
1966         return 0;
1967 }
1968 
1969 static int ionic_lif_init(struct ionic_lif *lif)
1970 {
1971         struct ionic_dev *idev = &lif->ionic->idev;
1972         struct device *dev = lif->ionic->dev;
1973         struct ionic_lif_init_comp comp;
1974         int dbpage_num;
1975         int err;
1976 
1977         ionic_debugfs_add_lif(lif);
1978 
1979         mutex_lock(&lif->ionic->dev_cmd_lock);
1980         ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
1981         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
1982         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
1983         mutex_unlock(&lif->ionic->dev_cmd_lock);
1984         if (err)
1985                 return err;
1986 
1987         lif->hw_index = le16_to_cpu(comp.hw_index);
1988 
1989         /* now that we have the hw_index we can figure out our doorbell page */
1990         lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
1991         if (!lif->dbid_count) {
1992                 dev_err(dev, "No doorbell pages, aborting\n");
1993                 return -EINVAL;
1994         }
1995 
1996         lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
1997         if (!lif->dbid_inuse) {
1998                 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
1999                 return -ENOMEM;
2000         }
2001 
2002         /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2003         set_bit(0, lif->dbid_inuse);
2004         lif->kern_pid = 0;
2005 
2006         dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2007         lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2008         if (!lif->kern_dbpage) {
2009                 dev_err(dev, "Cannot map dbpage, aborting\n");
2010                 err = -ENOMEM;
2011                 goto err_out_free_dbid;
2012         }
2013 
2014         err = ionic_lif_adminq_init(lif);
2015         if (err)
2016                 goto err_out_adminq_deinit;
2017 
2018         if (lif->ionic->nnqs_per_lif) {
2019                 err = ionic_lif_notifyq_init(lif);
2020                 if (err)
2021                         goto err_out_notifyq_deinit;
2022         }
2023 
2024         err = ionic_init_nic_features(lif);
2025         if (err)
2026                 goto err_out_notifyq_deinit;
2027 
2028         err = ionic_rx_filters_init(lif);
2029         if (err)
2030                 goto err_out_notifyq_deinit;
2031 
2032         err = ionic_station_set(lif);
2033         if (err)
2034                 goto err_out_notifyq_deinit;
2035 
2036         lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2037 
2038         set_bit(IONIC_LIF_INITED, lif->state);
2039 
2040         INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2041 
2042         return 0;
2043 
2044 err_out_notifyq_deinit:
2045         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2046 err_out_adminq_deinit:
2047         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2048         ionic_lif_reset(lif);
2049         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2050         lif->kern_dbpage = NULL;
2051 err_out_free_dbid:
2052         kfree(lif->dbid_inuse);
2053         lif->dbid_inuse = NULL;
2054 
2055         return err;
2056 }
2057 
2058 int ionic_lifs_init(struct ionic *ionic)
2059 {
2060         struct list_head *cur, *tmp;
2061         struct ionic_lif *lif;
2062         int err;
2063 
2064         list_for_each_safe(cur, tmp, &ionic->lifs) {
2065                 lif = list_entry(cur, struct ionic_lif, list);
2066                 err = ionic_lif_init(lif);
2067                 if (err)
2068                         return err;
2069         }
2070 
2071         return 0;
2072 }
2073 
2074 static void ionic_lif_notify_work(struct work_struct *ws)
2075 {
2076 }
2077 
2078 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2079 {
2080         struct ionic_admin_ctx ctx = {
2081                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2082                 .cmd.lif_setattr = {
2083                         .opcode = IONIC_CMD_LIF_SETATTR,
2084                         .index = cpu_to_le16(lif->index),
2085                         .attr = IONIC_LIF_ATTR_NAME,
2086                 },
2087         };
2088 
2089         strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2090                 sizeof(ctx.cmd.lif_setattr.name));
2091 
2092         ionic_adminq_post_wait(lif, &ctx);
2093 }
2094 
2095 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2096 {
2097         if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2098                 return NULL;
2099 
2100         return netdev_priv(netdev);
2101 }
2102 
2103 static int ionic_lif_notify(struct notifier_block *nb,
2104                             unsigned long event, void *info)
2105 {
2106         struct net_device *ndev = netdev_notifier_info_to_dev(info);
2107         struct ionic *ionic = container_of(nb, struct ionic, nb);
2108         struct ionic_lif *lif = ionic_netdev_lif(ndev);
2109 
2110         if (!lif || lif->ionic != ionic)
2111                 return NOTIFY_DONE;
2112 
2113         switch (event) {
2114         case NETDEV_CHANGENAME:
2115                 ionic_lif_set_netdev_info(lif);
2116                 break;
2117         }
2118 
2119         return NOTIFY_DONE;
2120 }
2121 
2122 int ionic_lifs_register(struct ionic *ionic)
2123 {
2124         int err;
2125 
2126         INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2127 
2128         ionic->nb.notifier_call = ionic_lif_notify;
2129 
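             /* the notifier only tracks netdev name changes,
              * so carry on if it fails to register
              */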
2130         err = register_netdevice_notifier(&ionic->nb);
2131         if (err)
2132                 ionic->nb.notifier_call = NULL;
2133 
2134         /* only register LIF0 for now */
2135         err = register_netdev(ionic->master_lif->netdev);
2136         if (err) {
2137                 dev_err(ionic->dev, "Cannot register net device, aborting\n");
2138                 return err;
2139         }
2140 
2141         ionic_link_status_check_request(ionic->master_lif);
2142         ionic->master_lif->registered = true;
2143 
2144         return 0;
2145 }
2146 
2147 void ionic_lifs_unregister(struct ionic *ionic)
2148 {
2149         if (ionic->nb.notifier_call) {
2150                 unregister_netdevice_notifier(&ionic->nb);
2151                 cancel_work_sync(&ionic->nb_work);
2152                 ionic->nb.notifier_call = NULL;
2153         }
2154 
2155         /* There is only one lif ever registered in the
2156          * current model, so don't bother searching the
2157          * ionic->lifs list for candidates to unregister
2158          */
2159         cancel_work_sync(&ionic->master_lif->deferred.work);
2160         cancel_work_sync(&ionic->master_lif->tx_timeout_work);
2161         if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2162                 unregister_netdev(ionic->master_lif->netdev);
2163 }
2164 
2165 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2166                        union ionic_lif_identity *lid)
2167 {
2168         struct ionic_dev *idev = &ionic->idev;
2169         size_t sz;
2170         int err;
2171 
2172         sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2173 
2174         mutex_lock(&ionic->dev_cmd_lock);
2175         ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2176         err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2177         memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2178         mutex_unlock(&ionic->dev_cmd_lock);
2179         if (err)
2180                 return err;
2181 
2182         dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2183                 le64_to_cpu(lid->capabilities));
2184 
2185         dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2186                 le32_to_cpu(lid->eth.max_ucast_filters));
2187         dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2188                 le32_to_cpu(lid->eth.max_mcast_filters));
2189         dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2190                 le64_to_cpu(lid->eth.config.features));
2191         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2192                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2193         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2194                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2195         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2196                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2197         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2198                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2199         dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2200         dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2201         dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2202                 le32_to_cpu(lid->eth.config.mtu));
2203 
2204         return 0;
2205 }
2206 
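     /* Size the queue and interrupt counts to what the device and OS
      * can support, halving the requests until they fit
      */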
2207 int ionic_lifs_size(struct ionic *ionic)
2208 {
2209         struct ionic_identity *ident = &ionic->ident;
2210         unsigned int nintrs, dev_nintrs;
2211         union ionic_lif_config *lc;
2212         unsigned int ntxqs_per_lif;
2213         unsigned int nrxqs_per_lif;
2214         unsigned int neqs_per_lif;
2215         unsigned int nnqs_per_lif;
2216         unsigned int nxqs, neqs;
2217         unsigned int min_intrs;
2218         int err;
2219 
2220         lc = &ident->lif.eth.config;
2221         dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2222         neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2223         nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2224         ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2225         nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2226 
2227         nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2228         nxqs = min(nxqs, num_online_cpus());
2229         neqs = min(neqs_per_lif, num_online_cpus());
2230 
2231 try_again:
2232         /* interrupt usage:
2233          *    1 for master lif adminq/notifyq
2234          *    1 for each CPU for master lif TxRx queue pairs
2235          *    whatever's left is for RDMA queues
2236          */
2237         nintrs = 1 + nxqs + neqs;
2238         min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2239 
2240         if (nintrs > dev_nintrs)
2241                 goto try_fewer;
2242 
2243         err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2244         if (err < 0 && err != -ENOSPC) {
2245                 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2246                 return err;
2247         }
2248         if (err == -ENOSPC)
2249                 goto try_fewer;
2250 
2251         if (err != nintrs) {
2252                 ionic_bus_free_irq_vectors(ionic);
2253                 goto try_fewer;
2254         }
2255 
2256         ionic->nnqs_per_lif = nnqs_per_lif;
2257         ionic->neqs_per_lif = neqs;
2258         ionic->ntxqs_per_lif = nxqs;
2259         ionic->nrxqs_per_lif = nxqs;
2260         ionic->nintrs = nintrs;
2261 
2262         ionic_debugfs_add_sizes(ionic);
2263 
2264         return 0;
2265 
2266 try_fewer:
2267         if (nnqs_per_lif > 1) {
2268                 nnqs_per_lif >>= 1;
2269                 goto try_again;
2270         }
2271         if (neqs > 1) {
2272                 neqs >>= 1;
2273                 goto try_again;
2274         }
2275         if (nxqs > 1) {
2276                 nxqs >>= 1;
2277                 goto try_again;
2278         }
2279         dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2280         return -ENOSPC;
2281 }
