drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c

DEFINITIONS

This source file includes the following definitions.
  1. storm_memset_vf_to_pf
  2. storm_memset_func_en
  3. bnx2x_vf_idx_by_abs_fid
  4. bnx2x_vf_by_abs_fid
  5. bnx2x_vf_igu_ack_sb
  6. bnx2x_validate_vf_sp_objs
  7. bnx2x_vfop_qctor_dump_tx
  8. bnx2x_vfop_qctor_dump_rx
  9. bnx2x_vfop_qctor_prep
  10. bnx2x_vf_queue_create
  11. bnx2x_vf_queue_destroy
  12. bnx2x_vf_set_igu_info
  13. bnx2x_vf_vlan_credit
  14. bnx2x_vf_vlan_mac_clear
  15. bnx2x_vf_mac_vlan_config
  16. bnx2x_vf_mac_vlan_config_list
  17. bnx2x_vf_queue_setup
  18. bnx2x_vf_queue_flr
  19. bnx2x_vf_mcast
  20. bnx2x_vf_prep_rx_mode
  21. bnx2x_vf_rxmode
  22. bnx2x_vf_queue_teardown
  23. bnx2x_vf_enable_internal
  24. bnx2x_vf_semi_clear_err
  25. bnx2x_vf_pglue_clear_err
  26. bnx2x_vf_igu_reset
  27. bnx2x_vf_enable_access
  28. bnx2x_vf_enable_traffic
  29. bnx2x_vf_is_pcie_pending
  30. bnx2x_vf_flr_clnup_epilog
  31. bnx2x_iov_static_resc
  32. bnx2x_vf_free_resc
  33. bnx2x_vf_flr_clnup_hw
  34. bnx2x_vf_flr
  35. bnx2x_vf_flr_clnup
  36. bnx2x_vf_handle_flr_event
  37. bnx2x_iov_init_dq
  38. bnx2x_iov_init_dmae
  39. bnx2x_vf_domain
  40. bnx2x_vf_bus
  41. bnx2x_vf_devfn
  42. bnx2x_vf_set_bars
  43. bnx2x_get_vf_igu_cam_info
  44. __bnx2x_iov_free_vfdb
  45. bnx2x_sriov_pci_cfg_info
  46. bnx2x_sriov_info
  47. bnx2x_iov_init_one
  48. bnx2x_iov_remove_one
  49. bnx2x_iov_free_mem
  50. bnx2x_iov_alloc_mem
  51. bnx2x_vfq_init
  52. bnx2x_max_speed_cap
  53. bnx2x_iov_link_update_vf
  54. bnx2x_set_vf_link_state
  55. bnx2x_iov_link_update
  56. bnx2x_iov_nic_init
  57. bnx2x_iov_chip_cleanup
  58. bnx2x_iov_init_ilt
  59. bnx2x_iov_is_vf_cid
  60. bnx2x_vf_handle_classification_eqe
  61. bnx2x_vf_handle_mcast_eqe
  62. bnx2x_vf_handle_filters_eqe
  63. bnx2x_vf_handle_rss_update_eqe
  64. bnx2x_iov_eq_sp_event
  65. bnx2x_vf_by_cid
  66. bnx2x_iov_set_queue_sp_obj
  67. bnx2x_iov_adjust_stats_req
  68. bnx2x_vf_qtbl_set_q
  69. bnx2x_vf_clr_qtbl
  70. bnx2x_vf_igu_disable
  71. bnx2x_vf_max_queue_cnt
  72. bnx2x_vf_chk_avail_resc
  73. bnx2x_vf_acquire
  74. bnx2x_vf_init
  75. bnx2x_set_vf_state
  76. bnx2x_vf_close
  77. bnx2x_vf_free
  78. bnx2x_vf_rss_update
  79. bnx2x_vf_tpa_update
  80. bnx2x_vf_release
  81. bnx2x_lock_vf_pf_channel
  82. bnx2x_unlock_vf_pf_channel
  83. bnx2x_set_pf_tx_switching
  84. bnx2x_sriov_configure
  85. bnx2x_enable_sriov
  86. bnx2x_pf_set_vfs_vlan
  87. bnx2x_disable_sriov
  88. bnx2x_vf_op_prep
  89. bnx2x_get_vf_config
  90. bnx2x_set_vf_mac
  91. bnx2x_set_vf_vlan_acceptance
  92. bnx2x_set_vf_vlan_filter
  93. bnx2x_set_vf_vlan
  94. bnx2x_set_vf_spoofchk
  95. bnx2x_crc_vf_bulletin
  96. bnx2x_sample_bulletin
  97. bnx2x_timer_sriov
  98. bnx2x_vf_doorbells
  99. bnx2x_vf_pci_dealloc
  100. bnx2x_vf_pci_alloc
  101. bnx2x_iov_channel_down
  102. bnx2x_iov_task
  103. bnx2x_schedule_iov_task

   1 /* bnx2x_sriov.c: QLogic Everest network driver.
   2  *
   3  * Copyright 2009-2013 Broadcom Corporation
   4  * Copyright 2014 QLogic Corporation
   5  * All rights reserved
   6  *
   7  * Unless you and QLogic execute a separate written software license
   8  * agreement governing use of this software, this software is licensed to you
   9  * under the terms of the GNU General Public License version 2, available
  10  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  11  *
  12  * Notwithstanding the above, under no circumstances may you combine this
  13  * software in any way with any other QLogic software provided under a
  14  * license other than the GPL, without QLogic's express prior written
  15  * consent.
  16  *
  17  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  18  * Written by: Shmulik Ravid
  19  *             Ariel Elior <ariel.elior@qlogic.com>
  20  *
  21  */
  22 #include "bnx2x.h"
  23 #include "bnx2x_init.h"
  24 #include "bnx2x_cmn.h"
  25 #include "bnx2x_sp.h"
  26 #include <linux/crc32.h>
  27 #include <linux/if_vlan.h>
  28 
  29 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
  30                             struct bnx2x_virtf **vf,
  31                             struct pf_vf_bulletin_content **bulletin,
  32                             bool test_queue);
  33 
  34 /* General service functions */
  35 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
  36                                          u16 pf_id)
  37 {
  38         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
  39                 pf_id);
  40         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
  41                 pf_id);
  42         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
  43                 pf_id);
  44         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
  45                 pf_id);
  46 }
  47 
  48 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
  49                                         u8 enable)
  50 {
  51         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
  52                 enable);
  53         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
  54                 enable);
  55         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
  56                 enable);
  57         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
  58                 enable);
  59 }
  60 
  61 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
  62 {
  63         int idx;
  64 
  65         for_each_vf(bp, idx)
  66                 if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
  67                         break;
  68         return idx;
  69 }
  70 
  71 static
  72 struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
  73 {
  74         u16 idx =  (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
  75         return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
  76 }
  77 
  78 static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
  79                                 u8 igu_sb_id, u8 segment, u16 index, u8 op,
  80                                 u8 update)
  81 {
  82         /* acking a VF sb through the PF - use the GRC */
  83         u32 ctl;
  84         u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
  85         u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
  86         u32 func_encode = vf->abs_vfid;
  87         u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
  88         struct igu_regular cmd_data = {0};
  89 
  90         cmd_data.sb_id_and_flags =
  91                         ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
  92                          (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
  93                          (update << IGU_REGULAR_BUPDATE_SHIFT) |
  94                          (op << IGU_REGULAR_ENABLE_INT_SHIFT));
  95 
  96         ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
  97               func_encode << IGU_CTRL_REG_FID_SHIFT             |
  98               IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
  99 
 100         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 101            cmd_data.sb_id_and_flags, igu_addr_data);
 102         REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
 103         barrier();
 104 
 105         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
 106            ctl, igu_addr_ctl);
 107         REG_WR(bp, igu_addr_ctl, ctl);
 108         barrier();
 109 }
 110 
 111 static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
 112                                        struct bnx2x_virtf *vf,
 113                                        bool print_err)
 114 {
 115         if (!bnx2x_leading_vfq(vf, sp_initialized)) {
 116                 if (print_err)
 117                         BNX2X_ERR("Slowpath objects not yet initialized!\n");
 118                 else
 119                         DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
 120                 return false;
 121         }
 122         return true;
 123 }
 124 
 125 /* VFOP operations states */
 126 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 127                               struct bnx2x_queue_init_params *init_params,
 128                               struct bnx2x_queue_setup_params *setup_params,
 129                               u16 q_idx, u16 sb_idx)
 130 {
 131         DP(BNX2X_MSG_IOV,
 132            "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
 133            vf->abs_vfid,
 134            q_idx,
 135            sb_idx,
 136            init_params->tx.sb_cq_index,
 137            init_params->tx.hc_rate,
 138            setup_params->flags,
 139            setup_params->txq_params.traffic_type);
 140 }
 141 
 142 void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
 143                             struct bnx2x_queue_init_params *init_params,
 144                             struct bnx2x_queue_setup_params *setup_params,
 145                             u16 q_idx, u16 sb_idx)
 146 {
 147         struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
 148 
 149         DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
 150            "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
 151            vf->abs_vfid,
 152            q_idx,
 153            sb_idx,
 154            init_params->rx.sb_cq_index,
 155            init_params->rx.hc_rate,
 156            setup_params->gen_params.mtu,
 157            rxq_params->buf_sz,
 158            rxq_params->sge_buf_sz,
 159            rxq_params->max_sges_pkt,
 160            rxq_params->tpa_agg_sz,
 161            setup_params->flags,
 162            rxq_params->drop_flags,
 163            rxq_params->cache_line_log);
 164 }
 165 
 166 void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
 167                            struct bnx2x_virtf *vf,
 168                            struct bnx2x_vf_queue *q,
 169                            struct bnx2x_vf_queue_construct_params *p,
 170                            unsigned long q_type)
 171 {
 172         struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
 173         struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
 174 
 175         /* INIT */
 176 
 177         /* Enable host coalescing in the transition to INIT state */
 178         if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
 179                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
 180 
 181         if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
 182                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
 183 
 184         /* FW SB ID */
 185         init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 186         init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 187 
 188         /* context */
 189         init_p->cxts[0] = q->cxt;
 190 
 191         /* SETUP */
 192 
 193         /* Setup-op general parameters */
 194         setup_p->gen_params.spcl_id = vf->sp_cl_id;
 195         setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 196         setup_p->gen_params.fp_hsi = vf->fp_hsi;
 197 
 198         /* Setup-op flags:
 199          * collect statistics, zero statistics, local-switching, security,
 200          * OV for Flex10, RSS and MCAST for leading
 201          */
 202         if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
 203                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
 204 
 205         /* for VFs, enable tx switching, bd coherency, and mac address
 206          * anti-spoofing
 207          */
 208         __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
 209         __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
 210         if (vf->spoofchk)
 211                 __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 212         else
 213                 __clear_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
 214 
 215         /* Setup-op rx parameters */
 216         if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
 217                 struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
 218 
 219                 rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
 220                 rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 221                 rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
 222 
 223                 if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
 224                         rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
 225         }
 226 
 227         /* Setup-op tx parameters */
 228         if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
 229                 setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
 230                 setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
 231         }
 232 }
 233 
 234 static int bnx2x_vf_queue_create(struct bnx2x *bp,
 235                                  struct bnx2x_virtf *vf, int qid,
 236                                  struct bnx2x_vf_queue_construct_params *qctor)
 237 {
 238         struct bnx2x_queue_state_params *q_params;
 239         int rc = 0;
 240 
 241         DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 242 
 243         /* Prepare ramrod information */
 244         q_params = &qctor->qstate;
 245         q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 246         set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);
 247 
 248         if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
 249             BNX2X_Q_LOGICAL_STATE_ACTIVE) {
 250                 DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
 251                 goto out;
 252         }
 253 
 254         /* Run Queue 'construction' ramrods */
 255         q_params->cmd = BNX2X_Q_CMD_INIT;
 256         rc = bnx2x_queue_state_change(bp, q_params);
 257         if (rc)
 258                 goto out;
 259 
 260         memcpy(&q_params->params.setup, &qctor->prep_qsetup,
 261                sizeof(struct bnx2x_queue_setup_params));
 262         q_params->cmd = BNX2X_Q_CMD_SETUP;
 263         rc = bnx2x_queue_state_change(bp, q_params);
 264         if (rc)
 265                 goto out;
 266 
 267         /* enable interrupts */
 268         bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
 269                             USTORM_ID, 0, IGU_INT_ENABLE, 0);
 270 out:
 271         return rc;
 272 }
 273 
 274 static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
 275                                   int qid)
 276 {
 277         enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
 278                                        BNX2X_Q_CMD_TERMINATE,
 279                                        BNX2X_Q_CMD_CFC_DEL};
 280         struct bnx2x_queue_state_params q_params;
 281         int rc, i;
 282 
 283         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 284 
 285         /* Prepare ramrod information */
 286         memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
 287         q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 288         set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 289 
 290         if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
 291             BNX2X_Q_LOGICAL_STATE_STOPPED) {
 292                 DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
 293                 goto out;
 294         }
 295 
 296         /* Run Queue 'destruction' ramrods */
 297         for (i = 0; i < ARRAY_SIZE(cmds); i++) {
 298                 q_params.cmd = cmds[i];
 299                 rc = bnx2x_queue_state_change(bp, &q_params);
 300                 if (rc) {
 301                         BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
 302                         return rc;
 303                 }
 304         }
 305 out:
 306         /* Clean Context */
 307         if (bnx2x_vfq(vf, qid, cxt)) {
 308                 bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
 309                 bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
 310         }
 311 
 312         return 0;
 313 }
 314 
 315 static void
 316 bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 317 {
 318         struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 319         if (vf) {
 320                 /* the first igu entry belonging to VFs of this PF */
 321                 if (!BP_VFDB(bp)->first_vf_igu_entry)
 322                         BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
 323 
 324                 /* the first igu entry belonging to this VF */
 325                 if (!vf_sb_count(vf))
 326                         vf->igu_base_id = igu_sb_id;
 327 
 328                 ++vf_sb_count(vf);
 329                 ++vf->sb_count;
 330         }
 331         BP_VFDB(bp)->vf_sbs_pool++;
 332 }
 333 
 334 static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
 335                                         struct bnx2x_vlan_mac_obj *obj,
 336                                         atomic_t *counter)
 337 {
 338         struct list_head *pos;
 339         int read_lock;
 340         int cnt = 0;
 341 
 342         read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
 343         if (read_lock)
 344                 DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
 345 
 346         list_for_each(pos, &obj->head)
 347                 cnt++;
 348 
 349         if (!read_lock)
 350                 bnx2x_vlan_mac_h_read_unlock(bp, obj);
 351 
 352         atomic_set(counter, cnt);
 353 }
 354 
 355 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
 356                                    int qid, bool drv_only, int type)
 357 {
 358         struct bnx2x_vlan_mac_ramrod_params ramrod;
 359         int rc;
 360 
 361         DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
 362                           (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
 363                           (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 364 
 365         /* Prepare ramrod params */
 366         memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 367         if (type == BNX2X_VF_FILTER_VLAN_MAC) {
 368                 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 369                 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
 370         } else if (type == BNX2X_VF_FILTER_MAC) {
 371                 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 372                 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 373         } else {
 374                 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 375         }
 376         ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
 377 
 378         set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 379         if (drv_only)
 380                 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 381         else
 382                 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 383 
 384         /* Start deleting */
 385         rc = ramrod.vlan_mac_obj->delete_all(bp,
 386                                              ramrod.vlan_mac_obj,
 387                                              &ramrod.user_req.vlan_mac_flags,
 388                                              &ramrod.ramrod_flags);
 389         if (rc) {
 390                 BNX2X_ERR("Failed to delete all %s\n",
 391                           (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
 392                           (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 393                 return rc;
 394         }
 395 
 396         return 0;
 397 }
 398 
 399 static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 400                                     struct bnx2x_virtf *vf, int qid,
 401                                     struct bnx2x_vf_mac_vlan_filter *filter,
 402                                     bool drv_only)
 403 {
 404         struct bnx2x_vlan_mac_ramrod_params ramrod;
 405         int rc;
 406 
 407         DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 408            vf->abs_vfid, filter->add ? "Adding" : "Deleting",
 409            (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
 410            (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 411 
 412         /* Prepare ramrod params */
 413         memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
 414         if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
 415                 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
 416                 ramrod.user_req.u.vlan.vlan = filter->vid;
 417                 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
 418                 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 419         } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
 420                 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 421                 ramrod.user_req.u.vlan.vlan = filter->vid;
 422         } else {
 423                 set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 424                 ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 425                 memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
 426         }
 427         ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 428                                             BNX2X_VLAN_MAC_DEL;
 429 
 430         set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 431         if (drv_only)
 432                 set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
 433         else
 434                 set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 435 
 436         /* Add/Remove the filter */
 437         rc = bnx2x_config_vlan_mac(bp, &ramrod);
 438         if (rc == -EEXIST)
 439                 return 0;
 440         if (rc) {
 441                 BNX2X_ERR("Failed to %s %s\n",
 442                           filter->add ? "add" : "delete",
 443                           (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
 444                                 "VLAN-MAC" :
 445                           (filter->type == BNX2X_VF_FILTER_MAC) ?
 446                                 "MAC" : "VLAN");
 447                 return rc;
 448         }
 449 
 450         filter->applied = true;
 451 
 452         return 0;
 453 }
 454 
 455 int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
 456                                   struct bnx2x_vf_mac_vlan_filters *filters,
 457                                   int qid, bool drv_only)
 458 {
 459         int rc = 0, i;
 460 
 461         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 462 
 463         if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
 464                 return -EINVAL;
 465 
 466         /* Prepare ramrod params */
 467         for (i = 0; i < filters->count; i++) {
 468                 rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
 469                                               &filters->filters[i], drv_only);
 470                 if (rc)
 471                         break;
 472         }
 473 
 474         /* Rollback if needed */
 475         if (i != filters->count) {
 476                 BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
 477                           i, filters->count);
 478                 while (--i >= 0) {
 479                         if (!filters->filters[i].applied)
 480                                 continue;
 481                         filters->filters[i].add = !filters->filters[i].add;
 482                         bnx2x_vf_mac_vlan_config(bp, vf, qid,
 483                                                  &filters->filters[i],
 484                                                  drv_only);
 485                 }
 486         }
 487 
 488         /* It's our responsibility to free the filters */
 489         kfree(filters);
 490 
 491         return rc;
 492 }
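/* Usage sketch (illustrative, not copied from an actual caller): the VF-PF
 * mailbox handlers build a bnx2x_vf_mac_vlan_filters array and hand
 * ownership to bnx2x_vf_mac_vlan_config_list(), which frees it on both
 * success and failure. Assuming the flexible-array layout declared in
 * bnx2x_sriov.h, a single-MAC add could look like:
 *
 *	fl = kzalloc(struct_size(fl, filters, 1), GFP_KERNEL);
 *	if (!fl)
 *		return -ENOMEM;
 *	fl->count = 1;
 *	fl->filters[0].type = BNX2X_VF_FILTER_MAC;
 *	fl->filters[0].add = true;
 *	fl->filters[0].mac = mac;
 *	rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl, LEADING_IDX, false);
 */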
 493 
 494 int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
 495                          struct bnx2x_vf_queue_construct_params *qctor)
 496 {
 497         int rc;
 498 
 499         DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 500 
 501         rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
 502         if (rc)
 503                 goto op_err;
 504 
 505         /* Schedule the configuration of any pending vlan filters */
 506         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 507                                BNX2X_MSG_IOV);
 508         return 0;
 509 op_err:
 510         BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 511         return rc;
 512 }
 513 
 514 static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
 515                                int qid)
 516 {
 517         int rc;
 518 
 519         DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 520 
 521         /* If needed, clean the filtering data base */
 522         if ((qid == LEADING_IDX) &&
 523             bnx2x_validate_vf_sp_objs(bp, vf, false)) {
 524                 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
 525                                              BNX2X_VF_FILTER_VLAN_MAC);
 526                 if (rc)
 527                         goto op_err;
 528                 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
 529                                              BNX2X_VF_FILTER_VLAN);
 530                 if (rc)
 531                         goto op_err;
 532                 rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
 533                                              BNX2X_VF_FILTER_MAC);
 534                 if (rc)
 535                         goto op_err;
 536         }
 537 
 538         /* Terminate queue */
 539         if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
 540                 struct bnx2x_queue_state_params qstate;
 541 
 542                 memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
 543                 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
 544                 qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
 545                 qstate.cmd = BNX2X_Q_CMD_TERMINATE;
 546                 set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
 547                 rc = bnx2x_queue_state_change(bp, &qstate);
 548                 if (rc)
 549                         goto op_err;
 550         }
 551 
 552         return 0;
 553 op_err:
 554         BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
 555         return rc;
 556 }
 557 
 558 int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 559                    bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
 560 {
 561         struct bnx2x_mcast_list_elem *mc = NULL;
 562         struct bnx2x_mcast_ramrod_params mcast;
 563         int rc, i;
 564 
 565         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 566 
 567         /* Prepare Multicast command */
 568         memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
 569         mcast.mcast_obj = &vf->mcast_obj;
 570         if (drv_only)
 571                 set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
 572         else
 573                 set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
 574         if (mc_num) {
 575                 mc = kcalloc(mc_num, sizeof(struct bnx2x_mcast_list_elem),
 576                              GFP_KERNEL);
 577                 if (!mc) {
 578                         BNX2X_ERR("Cannot Configure multicasts due to lack of memory\n");
 579                         return -ENOMEM;
 580                 }
 581         }
 582 
 583         if (mc_num) {
 584                 INIT_LIST_HEAD(&mcast.mcast_list);
 585                 for (i = 0; i < mc_num; i++) {
 586                         mc[i].mac = mcasts[i];
 587                         list_add_tail(&mc[i].link,
 588                                       &mcast.mcast_list);
 589                 }
 590 
 591                 /* add new mcasts */
 592                 mcast.mcast_list_len = mc_num;
 593                 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
 594                 if (rc)
 595                         BNX2X_ERR("Failed to set multicasts\n");
 596         } else {
 597                 /* clear existing mcasts */
 598                 rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
 599                 if (rc)
 600                         BNX2X_ERR("Failed to remove multicasts\n");
 601         }
 602 
 603         kfree(mc);
 604 
 605         return rc;
 606 }
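/* Note: mc_num == 0 turns this into a "clear all multicasts" call. Both
 * forms appear later in this file:
 *
 *	bnx2x_vf_mcast(bp, vf, NULL, 0, false);	- queue teardown path
 *	bnx2x_vf_mcast(bp, vf, NULL, 0, true);	- FLR path (driver-only clear)
 */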
 607 
 608 static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
 609                                   struct bnx2x_rx_mode_ramrod_params *ramrod,
 610                                   struct bnx2x_virtf *vf,
 611                                   unsigned long accept_flags)
 612 {
 613         struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
 614 
 615         memset(ramrod, 0, sizeof(*ramrod));
 616         ramrod->cid = vfq->cid;
 617         ramrod->cl_id = vfq_cl_id(vf, vfq);
 618         ramrod->rx_mode_obj = &bp->rx_mode_obj;
 619         ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
 620         ramrod->rx_accept_flags = accept_flags;
 621         ramrod->tx_accept_flags = accept_flags;
 622         ramrod->pstate = &vf->filter_state;
 623         ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
 624 
 625         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
 626         set_bit(RAMROD_RX, &ramrod->ramrod_flags);
 627         set_bit(RAMROD_TX, &ramrod->ramrod_flags);
 628 
 629         ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
 630         ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
 631 }
 632 
 633 int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
 634                     int qid, unsigned long accept_flags)
 635 {
 636         struct bnx2x_rx_mode_ramrod_params ramrod;
 637 
 638         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 639 
 640         bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
 641         set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
 642         vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
 643         return bnx2x_config_rx_mode(bp, &ramrod);
 644 }
 645 
 646 int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
 647 {
 648         int rc;
 649 
 650         DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);
 651 
 652         /* Remove all classification configuration for leading queue */
 653         if (qid == LEADING_IDX) {
 654                 rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
 655                 if (rc)
 656                         goto op_err;
 657 
 658                 /* Remove filtering if feasible */
 659                 if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 660                         rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 661                                                      false,
 662                                                      BNX2X_VF_FILTER_VLAN_MAC);
 663                         if (rc)
 664                                 goto op_err;
 665                         rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 666                                                      false,
 667                                                      BNX2X_VF_FILTER_VLAN);
 668                         if (rc)
 669                                 goto op_err;
 670                         rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
 671                                                      false,
 672                                                      BNX2X_VF_FILTER_MAC);
 673                         if (rc)
 674                                 goto op_err;
 675                         rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
 676                         if (rc)
 677                                 goto op_err;
 678                 }
 679         }
 680 
 681         /* Destroy queue */
 682         rc = bnx2x_vf_queue_destroy(bp, vf, qid);
 683         if (rc)
 684                 goto op_err;
 685         return rc;
 686 op_err:
 687         BNX2X_ERR("vf[%d:%d] error: rc %d\n",
 688                   vf->abs_vfid, qid, rc);
 689         return rc;
 690 }
 691 
  692 /* VF enable primitives
  693  * When pretend is required, the caller is responsible
  694  * for calling pretend prior to calling these routines.
  695  */
 696 
  697 /* internal vf enable - until the vf is enabled internally, all transactions
  698  * are blocked. This routine should always be called last, with pretend active.
  699  */
 700 static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
 701 {
 702         REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
 703 }
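/* Illustrative call pattern (mirroring bnx2x_vf_enable_access() below):
 * the write only affects the function currently being "pretended", so the
 * caller brackets it with pretend/un-pretend:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	bnx2x_vf_enable_internal(bp, true);
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */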
 704 
 705 /* clears vf error in all semi blocks */
 706 static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
 707 {
 708         REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
 709         REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
 710         REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
 711         REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
 712 }
 713 
 714 static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
 715 {
 716         u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
 717         u32 was_err_reg = 0;
 718 
 719         switch (was_err_group) {
 720         case 0:
 721             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
 722             break;
 723         case 1:
 724             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
 725             break;
 726         case 2:
 727             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
 728             break;
 729         case 3:
 730             was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
 731             break;
 732         }
 733         REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
 734 }
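/* Worked example (hypothetical values): on path 0 with abs_vfid = 40,
 * was_err_group = (2 * 0 + 40) >> 5 = 1, selecting
 * PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR, and the write sets
 * bit (40 & 0x1f) = 8, clearing the "was error" latch for VF 40.
 */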
 735 
 736 static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 737 {
 738         int i;
 739         u32 val;
 740 
 741         /* Set VF masks and configuration - pretend */
 742         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 743 
 744         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
 745         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
 746         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
 747         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
 748         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
 749         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
 750 
 751         val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 752         val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
 753         val &= ~IGU_VF_CONF_PARENT_MASK;
 754         val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 755         REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
 756 
 757         DP(BNX2X_MSG_IOV,
 758            "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
 759            vf->abs_vfid, val);
 760 
 761         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 762 
 763         /* iterate over all queues, clear sb consumer */
 764         for (i = 0; i < vf_sb_count(vf); i++) {
 765                 u8 igu_sb_id = vf_igu_sb(vf, i);
 766 
 767                 /* zero prod memory */
 768                 REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
 769 
 770                 /* clear sb state machine */
 771                 bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
 772                                        false /* VF */);
 773 
 774                 /* disable + update */
 775                 bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
 776                                     IGU_INT_DISABLE, 1);
 777         }
 778 }
 779 
 780 void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
 781 {
 782         /* set the VF-PF association in the FW */
 783         storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
 784         storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
 785 
  786         /* clear vf errors */
 787         bnx2x_vf_semi_clear_err(bp, abs_vfid);
 788         bnx2x_vf_pglue_clear_err(bp, abs_vfid);
 789 
 790         /* internal vf-enable - pretend */
 791         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 792         DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
 793         bnx2x_vf_enable_internal(bp, true);
 794         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 795 }
 796 
 797 static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
 798 {
  799         /* Reset vf in IGU - interrupts are still disabled */
 800         bnx2x_vf_igu_reset(bp, vf);
 801 
 802         /* pretend to enable the vf with the PBF */
 803         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 804         REG_WR(bp, PBF_REG_DISABLE_VF, 0);
 805         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 806 }
 807 
 808 static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
 809 {
 810         struct pci_dev *dev;
 811         struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
 812 
 813         if (!vf)
 814                 return false;
 815 
 816         dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
 817         if (dev)
 818                 return bnx2x_is_pcie_pending(dev);
 819         return false;
 820 }
 821 
 822 int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
 823 {
 824         /* Verify no pending pci transactions */
 825         if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
 826                 BNX2X_ERR("PCIE Transactions still pending\n");
 827 
 828         return 0;
 829 }
 830 
 831 /* must be called after the number of PF queues and the number of VFs are
 832  * both known
 833  */
 834 static void
 835 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 836 {
 837         struct vf_pf_resc_request *resc = &vf->alloc_resc;
 838 
 839         /* will be set only during VF-ACQUIRE */
 840         resc->num_rxqs = 0;
 841         resc->num_txqs = 0;
 842 
 843         resc->num_mac_filters = VF_MAC_CREDIT_CNT;
 844         resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 845 
 846         /* no real limitation */
 847         resc->num_mc_filters = 0;
 848 
 849         /* num_sbs already set */
 850         resc->num_sbs = vf->sb_count;
 851 }
 852 
 853 /* FLR routines: */
 854 static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 855 {
 856         /* reset the state variables */
 857         bnx2x_iov_static_resc(bp, vf);
 858         vf->state = VF_FREE;
 859 }
 860 
 861 static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
 862 {
 863         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
 864 
 865         /* DQ usage counter */
 866         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
 867         bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
 868                                         "DQ VF usage counter timed out",
 869                                         poll_cnt);
 870         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 871 
 872         /* FW cleanup command - poll for the results */
 873         if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
 874                                    poll_cnt))
 875                 BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
 876 
 877         /* verify TX hw is flushed */
 878         bnx2x_tx_hw_flushed(bp, poll_cnt);
 879 }
 880 
 881 static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
 882 {
 883         int rc, i;
 884 
 885         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
 886 
 887         /* the cleanup operations are valid if and only if the VF
 888          * was first acquired.
 889          */
 890         for (i = 0; i < vf_rxq_count(vf); i++) {
 891                 rc = bnx2x_vf_queue_flr(bp, vf, i);
 892                 if (rc)
 893                         goto out;
 894         }
 895 
 896         /* remove multicasts */
 897         bnx2x_vf_mcast(bp, vf, NULL, 0, true);
 898 
 899         /* dispatch final cleanup and wait for HW queues to flush */
 900         bnx2x_vf_flr_clnup_hw(bp, vf);
 901 
 902         /* release VF resources */
 903         bnx2x_vf_free_resc(bp, vf);
 904 
 905         vf->malicious = false;
 906 
 907         /* re-open the mailbox */
 908         bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
 909         return;
 910 out:
 911         BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
 912                   vf->abs_vfid, i, rc);
 913 }
 914 
 915 static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
 916 {
 917         struct bnx2x_virtf *vf;
 918         int i;
 919 
 920         for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
 921                 /* VF should be RESET & in FLR cleanup states */
 922                 if (bnx2x_vf(bp, i, state) != VF_RESET ||
 923                     !bnx2x_vf(bp, i, flr_clnup_stage))
 924                         continue;
 925 
 926                 DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
 927                    i, BNX2X_NR_VIRTFN(bp));
 928 
 929                 vf = BP_VF(bp, i);
 930 
 931                 /* lock the vf pf channel */
 932                 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 933 
 934                 /* invoke the VF FLR SM */
 935                 bnx2x_vf_flr(bp, vf);
 936 
 937                 /* mark the VF to be ACKED and continue */
 938                 vf->flr_clnup_stage = false;
 939                 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
 940         }
 941 
  942         /* Acknowledge the handled VFs.
  943          * We acknowledge all the VFs for which an FLR was requested, even
  944          * those we never opened, since the mcp will interrupt us immediately
  945          * again if we only ack some of the bits, resulting in an endless
  946          * loop. This can happen for example in KVM, where an 'all ones' FLR
  947          * request is sometimes given by the hypervisor
  948          */
 949         DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
 950            bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
 951         for (i = 0; i < FLRD_VFS_DWORDS; i++)
 952                 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
 953                           bp->vfdb->flrd_vfs[i]);
 954 
 955         bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
 956 
  957         /* clear the acked bits - better yet, the MCP could implement
  958          * write-to-clear semantics
  959          */
 960         for (i = 0; i < FLRD_VFS_DWORDS; i++)
 961                 SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
 962 }
 963 
 964 void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
 965 {
 966         int i;
 967 
 968         /* Read FLR'd VFs */
 969         for (i = 0; i < FLRD_VFS_DWORDS; i++)
 970                 bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
 971 
 972         DP(BNX2X_MSG_MCP,
 973            "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
 974            bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
 975 
 976         for_each_vf(bp, i) {
 977                 struct bnx2x_virtf *vf = BP_VF(bp, i);
 978                 u32 reset = 0;
 979 
 980                 if (vf->abs_vfid < 32)
 981                         reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
 982                 else
 983                         reset = bp->vfdb->flrd_vfs[1] &
 984                                 (1 << (vf->abs_vfid - 32));
 985 
 986                 if (reset) {
 987                         /* set as reset and ready for cleanup */
 988                         vf->state = VF_RESET;
 989                         vf->flr_clnup_stage = true;
 990 
 991                         DP(BNX2X_MSG_IOV,
 992                            "Initiating Final cleanup for VF %d\n",
 993                            vf->abs_vfid);
 994                 }
 995         }
 996 
  997         /* do the FLR cleanup for all marked VFs */
 998         bnx2x_vf_flr_clnup(bp);
 999 }
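/* The abs_vfid < 32 test above is just the unrolled form of indexing the
 * two 32-bit shmem dwords; an equivalent spelling would be:
 *
 *	reset = bp->vfdb->flrd_vfs[vf->abs_vfid / 32] &
 *		(1 << (vf->abs_vfid % 32));
 *
 * i.e. dword 0 covers abs_vfids 0-31 and dword 1 covers abs_vfids 32-63.
 */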
1000 
1001 /* IOV global initialization routines  */
1002 void bnx2x_iov_init_dq(struct bnx2x *bp)
1003 {
1004         if (!IS_SRIOV(bp))
1005                 return;
1006 
 1007         /* Set the DQ such that the CIDs reflect the abs_vfid */
1008         REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
1009         REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
1010 
 1011         /* Set the VFs' starting CID. If it is > 0, the preceding CIDs belong
 1012          * to the PF L2 queues
 1013          */
1014         REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
1015 
1016         /* The VF window size is the log2 of the max number of CIDs per VF */
1017         REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
1018 
 1019         /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
 1020          * the PF doorbell size, although the two are independent.
 1021          */
1022         REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
1023 
1024         /* No security checks for now -
1025          * configure single rule (out of 16) mask = 0x1, value = 0x0,
1026          * CID range 0 - 0x1ffff
1027          */
1028         REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
1029         REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
1030         REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
1031         REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
1032 
 1033         /* set the VF doorbell threshold. This threshold represents the number
 1034          * of doorbells allowed in the main DORQ fifo for a specific VF.
 1035          */
1036         REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
1037 }
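/* A sanity check on the doorbell-size encoding (an inference from the
 * comment above, not from documentation): if the field encodes
 * size = 8B << val - consistent with the stated "4 - 128B", since
 * 8 << 4 = 128 - then the value 3 written to DORQ_REG_VF_NORM_CID_OFST
 * selects 8 << 3 = 64B doorbells, matching the PF's.
 */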
1038 
1039 void bnx2x_iov_init_dmae(struct bnx2x *bp)
1040 {
1041         if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1042                 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1043 }
1044 
1045 static int bnx2x_vf_domain(struct bnx2x *bp, int vfid)
1046 {
1047         struct pci_dev *dev = bp->pdev;
1048 
1049         return pci_domain_nr(dev->bus);
1050 }
1051 
1052 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
1053 {
1054         struct pci_dev *dev = bp->pdev;
1055         struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1056 
1057         return dev->bus->number + ((dev->devfn + iov->offset +
1058                                     iov->stride * vfid) >> 8);
1059 }
1060 
1061 static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
1062 {
1063         struct pci_dev *dev = bp->pdev;
1064         struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1065 
1066         return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
1067 }
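/* Worked example (hypothetical config-space values): per the SR-IOV spec,
 * VF n's routing ID is PF_devfn + VF_OFFSET + VF_STRIDE * n. With the PF
 * at devfn 0x00 on bus 4, offset = 16 and stride = 1, VF 3 has routing ID
 * 0 + 16 + 1 * 3 = 19 = 0x13; bnx2x_vf_bus() returns 4 + (19 >> 8) = 4 and
 * bnx2x_vf_devfn() returns 19 & 0xff = 0x13 (device 2, function 3).
 */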
1068 
1069 static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
1070 {
1071         int i, n;
1072         struct pci_dev *dev = bp->pdev;
1073         struct bnx2x_sriov *iov = &bp->vfdb->sriov;
1074 
1075         for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
1076                 u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
1077                 u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
1078 
1079                 size /= iov->total;
1080                 vf->bars[n].bar = start + size * vf->abs_vfid;
1081                 vf->bars[n].size = size;
1082         }
1083 }
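/* Worked example (hypothetical sizes): the SR-IOV BARs are contiguous
 * arrays carved up per VF. If the IOV BAR pair at PCI_IOV_RESOURCES spans
 * 8 MB and iov->total = 64, each VF gets 8 MB / 64 = 128 KB, and the VF
 * with abs_vfid = 5 is assigned bars[0].bar = start + 5 * 128 KB. Note the
 * split uses iov->total (the capability maximum), not nr_virtfn.
 */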
1084 
1085 static int
1086 bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
1087 {
1088         int sb_id;
1089         u32 val;
1090         u8 fid, current_pf = 0;
1091 
1092         /* IGU in normal mode - read CAM */
1093         for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
1094                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
1095                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
1096                         continue;
1097                 fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
1098                 if (fid & IGU_FID_ENCODE_IS_PF)
1099                         current_pf = fid & IGU_FID_PF_NUM_MASK;
1100                 else if (current_pf == BP_FUNC(bp))
1101                         bnx2x_vf_set_igu_info(bp, sb_id,
1102                                               (fid & IGU_FID_VF_NUM_MASK));
1103                 DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
1104                    ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
1105                    ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
1106                    (fid & IGU_FID_VF_NUM_MASK)), sb_id,
1107                    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
1108         }
1109         DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
1110         return BP_VFDB(bp)->vf_sbs_pool;
1111 }
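/* The current_pf tracking above relies on the IGU CAM layout: a PF's own
 * status-block entries appear before the entries of its VFs, ahead of the
 * next PF's block. A sketch of the assumed ordering:
 *
 *	[PF0 sb, PF0 sb, ..., PF0-VF sb, PF0-VF sb, ..., PF1 sb, ...]
 *
 * so any VF entry is attributed to the most recently seen PF entry.
 */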
1112 
1113 static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
1114 {
1115         if (bp->vfdb) {
1116                 kfree(bp->vfdb->vfqs);
1117                 kfree(bp->vfdb->vfs);
1118                 kfree(bp->vfdb);
1119         }
1120         bp->vfdb = NULL;
1121 }
1122 
1123 static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1124 {
1125         int pos;
1126         struct pci_dev *dev = bp->pdev;
1127 
1128         pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1129         if (!pos) {
1130                 BNX2X_ERR("failed to find SRIOV capability in device\n");
1131                 return -ENODEV;
1132         }
1133 
1134         iov->pos = pos;
1135         DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
1136         pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
1137         pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
1138         pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
1139         pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
1140         pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
1141         pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
1142         pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
1143         pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
1144 
1145         return 0;
1146 }
1147 
1148 static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
1149 {
1150         u32 val;
1151 
1152         /* read the SRIOV capability structure
1153          * The fields can be read via configuration read or
1154          * directly from the device (starting at offset PCICFG_OFFSET)
1155          */
1156         if (bnx2x_sriov_pci_cfg_info(bp, iov))
1157                 return -ENODEV;
1158 
1159         /* get the number of SRIOV bars */
1160         iov->nres = 0;
1161 
1162         /* read the first_vfid */
1163         val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
1164         iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
1165                                * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
1166 
1167         DP(BNX2X_MSG_IOV,
1168            "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
1169            BP_FUNC(bp),
1170            iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
1171            iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
1172 
1173         return 0;
1174 }
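/* Worked example (hypothetical register value): the GRC field holds the
 * first VF number in units of 8 (hence the * 8 above). If the masked field
 * reads 9 on path 1, and BNX2X_MAX_NUM_OF_VFS is 64 (per bnx2x.h), then
 * first_vf_in_pf = 9 * 8 - 64 * 1 = 8, i.e. this PF's VFs start at
 * path-local abs_vfid 8.
 */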
1175 
1176 /* must be called after PF bars are mapped */
1177 int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1178                        int num_vfs_param)
1179 {
1180         int err, i;
1181         struct bnx2x_sriov *iov;
1182         struct pci_dev *dev = bp->pdev;
1183 
1184         bp->vfdb = NULL;
1185 
 1186         /* verify this is a PF */
1187         if (IS_VF(bp))
1188                 return 0;
1189 
1190         /* verify sriov capability is present in configuration space */
1191         if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
1192                 return 0;
1193 
1194         /* verify chip revision */
1195         if (CHIP_IS_E1x(bp))
1196                 return 0;
1197 
1198         /* check if SRIOV support is turned off */
1199         if (!num_vfs_param)
1200                 return 0;
1201 
1202         /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
1203         if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
1204                 BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
1205                           BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
1206                 return 0;
1207         }
1208 
1209         /* SRIOV can be enabled only with MSIX */
1210         if (int_mode_param == BNX2X_INT_MODE_MSI ||
1211             int_mode_param == BNX2X_INT_MODE_INTX) {
1212                 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1213                 return 0;
1214         }
1215 
1216         err = -EIO;
1217         /* verify ari is enabled */
1218         if (!pci_ari_enabled(bp->pdev->bus)) {
1219                 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
1220                 return 0;
1221         }
1222 
1223         /* verify igu is in normal mode */
1224         if (CHIP_INT_MODE_IS_BC(bp)) {
1225                 BNX2X_ERR("IGU not normal mode,  SRIOV can not be enabled\n");
1226                 return 0;
1227         }
1228 
1229         /* allocate the vfs database */
1230         bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
1231         if (!bp->vfdb) {
1232                 BNX2X_ERR("failed to allocate vf database\n");
1233                 err = -ENOMEM;
1234                 goto failed;
1235         }
1236 
 1237         /* get the sriov info - Linux already collected all the pertinent
 1238          * information; however, the sriov structure is for the private use
 1239          * of the pci module. Also, we want this information regardless
 1240          * of the hypervisor.
 1241          */
1242         iov = &(bp->vfdb->sriov);
1243         err = bnx2x_sriov_info(bp, iov);
1244         if (err)
1245                 goto failed;
1246 
 1247         /* SR-IOV capability was enabled but there are no VFs */
1248         if (iov->total == 0)
1249                 goto failed;
1250 
1251         iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1252 
1253         DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1254            num_vfs_param, iov->nr_virtfn);
1255 
1256         /* allocate the vf array */
1257         bp->vfdb->vfs = kcalloc(BNX2X_NR_VIRTFN(bp),
1258                                 sizeof(struct bnx2x_virtf),
1259                                 GFP_KERNEL);
1260         if (!bp->vfdb->vfs) {
1261                 BNX2X_ERR("failed to allocate vf array\n");
1262                 err = -ENOMEM;
1263                 goto failed;
1264         }
1265 
1266         /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
1267         for_each_vf(bp, i) {
1268                 bnx2x_vf(bp, i, index) = i;
1269                 bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
1270                 bnx2x_vf(bp, i, state) = VF_FREE;
1271                 mutex_init(&bnx2x_vf(bp, i, op_mutex));
1272                 bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
1273                 /* enable spoofchk by default */
1274                 bnx2x_vf(bp, i, spoofchk) = 1;
1275         }
1276 
1277         /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
1278         if (!bnx2x_get_vf_igu_cam_info(bp)) {
1279                 BNX2X_ERR("No entries in IGU CAM for vfs\n");
1280                 err = -EINVAL;
1281                 goto failed;
1282         }
1283 
1284         /* allocate the queue arrays for all VFs */
1285         bp->vfdb->vfqs = kcalloc(BNX2X_MAX_NUM_VF_QUEUES,
1286                                  sizeof(struct bnx2x_vf_queue),
1287                                  GFP_KERNEL);
1288 
1289         if (!bp->vfdb->vfqs) {
1290                 BNX2X_ERR("failed to allocate vf queue array\n");
1291                 err = -ENOMEM;
1292                 goto failed;
1293         }
1294 
1295         /* Prepare the VFs event synchronization mechanism */
1296         mutex_init(&bp->vfdb->event_mutex);
1297 
1298         mutex_init(&bp->vfdb->bulletin_mutex);
1299 
1300         if (SHMEM2_HAS(bp, sriov_switch_mode))
1301                 SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
1302 
1303         return 0;
1304 failed:
1305         DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
1306         __bnx2x_iov_free_vfdb(bp);
1307         return err;
1308 }
1309 
1310 void bnx2x_iov_remove_one(struct bnx2x *bp)
1311 {
1312         int vf_idx;
1313 
1314         /* if SRIOV is not enabled there's nothing to do */
1315         if (!IS_SRIOV(bp))
1316                 return;
1317 
1318         bnx2x_disable_sriov(bp);
1319 
1320         /* disable access to all VFs */
1321         for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
1322                 bnx2x_pretend_func(bp,
1323                                    HW_VF_HANDLE(bp,
1324                                                 bp->vfdb->sriov.first_vf_in_pf +
1325                                                 vf_idx));
1326                 DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
1327                    bp->vfdb->sriov.first_vf_in_pf + vf_idx);
1328                 bnx2x_vf_enable_internal(bp, 0);
1329                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1330         }
1331 
1332         /* free vf database */
1333         __bnx2x_iov_free_vfdb(bp);
1334 }
1335 
1336 void bnx2x_iov_free_mem(struct bnx2x *bp)
1337 {
1338         int i;
1339 
1340         if (!IS_SRIOV(bp))
1341                 return;
1342 
1343         /* free vfs hw contexts */
1344         for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1345                 struct hw_dma *cxt = &bp->vfdb->context[i];
1346                 BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
1347         }
1348 
1349         BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
1350                        BP_VFDB(bp)->sp_dma.mapping,
1351                        BP_VFDB(bp)->sp_dma.size);
1352 
1353         BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
1354                        BP_VF_MBX_DMA(bp)->mapping,
1355                        BP_VF_MBX_DMA(bp)->size);
1356 
1357         BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
1358                        BP_VF_BULLETIN_DMA(bp)->mapping,
1359                        BP_VF_BULLETIN_DMA(bp)->size);
1360 }
1361 
1362 int bnx2x_iov_alloc_mem(struct bnx2x *bp)
1363 {
1364         size_t tot_size;
1365         int i, rc = 0;
1366 
1367         if (!IS_SRIOV(bp))
1368                 return rc;
1369 
1370         /* allocate vfs hw contexts */
1371         tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
1372                 BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
1373 
1374         for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1375                 struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
1376                 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
1377 
1378                 if (cxt->size) {
1379                         cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
1380                         if (!cxt->addr)
1381                                 goto alloc_mem_err;
1382                 } else {
1383                         cxt->addr = NULL;
1384                         cxt->mapping = 0;
1385                 }
1386                 tot_size -= cxt->size;
1387         }
1388 
1389         /* allocate vfs ramrods dma memory - client_init and set_mac */
1390         tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
1391         BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
1392                                                    tot_size);
1393         if (!BP_VFDB(bp)->sp_dma.addr)
1394                 goto alloc_mem_err;
1395         BP_VFDB(bp)->sp_dma.size = tot_size;
1396 
1397         /* allocate mailboxes */
1398         tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
1399         BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
1400                                                   tot_size);
1401         if (!BP_VF_MBX_DMA(bp)->addr)
1402                 goto alloc_mem_err;
1403 
1404         BP_VF_MBX_DMA(bp)->size = tot_size;
1405 
1406         /* allocate local bulletin boards */
1407         tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
1408         BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
1409                                                        tot_size);
1410         if (!BP_VF_BULLETIN_DMA(bp)->addr)
1411                 goto alloc_mem_err;
1412 
1413         BP_VF_BULLETIN_DMA(bp)->size = tot_size;
1414 
1415         return 0;
1416 
1417 alloc_mem_err:
1418         return -ENOMEM;
1419 }
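
/* A walk-through of the chunking above, with invented numbers: if tot_size
 * works out to half of one CDU_ILT_PAGE_SZ, the first hw_dma entry gets
 * size = tot_size and a real DMA allocation, while every remaining entry is
 * left with addr == NULL, mapping == 0 and size == 0. With tot_size spanning
 * several pages, each iteration carves off min(tot_size, CDU_ILT_PAGE_SZ)
 * until nothing remains.
 */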
1420 
1421 static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
1422                            struct bnx2x_vf_queue *q)
1423 {
1424         u8 cl_id = vfq_cl_id(vf, q);
1425         u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
1426         unsigned long q_type = 0;
1427 
1428         set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
1429         set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
1430 
1431         /* Queue State object */
1432         bnx2x_init_queue_obj(bp, &q->sp_obj,
1433                              cl_id, &q->cid, 1, func_id,
1434                              bnx2x_vf_sp(bp, vf, q_data),
1435                              bnx2x_vf_sp_map(bp, vf, q_data),
1436                              q_type);
1437 
1438         /* sp indication is set only when vlan/mac/etc. are initialized */
1439         q->sp_initialized = false;
1440 
1441         DP(BNX2X_MSG_IOV,
1442            "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
1443            vf->abs_vfid, q->sp_obj.func_id, q->cid);
1444 }
1445 
1446 static int bnx2x_max_speed_cap(struct bnx2x *bp)
1447 {
1448         u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
1449 
1450         if (supported &
1451             (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
1452                 return 20000;
1453 
1454         return 10000; /* assume lowest supported speed is 10G */
1455 }
1456 
1457 int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
1458 {
1459         struct bnx2x_link_report_data *state = &bp->last_reported_link;
1460         struct pf_vf_bulletin_content *bulletin;
1461         struct bnx2x_virtf *vf;
1462         bool update = true;
1463         int rc = 0;
1464 
1465         /* sanity and init */
1466         rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
1467         if (rc)
1468                 return rc;
1469 
1470         mutex_lock(&bp->vfdb->bulletin_mutex);
1471 
1472         if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
1473                 bulletin->valid_bitmap |= 1 << LINK_VALID;
1474 
1475                 bulletin->link_speed = state->line_speed;
1476                 bulletin->link_flags = 0;
1477                 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1478                              &state->link_report_flags))
1479                         bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1480                 if (test_bit(BNX2X_LINK_REPORT_FD,
1481                              &state->link_report_flags))
1482                         bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
1483                 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1484                              &state->link_report_flags))
1485                         bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
1486                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1487                              &state->link_report_flags))
1488                         bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
1489         } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
1490                    !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1491                 bulletin->valid_bitmap |= 1 << LINK_VALID;
1492                 bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
1493         } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
1494                    (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
1495                 bulletin->valid_bitmap |= 1 << LINK_VALID;
1496                 bulletin->link_speed = bnx2x_max_speed_cap(bp);
1497                 bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
1498         } else {
1499                 update = false;
1500         }
1501 
1502         if (update) {
1503                 DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
1504                    "vf %d mode %u speed %d flags %x\n", idx,
1505                    vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
1506 
1507                 /* Post update on VF's bulletin board */
1508                 rc = bnx2x_post_vf_bulletin(bp, idx);
1509                 if (rc) {
1510                         BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
1511                         goto out;
1512                 }
1513         }
1514 
1515 out:
1516         mutex_unlock(&bp->vfdb->bulletin_mutex);
1517         return rc;
1518 }
1519 
1520 int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
1521 {
1522         struct bnx2x *bp = netdev_priv(dev);
1523         struct bnx2x_virtf *vf = BP_VF(bp, idx);
1524 
1525         if (!vf)
1526                 return -EINVAL;
1527 
1528         if (vf->link_cfg == link_state)
1529                 return 0; /* nothing to do */
1530 
1531         vf->link_cfg = link_state;
1532 
1533         return bnx2x_iov_link_update_vf(bp, idx);
1534 }
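
/* The function above serves as the driver's .ndo_set_vf_link_state hook;
 * link_state carries IFLA_VF_LINK_STATE_{AUTO,ENABLE,DISABLE}, reached e.g.
 * via "ip link set <pf-netdev> vf <idx> state auto" (iproute2 command shown
 * only as an illustrative entry path).
 */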
1535 
1536 void bnx2x_iov_link_update(struct bnx2x *bp)
1537 {
1538         int vfid;
1539 
1540         if (!IS_SRIOV(bp))
1541                 return;
1542 
1543         for_each_vf(bp, vfid)
1544                 bnx2x_iov_link_update_vf(bp, vfid);
1545 }
1546 
1547 /* called by bnx2x_nic_load */
1548 int bnx2x_iov_nic_init(struct bnx2x *bp)
1549 {
1550         int vfid;
1551 
1552         if (!IS_SRIOV(bp)) {
1553                 DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
1554                 return 0;
1555         }
1556 
1557         DP(BNX2X_MSG_IOV, "num of vfs: %d\n", bp->vfdb->sriov.nr_virtfn);
1558 
1559         /* let FLR complete ... */
1560         msleep(100);
1561 
1562         /* initialize vf database */
1563         for_each_vf(bp, vfid) {
1564                 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1565 
1566                 int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
1567                         BNX2X_CIDS_PER_VF;
1568 
1569                 union cdu_context *base_cxt = (union cdu_context *)
1570                         BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
1571                         (base_vf_cid & (ILT_PAGE_CIDS-1));
1572 
1573                 DP(BNX2X_MSG_IOV,
1574                    "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
1575                    vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
1576                    BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
1577 
1578                 /* init statically provisioned resources */
1579                 bnx2x_iov_static_resc(bp, vf);
1580 
1581                 /* queues are initialized during VF-ACQUIRE */
1582                 vf->filter_state = 0;
1583                 vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
1584 
1585                 bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
1586                                        vf_vlan_rules_cnt(vf));
1587                 bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
1588                                        vf_mac_rules_cnt(vf));
1589 
1590                 /*  init mcast object - This object will be re-initialized
1591                  *  during VF-ACQUIRE with the proper cl_id and cid.
1592                  *  It needs to be initialized here so that it can be safely
1593                  *  handled by a subsequent FLR flow.
1594                  */
1595                 bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
1596                                      0xFF, 0xFF, 0xFF,
1597                                      bnx2x_vf_sp(bp, vf, mcast_rdata),
1598                                      bnx2x_vf_sp_map(bp, vf, mcast_rdata),
1599                                      BNX2X_FILTER_MCAST_PENDING,
1600                                      &vf->filter_state,
1601                                      BNX2X_OBJ_TYPE_RX_TX);
1602 
1603                 /* set the mailbox message addresses */
1604                 BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
1605                         (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
1606                         MBX_MSG_ALIGNED_SIZE);
1607 
1608                 BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
1609                         vfid * MBX_MSG_ALIGNED_SIZE;
1610 
1611                 /* Enable vf mailbox */
1612                 bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
1613         }
1614 
1615         /* Final VF init */
1616         for_each_vf(bp, vfid) {
1617                 struct bnx2x_virtf *vf = BP_VF(bp, vfid);
1618 
1619                 /* fill in the BDF and bars */
1620                 vf->domain = bnx2x_vf_domain(bp, vfid);
1621                 vf->bus = bnx2x_vf_bus(bp, vfid);
1622                 vf->devfn = bnx2x_vf_devfn(bp, vfid);
1623                 bnx2x_vf_set_bars(bp, vf);
1624 
1625                 DP(BNX2X_MSG_IOV,
1626                    "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
1627                    vf->abs_vfid, vf->bus, vf->devfn,
1628                    (unsigned)vf->bars[0].bar, vf->bars[0].size,
1629                    (unsigned)vf->bars[1].bar, vf->bars[1].size,
1630                    (unsigned)vf->bars[2].bar, vf->bars[2].size);
1631         }
1632 
1633         return 0;
1634 }
1635 
1636 /* called by bnx2x_chip_cleanup */
1637 int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
1638 {
1639         int i;
1640 
1641         if (!IS_SRIOV(bp))
1642                 return 0;
1643 
1644         /* release all the VFs */
1645         for_each_vf(bp, i)
1646                 bnx2x_vf_release(bp, BP_VF(bp, i));
1647 
1648         return 0;
1649 }
1650 
1651 /* called by bnx2x_init_hw_func, returns the next ilt line */
1652 int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
1653 {
1654         int i;
1655         struct bnx2x_ilt *ilt = BP_ILT(bp);
1656 
1657         if (!IS_SRIOV(bp))
1658                 return line;
1659 
1660         /* set vfs ilt lines */
1661         for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
1662                 struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
1663 
1664                 ilt->lines[line+i].page = hw_cxt->addr;
1665                 ilt->lines[line+i].page_mapping = hw_cxt->mapping;
1666                 ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
1667         }
1668         return line + i;
1669 }
1670 
1671 static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
1672 {
1673         return ((cid >= BNX2X_FIRST_VF_CID) &&
1674                 ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
1675 }
1676 
1677 static
1678 void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1679                                         struct bnx2x_vf_queue *vfq,
1680                                         union event_ring_elem *elem)
1681 {
1682         unsigned long ramrod_flags = 0;
1683         int rc = 0;
1684         u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1685 
1686         /* Always push next commands out, don't wait here */
1687         set_bit(RAMROD_CONT, &ramrod_flags);
1688 
1689         switch (echo >> BNX2X_SWCID_SHIFT) {
1690         case BNX2X_FILTER_MAC_PENDING:
1691                 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1692                                            &ramrod_flags);
1693                 break;
1694         case BNX2X_FILTER_VLAN_PENDING:
1695                 rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
1696                                             &ramrod_flags);
1697                 break;
1698         default:
1699                 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1700                 return;
1701         }
1702         if (rc < 0)
1703                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
1704         else if (rc > 0)
1705                 DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
1706 }
1707 
1708 static
1709 void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
1710                                struct bnx2x_virtf *vf)
1711 {
1712         struct bnx2x_mcast_ramrod_params rparam = {NULL};
1713         int rc;
1714 
1715         rparam.mcast_obj = &vf->mcast_obj;
1716         vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
1717 
1718         /* If there are pending mcast commands - send them */
1719         if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
1720                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1721                 if (rc < 0)
1722                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
1723                                   rc);
1724         }
1725 }
1726 
1727 static
1728 void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
1729                                  struct bnx2x_virtf *vf)
1730 {
1731         smp_mb__before_atomic();
1732         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
1733         smp_mb__after_atomic();
1734 }
1735 
1736 static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
1737                                            struct bnx2x_virtf *vf)
1738 {
1739         vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
1740 }
1741 
1742 int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1743 {
1744         struct bnx2x_virtf *vf;
1745         int qidx = 0, abs_vfid;
1746         u8 opcode;
1747         u16 cid = 0xffff;
1748 
1749         if (!IS_SRIOV(bp))
1750                 return 1;
1751 
1752         /* first get the cid - events here carry either a cid (cfc-delete,
1753          * classification/filter completions) or a vf id (FLR, malicious-VF)
1754          */
1755         opcode = elem->message.opcode;
1756 
1757         switch (opcode) {
1758         case EVENT_RING_OPCODE_CFC_DEL:
1759                 cid = SW_CID(elem->message.data.cfc_del_event.cid);
1760                 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1761                 break;
1762         case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1763         case EVENT_RING_OPCODE_MULTICAST_RULES:
1764         case EVENT_RING_OPCODE_FILTERS_RULES:
1765         case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1766                 cid = SW_CID(elem->message.data.eth_event.echo);
1767                 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1768                 break;
1769         case EVENT_RING_OPCODE_VF_FLR:
1770                 abs_vfid = elem->message.data.vf_flr_event.vf_id;
1771                 DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
1772                    abs_vfid);
1773                 goto get_vf;
1774         case EVENT_RING_OPCODE_MALICIOUS_VF:
1775                 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
1776                 BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
1777                           abs_vfid,
1778                           elem->message.data.malicious_vf_event.err_id);
1779                 goto get_vf;
1780         default:
1781                 return 1;
1782         }
1783 
1784         /* check if the cid is the VF range */
1785         if (!bnx2x_iov_is_vf_cid(bp, cid)) {
1786                 DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
1787                 return 1;
1788         }
1789 
1790         /* extract vf and rxq index from vf_cid - relies on the following:
1791          * 1. the vfid encoded in the cid reflects the true abs_vfid
1792          * 2. The max number of VFs (per path) is 64
1793          */
1794         qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
1795         abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
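        /* Worked example (BNX2X_VF_CID_WND assumed to be 4 purely for
         * illustration): cid = 0x2b gives qidx = 0x2b & 0xf = 11 and
         * abs_vfid = (0x2b >> 4) & (BNX2X_MAX_NUM_OF_VFS - 1) = 2 - the low
         * window bits select the queue, the remaining bits select the VF.
         */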
1796 get_vf:
1797         vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
1798 
1799         if (!vf) {
1800                 BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
1801                           cid, abs_vfid);
1802                 return 0;
1803         }
1804 
1805         switch (opcode) {
1806         case EVENT_RING_OPCODE_CFC_DEL:
1807                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
1808                    vf->abs_vfid, qidx);
1809                 vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
1810                                                        &vfq_get(vf,
1811                                                                 qidx)->sp_obj,
1812                                                        BNX2X_Q_CMD_CFC_DEL);
1813                 break;
1814         case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1815                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
1816                    vf->abs_vfid, qidx);
1817                 bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
1818                 break;
1819         case EVENT_RING_OPCODE_MULTICAST_RULES:
1820                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
1821                    vf->abs_vfid, qidx);
1822                 bnx2x_vf_handle_mcast_eqe(bp, vf);
1823                 break;
1824         case EVENT_RING_OPCODE_FILTERS_RULES:
1825                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
1826                    vf->abs_vfid, qidx);
1827                 bnx2x_vf_handle_filters_eqe(bp, vf);
1828                 break;
1829         case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1830                 DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
1831                    vf->abs_vfid, qidx);
1832                 bnx2x_vf_handle_rss_update_eqe(bp, vf);
1833                 /* fall through */
1834         case EVENT_RING_OPCODE_VF_FLR:
1835                 /* Do nothing for now */
1836                 return 0;
1837         case EVENT_RING_OPCODE_MALICIOUS_VF:
1838                 vf->malicious = true;
1839                 return 0;
1840         }
1841 
1842         return 0;
1843 }
1844 
1845 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1846 {
1847         /* extract the vf from vf_cid - relies on the following:
1848          * 1. the vfid encoded in the cid reflects the true abs_vfid
1849          * 2. The max number of VFs (per path) is 64
1850          */
1851         int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1852         return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1853 }
1854 
1855 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1856                                 struct bnx2x_queue_sp_obj **q_obj)
1857 {
1858         struct bnx2x_virtf *vf;
1859 
1860         if (!IS_SRIOV(bp))
1861                 return;
1862 
1863         vf = bnx2x_vf_by_cid(bp, vf_cid);
1864 
1865         if (vf) {
1866                 /* extract queue index from vf_cid - relies on the following:
1867                  * 1. the vfid encoded in the cid reflects the true abs_vfid
1868                  * 2. The max number of VFs (per path) is 64
1869                  */
1870                 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1871                 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1872         } else {
1873                 BNX2X_ERR("No vf matching cid %d\n", vf_cid);
1874         }
1875 }
1876 
1877 void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
1878 {
1879         int i;
1880         int first_queue_query_index, num_queues_req;
1881         dma_addr_t cur_data_offset;
1882         struct stats_query_entry *cur_query_entry;
1883         u8 stats_count = 0;
1884         bool is_fcoe = false;
1885 
1886         if (!IS_SRIOV(bp))
1887                 return;
1888 
1889         if (!NO_FCOE(bp))
1890                 is_fcoe = true;
1891 
1892         /* fcoe adds one global request and one queue request */
1893         num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
1894         first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
1895                 (is_fcoe ? 0 : 1);
1896 
1897         DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1898                "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non-virtual statistics query index is %d. Will add queries on top of that\n",
1899                BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
1900                first_queue_query_index + num_queues_req);
1901 
1902         cur_data_offset = bp->fw_stats_data_mapping +
1903                 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
1904                 num_queues_req * sizeof(struct per_queue_stats);
1905 
1906         cur_query_entry = &bp->fw_stats_req->
1907                 query[first_queue_query_index + num_queues_req];
1908 
1909         for_each_vf(bp, i) {
1910                 int j;
1911                 struct bnx2x_virtf *vf = BP_VF(bp, i);
1912 
1913                 if (vf->state != VF_ENABLED) {
1914                         DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1915                                "vf %d not enabled so no stats for it\n",
1916                                vf->abs_vfid);
1917                         continue;
1918                 }
1919 
1920                 if (vf->malicious) {
1921                         DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1922                                "vf %d malicious so no stats for it\n",
1923                                vf->abs_vfid);
1924                         continue;
1925                 }
1926 
1927                 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1928                        "add addresses for vf %d\n", vf->abs_vfid);
1929                 for_each_vfq(vf, j) {
1930                         struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
1931 
1932                         dma_addr_t q_stats_addr =
1933                                 vf->fw_stat_map + j * vf->stats_stride;
1934 
1935                         /* collect stats from active queues only */
1936                         if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
1937                             BNX2X_Q_LOGICAL_STATE_STOPPED)
1938                                 continue;
1939 
1940                         /* create stats query entry for this queue */
1941                         cur_query_entry->kind = STATS_TYPE_QUEUE;
1942                         cur_query_entry->index = vfq_stat_id(vf, rxq);
1943                         cur_query_entry->funcID =
1944                                 cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
1945                         cur_query_entry->address.hi =
1946                                 cpu_to_le32(U64_HI(q_stats_addr));
1947                         cur_query_entry->address.lo =
1948                                 cpu_to_le32(U64_LO(q_stats_addr));
1949                         DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
1950                                "added address %x %x for vf %d queue %d client %d\n",
1951                                cur_query_entry->address.hi,
1952                                cur_query_entry->address.lo,
1953                                cur_query_entry->funcID,
1954                                j, cur_query_entry->index);
1955                         cur_query_entry++;
1956                         cur_data_offset += sizeof(struct per_queue_stats);
1957                         stats_count++;
1958 
1959                         /* all stats are coalesced to the leading queue */
1960                         if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
1961                                 break;
1962                 }
1963         }
1964         bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
1965 }
1966 
1967 /* VF API helpers */
1968 static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
1969                                 u8 enable)
1970 {
1971         u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
1972         u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
1973 
1974         REG_WR(bp, reg, val);
1975 }
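
/* Worked example (treating bit 6 as the entry's enable/valid flag is an
 * assumption): enabling qid 5 for abs_vfid 3 writes val = 3 | (1 << 6) =
 * 0x43 to PXP_REG_HST_ZONE_PERMISSION_TABLE + 20; disabling writes 0.
 */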
1976 
1977 static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
1978 {
1979         int i;
1980 
1981         for_each_vfq(vf, i)
1982                 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
1983                                     vfq_qzone_id(vf, vfq_get(vf, i)), false);
1984 }
1985 
1986 static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
1987 {
1988         u32 val;
1989 
1990         /* clear the VF configuration - pretend */
1991         bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
1992         val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
1993         val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
1994                  IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
1995         REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
1996         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
1997 }
1998 
1999 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
2000 {
2001         return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
2002                      BNX2X_VF_MAX_QUEUES);
2003 }
2004 
2005 static
2006 int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
2007                             struct vf_pf_resc_request *req_resc)
2008 {
2009         u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2010         u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
2011 
2012         return ((req_resc->num_rxqs <= rxq_cnt) &&
2013                 (req_resc->num_txqs <= txq_cnt) &&
2014                 (req_resc->num_sbs <= vf_sb_count(vf))   &&
2015                 (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
2016                 (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
2017 }
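
/* Sketch of the check above with invented numbers: a VF holding 4 SBs and a
 * max queue count of 4 passes a request of {num_rxqs = 2, num_txqs = 2,
 * num_sbs = 4} (filter counts within their limits), but fails one asking
 * for num_sbs = 8.
 */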
2018 
2019 /* CORE VF API */
2020 int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
2021                      struct vf_pf_resc_request *resc)
2022 {
2023         int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
2024                 BNX2X_CIDS_PER_VF;
2025 
2026         union cdu_context *base_cxt = (union cdu_context *)
2027                 BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
2028                 (base_vf_cid & (ILT_PAGE_CIDS-1));
2029         int i;
2030 
2031         /* if state is 'acquired' the VF was not released or FLR'd, in
2032          * this case the returned resources match the already acquired
2033          * resources. Verify that the requested numbers do not exceed
2034          * the already acquired numbers.
2035          */
2036         if (vf->state == VF_ACQUIRED) {
2037                 DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
2038                    vf->abs_vfid);
2039 
2040                 if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2041                         BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
2042                                   vf->abs_vfid);
2043                         return -EINVAL;
2044                 }
2045                 return 0;
2046         }
2047 
2048         /* Otherwise vf state must be 'free' or 'reset' */
2049         if (vf->state != VF_FREE && vf->state != VF_RESET) {
2050                 BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
2051                           vf->abs_vfid, vf->state);
2052                 return -EINVAL;
2053         }
2054 
2055         /* static allocation:
2056          * the global maximum numbers are fixed per VF. Fail the request if
2057          * the requested numbers exceed these globals
2058          */
2059         if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
2060                 DP(BNX2X_MSG_IOV,
2061                    "cannot fulfill vf resource request. Placing maximal available values in response\n");
2062                 /* set the max resource in the vf */
2063                 return -ENOMEM;
2064         }
2065 
2066         /* Set resources counters - 0 request means max available */
2067         vf_sb_count(vf) = resc->num_sbs;
2068         vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2069         vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
2070 
2071         DP(BNX2X_MSG_IOV,
2072            "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
2073            vf_sb_count(vf), vf_rxq_count(vf),
2074            vf_txq_count(vf), vf_mac_rules_cnt(vf),
2075            vf_vlan_rules_cnt(vf));
2076 
2077         /* Initialize the queues */
2078         if (!vf->vfqs) {
2079                 DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
2080                 return -EINVAL;
2081         }
2082 
2083         for_each_vfq(vf, i) {
2084                 struct bnx2x_vf_queue *q = vfq_get(vf, i);
2085 
2086                 if (!q) {
2087                         BNX2X_ERR("q number %d was not allocated\n", i);
2088                         return -EINVAL;
2089                 }
2090 
2091                 q->index = i;
2092                 q->cxt = &((base_cxt + i)->eth);
2093                 q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
2094 
2095                 DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
2096                    vf->abs_vfid, i, q->index, q->cid, q->cxt);
2097 
2098                 /* init SP objects */
2099                 bnx2x_vfq_init(bp, vf, q);
2100         }
2101         vf->state = VF_ACQUIRED;
2102         return 0;
2103 }
2104 
2105 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2106 {
2107         struct bnx2x_func_init_params func_init = {0};
2108         int i;
2109 
2110         /* the sb resources are initialized at this point, do the
2111          * FW/HW initializations
2112          */
2113         for_each_vf_sb(vf, i)
2114                 bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
2115                               vf_igu_sb(vf, i), vf_igu_sb(vf, i));
2116 
2117         /* Sanity checks */
2118         if (vf->state != VF_ACQUIRED) {
2119                 DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
2120                    vf->abs_vfid, vf->state);
2121                 return -EINVAL;
2122         }
2123 
2124         /* let FLR complete ... */
2125         msleep(100);
2126 
2127         /* FLR cleanup epilogue */
2128         if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
2129                 return -EBUSY;
2130 
2131         /* reset IGU VF statistics: MSIX */
2132         REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
2133 
2134         /* function setup */
2135         func_init.pf_id = BP_FUNC(bp);
2136         func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
2137         bnx2x_func_init(bp, &func_init);
2138 
2139         /* Enable the vf */
2140         bnx2x_vf_enable_access(bp, vf->abs_vfid);
2141         bnx2x_vf_enable_traffic(bp, vf);
2142 
2143         /* queue protection table */
2144         for_each_vfq(vf, i)
2145                 bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
2146                                     vfq_qzone_id(vf, vfq_get(vf, i)), true);
2147 
2148         vf->state = VF_ENABLED;
2149 
2150         /* update vf bulletin board */
2151         bnx2x_post_vf_bulletin(bp, vf->index);
2152 
2153         return 0;
2154 }
2155 
2156 struct set_vf_state_cookie {
2157         struct bnx2x_virtf *vf;
2158         u8 state;
2159 };
2160 
2161 static void bnx2x_set_vf_state(void *cookie)
2162 {
2163         struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2164 
2165         p->vf->state = p->state;
2166 }
2167 
2168 int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2169 {
2170         int rc = 0, i;
2171 
2172         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2173 
2174         /* Close all queues */
2175         for (i = 0; i < vf_rxq_count(vf); i++) {
2176                 rc = bnx2x_vf_queue_teardown(bp, vf, i);
2177                 if (rc)
2178                         goto op_err;
2179         }
2180 
2181         /* disable the interrupts */
2182         DP(BNX2X_MSG_IOV, "disabling igu\n");
2183         bnx2x_vf_igu_disable(bp, vf);
2184 
2185         /* disable the VF */
2186         DP(BNX2X_MSG_IOV, "clearing qtbl\n");
2187         bnx2x_vf_clr_qtbl(bp, vf);
2188 
2189         /* need to make sure there are no outstanding stats ramrods which may
2190          * cause the device to access the VF's stats buffer, which the VF will
2191          * free as soon as we return from the close flow.
2192          */
2193         {
2194                 struct set_vf_state_cookie cookie;
2195 
2196                 cookie.vf = vf;
2197                 cookie.state = VF_ACQUIRED;
2198                 rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2199                 if (rc)
2200                         goto op_err;
2201         }
2202 
2203         DP(BNX2X_MSG_IOV, "set state to acquired\n");
2204 
2205         return 0;
2206 op_err:
2207         BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
2208         return rc;
2209 }
2210 
2211 /* VF release can be called in two cases: 1. the VF was acquired but
2212  * not enabled; 2. the VF was enabled, or was in the process of being
2213  * enabled
2214  */
2215 int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
2216 {
2217         int rc;
2218 
2219         DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
2220            vf->state == VF_FREE ? "Free" :
2221            vf->state == VF_ACQUIRED ? "Acquired" :
2222            vf->state == VF_ENABLED ? "Enabled" :
2223            vf->state == VF_RESET ? "Reset" :
2224            "Unknown");
2225 
2226         switch (vf->state) {
2227         case VF_ENABLED:
2228                 rc = bnx2x_vf_close(bp, vf);
2229                 if (rc)
2230                         goto op_err;
2231                 /* Fall through - to release resources */
2232         case VF_ACQUIRED:
2233                 DP(BNX2X_MSG_IOV, "about to free resources\n");
2234                 bnx2x_vf_free_resc(bp, vf);
2235                 break;
2236 
2237         case VF_FREE:
2238         case VF_RESET:
2239         default:
2240                 break;
2241         }
2242         return 0;
2243 op_err:
2244         BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
2245         return rc;
2246 }
2247 
2248 int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2249                         struct bnx2x_config_rss_params *rss)
2250 {
2251         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2252         set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
2253         return bnx2x_config_rss(bp, rss);
2254 }
2255 
2256 int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
2257                         struct vfpf_tpa_tlv *tlv,
2258                         struct bnx2x_queue_update_tpa_params *params)
2259 {
2260         aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
2261         struct bnx2x_queue_state_params qstate;
2262         int qid, rc = 0;
2263 
2264         DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
2265 
2266         /* Set ramrod params */
2267         memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
2268         memcpy(&qstate.params.update_tpa, params,
2269                sizeof(struct bnx2x_queue_update_tpa_params));
2270         qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
2271         set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
2272 
2273         for (qid = 0; qid < vf_rxq_count(vf); qid++) {
2274                 qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
2275                 qstate.params.update_tpa.sge_map = sge_addr[qid];
2276                 DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
2277                    vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
2278                    U64_LO(sge_addr[qid]));
2279                 rc = bnx2x_queue_state_change(bp, &qstate);
2280                 if (rc) {
2281                         BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
2282                                   U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
2283                                   vf->abs_vfid, qid);
2284                         return rc;
2285                 }
2286         }
2287 
2288         return rc;
2289 }
2290 
2291 /* VF release ~ VF close + VF release-resources
2292  * Release is the ultimate SW shutdown and is called whenever an
2293  * irrecoverable error is encountered.
2294  */
2295 int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
2296 {
2297         int rc;
2298 
2299         DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
2300         bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2301 
2302         rc = bnx2x_vf_free(bp, vf);
2303         if (rc)
2304                 WARN(rc,
2305                      "VF[%d] Failed to release resources, rc=%d\n",
2306                      vf->abs_vfid, rc);
2307         bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
2308         return rc;
2309 }
2310 
2311 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2312                               enum channel_tlvs tlv)
2313 {
2314         /* we don't lock the channel for unsupported tlvs */
2315         if (!bnx2x_tlv_supported(tlv)) {
2316                 BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
2317                 return;
2318         }
2319 
2320         /* lock the channel */
2321         mutex_lock(&vf->op_mutex);
2322 
2323         /* record the locking op */
2324         vf->op_current = tlv;
2325 
2326         /* log the lock */
2327         DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
2328            vf->abs_vfid, tlv);
2329 }
2330 
2331 void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
2332                                 enum channel_tlvs expected_tlv)
2333 {
2334         enum channel_tlvs current_tlv;
2335 
2336         if (!vf) {
2337                 BNX2X_ERR("VF was %p\n", vf);
2338                 return;
2339         }
2340 
2341         current_tlv = vf->op_current;
2342 
2343         /* we don't unlock the channel for unsupported tlvs */
2344         if (!bnx2x_tlv_supported(expected_tlv))
2345                 return;
2346 
2347         WARN(expected_tlv != vf->op_current,
2348              "lock mismatch: expected %d found %d", expected_tlv,
2349              vf->op_current);
2350 
2351         /* clear the locking op */
2352         vf->op_current = CHANNEL_TLV_NONE;
2353 
2354         /* unlock the channel */
2355         mutex_unlock(&vf->op_mutex);
2356 
2357         /* log the unlock */
2358         DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
2359            vf->abs_vfid, current_tlv);
2360 }
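
/* Typical PF-side pairing of the two helpers above (see bnx2x_vf_release()
 * and bnx2x_set_vf_mac() in this file for real callers):
 *
 *	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 *	... perform the PF-initiated operation ...
 *	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 */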
2361 
2362 static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
2363 {
2364         struct bnx2x_queue_state_params q_params;
2365         u32 prev_flags;
2366         int i, rc;
2367 
2368         /* Verify changes are needed and record current Tx switching state */
2369         prev_flags = bp->flags;
2370         if (enable)
2371                 bp->flags |= TX_SWITCHING;
2372         else
2373                 bp->flags &= ~TX_SWITCHING;
2374         if (prev_flags == bp->flags)
2375                 return 0;
2376 
2377         /* Verify state enables the sending of queue ramrods */
2378         if ((bp->state != BNX2X_STATE_OPEN) ||
2379             (bnx2x_get_q_logical_state(bp,
2380                                       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
2381              BNX2X_Q_LOGICAL_STATE_ACTIVE))
2382                 return 0;
2383 
2384         /* send q. update ramrod to configure Tx switching */
2385         memset(&q_params, 0, sizeof(q_params));
2386         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2387         q_params.cmd = BNX2X_Q_CMD_UPDATE;
2388         __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
2389                   &q_params.params.update.update_flags);
2390         if (enable)
2391                 __set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2392                           &q_params.params.update.update_flags);
2393         else
2394                 __clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
2395                             &q_params.params.update.update_flags);
2396 
2397         /* send the ramrod on all the queues of the PF */
2398         for_each_eth_queue(bp, i) {
2399                 struct bnx2x_fastpath *fp = &bp->fp[i];
2400                 int tx_idx;
2401 
2402                 /* Set the appropriate Queue object */
2403                 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
2404 
2405                 for (tx_idx = FIRST_TX_COS_INDEX;
2406                      tx_idx < fp->max_cos; tx_idx++) {
2407                         q_params.params.update.cid_index = tx_idx;
2408 
2409                         /* Update the Queue state */
2410                         rc = bnx2x_queue_state_change(bp, &q_params);
2411                         if (rc) {
2412                                 BNX2X_ERR("Failed to configure Tx switching\n");
2413                                 return rc;
2414                         }
2415                 }
2416         }
2417 
2418         DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
2419         return 0;
2420 }
2421 
2422 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
2423 {
2424         struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
2425 
2426         if (!IS_SRIOV(bp)) {
2427                 BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
2428                 return -EINVAL;
2429         }
2430 
2431         DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
2432            num_vfs_param, BNX2X_NR_VIRTFN(bp));
2433 
2434         /* HW channel is only operational when PF is up */
2435         if (bp->state != BNX2X_STATE_OPEN) {
2436                 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
2437                 return -EINVAL;
2438         }
2439 
2440         /* we are always bound by the total_vfs in the configuration space */
2441         if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
2442                 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
2443                           num_vfs_param, BNX2X_NR_VIRTFN(bp));
2444                 num_vfs_param = BNX2X_NR_VIRTFN(bp);
2445         }
2446 
2447         bp->requested_nr_virtfn = num_vfs_param;
2448         if (num_vfs_param == 0) {
2449                 bnx2x_set_pf_tx_switching(bp, false);
2450                 bnx2x_disable_sriov(bp);
2451                 return 0;
2452         } else {
2453                 return bnx2x_enable_sriov(bp);
2454         }
2455 }
2456 
2457 #define IGU_ENTRY_SIZE 4
2458 
2459 int bnx2x_enable_sriov(struct bnx2x *bp)
2460 {
2461         int rc = 0, req_vfs = bp->requested_nr_virtfn;
2462         int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
2463         u32 igu_entry, address;
2464         u16 num_vf_queues;
2465 
2466         if (req_vfs == 0)
2467                 return 0;
2468 
2469         first_vf = bp->vfdb->sriov.first_vf_in_pf;
2470 
2471         /* statically distribute vf sb pool between VFs */
2472         num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
2473                               BP_VFDB(bp)->vf_sbs_pool / req_vfs);
2474 
2475         /* zero previous values learned from igu cam */
2476         for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
2477                 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2478 
2479                 vf->sb_count = 0;
2480                 vf_sb_count(BP_VF(bp, vf_idx)) = 0;
2481         }
2482         bp->vfdb->vf_sbs_pool = 0;
2483 
2484         /* prepare IGU cam */
2485         sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
2486         address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
2487         for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2488                 for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
2489                         igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
2490                                 vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
2491                                 IGU_REG_MAPPING_MEMORY_VALID;
2492                         DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
2493                            sb_idx, vf_idx);
2494                         REG_WR(bp, address, igu_entry);
2495                         sb_idx++;
2496                         address += IGU_ENTRY_SIZE;
2497                 }
2498         }
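        /* Worked example with invented shift values (say FID_SHIFT = 7 and
         * VECTOR_SHIFT = 1): vf_idx = 9, vfq_idx = 2 would program
         * igu_entry = (9 << 7) | (2 << 1) | IGU_REG_MAPPING_MEMORY_VALID,
         * i.e. 0x484 plus the valid bit.
         */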
2499 
2500         /* Reinitialize vf database according to igu cam */
2501         bnx2x_get_vf_igu_cam_info(bp);
2502 
2503         DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
2504            BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
2505 
2506         qcount = 0;
2507         for_each_vf(bp, vf_idx) {
2508                 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
2509 
2510                 /* set local queue arrays */
2511                 vf->vfqs = &bp->vfdb->vfqs[qcount];
2512                 qcount += vf_sb_count(vf);
2513                 bnx2x_iov_static_resc(bp, vf);
2514         }
2515 
2516         /* prepare msix vectors in VF configuration space - the value in the
2517          * PCI configuration space should be the index of the last entry,
2518          * namely one less than the actual size of the table
2519          */
2520         for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
2521                 bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
2522                 REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
2523                        num_vf_queues - 1);
2524                 DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
2525                    vf_idx, num_vf_queues - 1);
2526         }
2527         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
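
        /* e.g. with num_vf_queues = 4 the MSI-X control field is written
         * with 3 - PCI encodes the table size as "index of the last entry",
         * not as a count.
         */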
2528 
2529         /* enable sriov. This will probe all the VFs, and consequently cause
2530          * the "acquire" messages to appear on the VF PF channel.
2531          */
2532         DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
2533         bnx2x_disable_sriov(bp);
2534 
2535         rc = bnx2x_set_pf_tx_switching(bp, true);
2536         if (rc)
2537                 return rc;
2538 
2539         rc = pci_enable_sriov(bp->pdev, req_vfs);
2540         if (rc) {
2541                 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
2542                 return rc;
2543         }
2544         DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
2545         return req_vfs;
2546 }
2547 
2548 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
2549 {
2550         int vfidx;
2551         struct pf_vf_bulletin_content *bulletin;
2552 
2553         DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
2554         for_each_vf(bp, vfidx) {
2555                 bulletin = BP_VF_BULLETIN(bp, vfidx);
2556                 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2557                         bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
2558                                           htons(ETH_P_8021Q));
2559         }
2560 }
2561 
2562 void bnx2x_disable_sriov(struct bnx2x *bp)
2563 {
2564         if (pci_vfs_assigned(bp->pdev)) {
2565                 DP(BNX2X_MSG_IOV,
2566                    "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2567                 return;
2568         }
2569 
2570         pci_disable_sriov(bp->pdev);
2571 }
2572 
2573 static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
2574                             struct bnx2x_virtf **vf,
2575                             struct pf_vf_bulletin_content **bulletin,
2576                             bool test_queue)
2577 {
2578         if (bp->state != BNX2X_STATE_OPEN) {
2579                 BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
2580                 return -EINVAL;
2581         }
2582 
2583         if (!IS_SRIOV(bp)) {
2584                 BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
2585                 return -EINVAL;
2586         }
2587 
2588         if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
2589                 BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
2590                           vfidx, BNX2X_NR_VIRTFN(bp));
2591                 return -EINVAL;
2592         }
2593 
2594         /* init members */
2595         *vf = BP_VF(bp, vfidx);
2596         *bulletin = BP_VF_BULLETIN(bp, vfidx);
2597 
2598         if (!*vf) {
2599                 BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
2600                 return -EINVAL;
2601         }
2602 
2603         if (test_queue && !(*vf)->vfqs) {
2604                 BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
2605                           vfidx);
2606                 return -EINVAL;
2607         }
2608 
2609         if (!*bulletin) {
2610                 BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
2611                           vfidx);
2612                 return -EINVAL;
2613         }
2614 
2615         return 0;
2616 }
2617 
2618 int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
2619                         struct ifla_vf_info *ivi)
2620 {
2621         struct bnx2x *bp = netdev_priv(dev);
2622         struct bnx2x_virtf *vf = NULL;
2623         struct pf_vf_bulletin_content *bulletin = NULL;
2624         struct bnx2x_vlan_mac_obj *mac_obj;
2625         struct bnx2x_vlan_mac_obj *vlan_obj;
2626         int rc;
2627 
2628         /* sanity and init */
2629         rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2630         if (rc)
2631                 return rc;
2632 
2633         mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2634         vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2635         if (!mac_obj || !vlan_obj) {
2636                 BNX2X_ERR("VF partially initialized\n");
2637                 return -EINVAL;
2638         }
2639 
2640         ivi->vf = vfidx;
2641         ivi->qos = 0;
2642         ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
2643         ivi->min_tx_rate = 0;
2644         ivi->spoofchk = vf->spoofchk ? 1 : 0;
2645         ivi->linkstate = vf->link_cfg;
2646         if (vf->state == VF_ENABLED) {
2647                 /* mac and vlan are in vlan_mac objects */
2648                 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
2649                         mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
2650                                                 0, ETH_ALEN);
2651                         vlan_obj->get_n_elements(bp, vlan_obj, 1,
2652                                                  (u8 *)&ivi->vlan, 0,
2653                                                  VLAN_HLEN);
2654                 }
2655         } else {
2656                 mutex_lock(&bp->vfdb->bulletin_mutex);
2657                 /* mac */
2658                 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
2659                         /* mac configured by ndo so it's in the bulletin board */
2660                         memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
2661                 else
2662                         /* function has not been loaded yet. Show mac as 0s */
2663                         eth_zero_addr(ivi->mac);
2664 
2665                 /* vlan */
2666                 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
2667                         /* vlan configured by ndo so it's in the bulletin board */
2668                         memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
2669                 else
2670                         /* function has not been loaded yet. Show vlans as 0s */
2671                         memset(&ivi->vlan, 0, VLAN_HLEN);
2672 
2673                 mutex_unlock(&bp->vfdb->bulletin_mutex);
2674         }
2675 
2676         return 0;
2677 }
2678 
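/* bnx2x_set_vf_mac() below is reached through the driver's .ndo_set_vf_mac
 * callback, typically via something like
 * "ip link set <pf-netdev> vf <idx> mac <addr>" on the hypervisor side
 * (the iproute2 command is shown only as an illustrative entry path).
 */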
2679 /* New mac for VF. Consider these cases:
2680  * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
2681  *    supply at acquire.
2682  * 2. VF has already been acquired but has not yet initialized - store in local
2683  *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
2684  *    will configure this mac when it is ready.
2685  * 3. VF has already initialized but has not yet setup a queue - post the new
2686  *    mac on VF's bulletin board right now. VF will configure this mac when it
2687  *    is ready.
2688  * 4. VF has already set a queue - delete any macs already configured for this
2689  *    queue and manually config the new mac.
2690  * In any event, once this function has been called, refuse any attempt by the
2691  * VF to configure any mac for itself except this one. In case of a race where
2692  * the VF fails to see the new post on its bulletin board before sending a mac
2693  * configuration request, the PF will simply fail the request and the VF can
2694  * try again after consulting its bulletin board.
2695  */
2696 int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
2697 {
2698         struct bnx2x *bp = netdev_priv(dev);
2699         int rc, q_logical_state;
2700         struct bnx2x_virtf *vf = NULL;
2701         struct pf_vf_bulletin_content *bulletin = NULL;
2702 
2703         if (!is_valid_ether_addr(mac)) {
2704                 BNX2X_ERR("mac address invalid\n");
2705                 return -EINVAL;
2706         }
2707 
2708         /* sanity and init */
2709         rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2710         if (rc)
2711                 return rc;
2712 
2713         mutex_lock(&bp->vfdb->bulletin_mutex);
2714 
2715         /* update PF's copy of the VF's bulletin. Will no longer accept mac
2716          * configuration requests from the VF unless they match this mac
2717          */
2718         bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
2719         memcpy(bulletin->mac, mac, ETH_ALEN);
2720 
2721         /* Post update on VF's bulletin board */
2722         rc = bnx2x_post_vf_bulletin(bp, vfidx);
2723 
2724         /* release lock before checking return code */
2725         mutex_unlock(&bp->vfdb->bulletin_mutex);
2726 
2727         if (rc) {
2728                 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2729                 return rc;
2730         }
2731 
2732         q_logical_state =
2733                 bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
2734         if (vf->state == VF_ENABLED &&
2735             q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
2736                 /* configure the mac in device on this vf's queue */
2737                 unsigned long ramrod_flags = 0;
2738                 struct bnx2x_vlan_mac_obj *mac_obj;
2739 
2740                 /* User should be able to see failure reason in system logs */
2741                 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2742                         return -EINVAL;
2743 
2744                 /* must lock vfpf channel to protect against vf flows */
2745                 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2746 
2747                 /* remove existing eth macs */
2748                 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
2749                 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
2750                 if (rc) {
2751                         BNX2X_ERR("failed to delete eth macs\n");
2752                         rc = -EINVAL;
2753                         goto out;
2754                 }
2755 
2756                 /* remove existing uc list macs */
2757                 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
2758                 if (rc) {
2759                         BNX2X_ERR("failed to delete uc_list macs\n");
2760                         rc = -EINVAL;
2761                         goto out;
2762                 }
2763 
2764                 /* configure the new mac to device */
2765                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2766                 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
2767                                   BNX2X_ETH_MAC, &ramrod_flags);
2768 
2769 out:
2770                 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
2771         }
2772 
2773         return rc;
2774 }
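
/* Editorial note: case 4 of the comment above (queue already set up) is the
 * path taken when an administrator runs, e.g.,
 * "ip link set <pf> vf 0 mac 02:00:00:00:00:01" while the VF driver is
 * already bound and passing traffic; cases 1-3 only update the bulletin
 * board and rely on the VF to apply the mac itself.
 */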
2775 
2776 static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
2777                                          struct bnx2x_virtf *vf, bool accept)
2778 {
2779         struct bnx2x_rx_mode_ramrod_params rx_ramrod;
2780         unsigned long accept_flags;
2781 
2782         /* need to remove/add the VF's accept_any_vlan bit */
2783         accept_flags = bnx2x_leading_vfq(vf, accept_flags);
2784         if (accept)
2785                 set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2786         else
2787                 clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
2788 
2789         bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
2790                               accept_flags);
2791         bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
2792         bnx2x_config_rx_mode(bp, &rx_ramrod);
2793 }
2794 
2795 static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
2796                                     u16 vlan, bool add)
2797 {
2798         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
2799         unsigned long ramrod_flags = 0;
2800         int rc = 0;
2801 
2802         /* configure the new vlan to device */
2803         memset(&ramrod_param, 0, sizeof(ramrod_param));
2804         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2805         ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2806         ramrod_param.ramrod_flags = ramrod_flags;
2807         ramrod_param.user_req.u.vlan.vlan = vlan;
2808         ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
2809                                         : BNX2X_VLAN_MAC_DEL;
2810         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
2811         if (rc) {
2812                 BNX2X_ERR("failed to configure vlan\n");
2813                 return -EINVAL;
2814         }
2815 
2816         return 0;
2817 }
2818 
2819 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
2820                       __be16 vlan_proto)
2821 {
2822         struct pf_vf_bulletin_content *bulletin = NULL;
2823         struct bnx2x *bp = netdev_priv(dev);
2824         struct bnx2x_vlan_mac_obj *vlan_obj;
2825         unsigned long vlan_mac_flags = 0;
2826         unsigned long ramrod_flags = 0;
2827         struct bnx2x_virtf *vf = NULL;
2828         int i, rc;
2829 
2830         if (vlan > 4095) {
2831                 BNX2X_ERR("illegal vlan value %d\n", vlan);
2832                 return -EINVAL;
2833         }
2834 
2835         if (vlan_proto != htons(ETH_P_8021Q))
2836                 return -EPROTONOSUPPORT;
2837 
2838         DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
2839            vfidx, vlan, 0);
2840 
2841         /* sanity and init */
2842         rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
2843         if (rc)
2844                 return rc;
2845 
2846         /* update PF's copy of the VF's bulletin. No point in posting the vlan
2847          * to the VF since it doesn't have anything to do with it. But it is
2848          * useful to store it here in case the VF is not up yet, so we can
2849          * configure the vlan later when it comes up. Treat vlan id 0 as
2850          * removing the host tag.
2851          */
2852         mutex_lock(&bp->vfdb->bulletin_mutex);
2853 
2854         if (vlan > 0)
2855                 bulletin->valid_bitmap |= 1 << VLAN_VALID;
2856         else
2857                 bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
2858         bulletin->vlan = vlan;
2859 
2860         /* Post update on VF's bulletin board */
2861         rc = bnx2x_post_vf_bulletin(bp, vfidx);
2862         if (rc)
2863                 BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
2864         mutex_unlock(&bp->vfdb->bulletin_mutex);
2865 
2866         /* is vf initialized and queue set up? */
2867         if (vf->state != VF_ENABLED ||
2868             bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2869             BNX2X_Q_LOGICAL_STATE_ACTIVE)
2870                 return rc;
2871 
2872         /* User should be able to see error in system logs */
2873         if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2874                 return -EINVAL;
2875 
2876         /* must lock vfpf channel to protect against vf flows */
2877         bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2878 
2879         /* remove existing vlans */
2880         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2881         vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
2882         rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
2883                                   &ramrod_flags);
2884         if (rc) {
2885                 BNX2X_ERR("failed to delete vlans\n");
2886                 rc = -EINVAL;
2887                 goto out;
2888         }
2889 
2890         /* clear accept_any_vlan when the hypervisor forces a vlan; otherwise
2891          * set it according to the VF's capabilities
2892          */
2893         if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
2894                 bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
2895 
2896         rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
2897         if (rc)
2898                 goto out;
2899 
2900         /* send queue update ramrods to configure default vlan and
2901          * silent vlan removal
2902          */
2903         for_each_vfq(vf, i) {
2904                 struct bnx2x_queue_state_params q_params = {NULL};
2905                 struct bnx2x_queue_update_params *update_params;
2906 
2907                 q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2908 
2909                 /* validate the Q is UP */
2910                 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
2911                     BNX2X_Q_LOGICAL_STATE_ACTIVE)
2912                         continue;
2913 
2914                 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
2915                 q_params.cmd = BNX2X_Q_CMD_UPDATE;
2916                 update_params = &q_params.params.update;
2917                 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
2918                           &update_params->update_flags);
2919                 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
2920                           &update_params->update_flags);
2921                 if (vlan == 0) {
2922                         /* if vlan is 0 then we want to leave the VF traffic
2923                          * untagged, and leave the incoming traffic untouched
2924                          * (i.e. do not remove any vlan tags).
2925                          */
2926                         __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2927                                     &update_params->update_flags);
2928                         __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2929                                     &update_params->update_flags);
2930                 } else {
2931                         /* configure default vlan to vf queue and set silent
2932                          * vlan removal (the vf remains unaware of this vlan).
2933                          */
2934                         __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
2935                                   &update_params->update_flags);
2936                         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
2937                                   &update_params->update_flags);
2938                         update_params->def_vlan = vlan;
2939                         update_params->silent_removal_value =
2940                                 vlan & VLAN_VID_MASK;
2941                         update_params->silent_removal_mask = VLAN_VID_MASK;
2942                 }
2943 
2944                 /* Update the Queue state */
2945                 rc = bnx2x_queue_state_change(bp, &q_params);
2946                 if (rc) {
2947                         BNX2X_ERR("Failed to configure default VLAN queue %d\n",
2948                                   i);
2949                         goto out;
2950                 }
2951         }
2952 out:
2953         bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
2954 
2955         if (!rc)
2956                 DP(BNX2X_MSG_IOV,
2957                    "updated VF[%d] vlan configuration (vlan = %d)\n",
2958                    vfidx, vlan);
2959 
2960         return rc;
2961 }
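
/* Editorial note: this is the "ip link set <pf> vf N vlan <vid>" path.  A
 * non-zero vid installs a default (host) vlan: the PF tags all VF transmit
 * traffic with <vid> and silently strips it on receive, so the VF never
 * sees the tag.  vid 0 removes the host tag and leaves VF traffic untouched.
 */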
2962 
2963 int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val)
2964 {
2965         struct bnx2x *bp = netdev_priv(dev);
2966         struct bnx2x_virtf *vf;
2967         int i, rc = 0;
2968 
2969         vf = BP_VF(bp, idx);
2970         if (!vf)
2971                 return -EINVAL;
2972 
2973         /* nothing to do */
2974         if (vf->spoofchk == val)
2975                 return 0;
2976 
2977         vf->spoofchk = val ? 1 : 0;
2978 
2979         DP(BNX2X_MSG_IOV, "%s spoofchk for VF %d\n",
2980            val ? "enabling" : "disabling", idx);
2981 
2982         /* is vf initialized and queue set up? */
2983         if (vf->state != VF_ENABLED ||
2984             bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
2985             BNX2X_Q_LOGICAL_STATE_ACTIVE)
2986                 return rc;
2987 
2988         /* User should be able to see error in system logs */
2989         if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
2990                 return -EINVAL;
2991 
2992         /* send queue update ramrods to configure spoofchk */
2993         for_each_vfq(vf, i) {
2994                 struct bnx2x_queue_state_params q_params = {NULL};
2995                 struct bnx2x_queue_update_params *update_params;
2996 
2997                 q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
2998 
2999                 /* validate the Q is UP */
3000                 if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
3001                     BNX2X_Q_LOGICAL_STATE_ACTIVE)
3002                         continue;
3003 
3004                 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3005                 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3006                 update_params = &q_params.params.update;
3007                 __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
3008                           &update_params->update_flags);
3009                 if (val) {
3010                         __set_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
3011                                   &update_params->update_flags);
3012                 } else {
3013                         __clear_bit(BNX2X_Q_UPDATE_ANTI_SPOOF,
3014                                     &update_params->update_flags);
3015                 }
3016 
3017                 /* Update the Queue state */
3018                 rc = bnx2x_queue_state_change(bp, &q_params);
3019                 if (rc) {
3020                         BNX2X_ERR("Failed to %s spoofchk on VF %d - vfq %d\n",
3021                                   val ? "enable" : "disable", idx, i);
3022                         goto out;
3023                 }
3024         }
3025 out:
3026         if (!rc)
3027                 DP(BNX2X_MSG_IOV,
3028                    "%s spoofchk for VF[%d]\n", val ? "Enabled" : "Disabled",
3029                    idx);
3030 
3031         return rc;
3032 }
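
/* Editorial note: reached via "ip link set <pf> vf N spoofchk on|off".  The
 * setting is recorded in vf->spoofchk either way; the per-queue update
 * ramrods above only run when the VF is already up, and a VF that comes up
 * later is expected to pick the setting up when its queues are constructed.
 */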
3033 
3034 /* crc is the first field in the bulletin board. Compute the crc over the
3035  * entire bulletin board excluding the crc field itself. Use the length field
3036  * as the Bulletin Board was posted by a PF with possibly a different version
3037  * from the vf which will sample it. Therefore, the length is computed by the
3038  * PF and then used blindly by the VF.
3039  */
3040 u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
3041 {
3042         return crc32(BULLETIN_CRC_SEED,
3043                      ((u8 *)bulletin) + sizeof(bulletin->crc),
3044                      bulletin->length - sizeof(bulletin->crc));
3045 }
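
/* Editorial sketch (not part of the original file): the same "skip the crc
 * field, trust the PF-supplied length" layout in standalone userspace C.
 * This assumes zlib's crc32(); the kernel's crc32() differs in bit-ordering
 * details, so the digests are not interchangeable -- only the structure is.
 * "toy_bulletin", toy_crc() and the seed value are hypothetical.
 */
#if 0
#include <stdint.h>
#include <zlib.h>

struct toy_bulletin {
        uint32_t crc;           /* first field, excluded from the digest */
        uint32_t length;        /* written by the PF, used blindly by the VF */
        uint8_t  payload[64];
};

static uint32_t toy_crc(const struct toy_bulletin *b)
{
        const uint8_t *p = (const uint8_t *)b + sizeof(b->crc);

        return crc32(0x12345678, p, b->length - sizeof(b->crc));
}
#endif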
3046 
3047 /* Check for new posts on the bulletin board */
3048 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3049 {
3050         struct pf_vf_bulletin_content *bulletin;
3051         int attempts;
3052 
3053         /* sampling the structure mid-post may yield corrupted data;
3054          * validate the crc to ensure coherency.
3055          */
3056         for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
3057                 u32 crc;
3058 
3059                 /* sample the bulletin board */
3060                 memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
3061                        sizeof(union pf_vf_bulletin));
3062 
3063                 crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
3064 
3065                 if (bp->shadow_bulletin.content.crc == crc)
3066                         break;
3067 
3068                 BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
3069                           bp->shadow_bulletin.content.crc, crc);
3070         }
3071 
3072         if (attempts >= BULLETIN_ATTEMPTS) {
3073                 BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
3074                           attempts);
3075                 return PFVF_BULLETIN_CRC_ERR;
3076         }
3077         bulletin = &bp->shadow_bulletin.content;
3078 
3079         /* bulletin board hasn't changed since last sample */
3080         if (bp->old_bulletin.version == bulletin->version)
3081                 return PFVF_BULLETIN_UNCHANGED;
3082 
3083         /* the mac address in bulletin board is valid and is new */
3084         if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
3085             !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
3086                 /* propagate the new mac to the net device */
3087                 memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
3088         }
3089 
3090         if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
3091                 DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
3092                    bulletin->link_speed, bulletin->link_flags);
3093 
3094                 bp->vf_link_vars.line_speed = bulletin->link_speed;
3095                 bp->vf_link_vars.link_report_flags = 0;
3096                 /* Link is down */
3097                 if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
3098                         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
3099                                   &bp->vf_link_vars.link_report_flags);
3100                 /* Full DUPLEX */
3101                 if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
3102                         __set_bit(BNX2X_LINK_REPORT_FD,
3103                                   &bp->vf_link_vars.link_report_flags);
3104                 /* Rx Flow Control is ON */
3105                 if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
3106                         __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
3107                                   &bp->vf_link_vars.link_report_flags);
3108                 /* Tx Flow Control is ON */
3109                 if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
3110                         __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
3111                                   &bp->vf_link_vars.link_report_flags);
3112                 __bnx2x_link_report(bp);
3113         }
3114 
3115         /* copy new bulletin board to bp */
3116         memcpy(&bp->old_bulletin, bulletin,
3117                sizeof(struct pf_vf_bulletin_content));
3118 
3119         return PFVF_BULLETIN_UPDATED;
3120 }
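
/* Editorial sketch (not part of the original file): the snapshot-and-validate
 * retry pattern used above, in userspace C.  It reuses the hypothetical
 * toy_bulletin/toy_crc() from the previous sketch; MAX_TRIES plays the role
 * of BULLETIN_ATTEMPTS.
 */
#if 0
#include <string.h>

#define MAX_TRIES       3

static int sample_shared(const volatile struct toy_bulletin *shared,
                         struct toy_bulletin *snap)
{
        int i;

        for (i = 0; i < MAX_TRIES; i++) {
                /* copy first, then verify: the writer may race with us */
                memcpy(snap, (const void *)shared, sizeof(*snap));
                if (toy_crc(snap) == snap->crc)
                        return 0;       /* coherent snapshot */
        }

        return -1;      /* writer kept racing; caller should give up */
}
#endif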
3121 
3122 void bnx2x_timer_sriov(struct bnx2x *bp)
3123 {
3124         bnx2x_sample_bulletin(bp);
3125 
3126         /* if the channel is down we need to self-destruct */
3127         if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3128                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3129                                        BNX2X_MSG_IOV);
3130 }
3131 
3132 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3133 {
3134         /* vf doorbells are embedded within the regview */
3135         return bp->regview + PXP_VF_ADDR_DB_START;
3136 }
3137 
3138 void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
3139 {
3140         BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
3141                        sizeof(struct bnx2x_vf_mbx_msg));
3142         BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
3143                        sizeof(union pf_vf_bulletin));
3144 }
3145 
3146 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3147 {
3148         mutex_init(&bp->vf2pf_mutex);
3149 
3150         /* allocate vf2pf mailbox for vf to pf channel */
3151         bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3152                                          sizeof(struct bnx2x_vf_mbx_msg));
3153         if (!bp->vf2pf_mbox)
3154                 goto alloc_mem_err;
3155 
3156         /* allocate pf 2 vf bulletin board */
3157         bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3158                                              sizeof(union pf_vf_bulletin));
3159         if (!bp->pf2vf_bulletin)
3160                 goto alloc_mem_err;
3161 
3162         bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
3163 
3164         return 0;
3165 
3166 alloc_mem_err:
3167         bnx2x_vf_pci_dealloc(bp);
3168         return -ENOMEM;
3169 }
3170 
3171 void bnx2x_iov_channel_down(struct bnx2x *bp)
3172 {
3173         int vf_idx;
3174         struct pf_vf_bulletin_content *bulletin;
3175 
3176         if (!IS_SRIOV(bp))
3177                 return;
3178 
3179         for_each_vf(bp, vf_idx) {
3180                 /* locate this VF's bulletin board and set the channel-down
3181                  * bit
3182                  */
3183                 bulletin = BP_VF_BULLETIN(bp, vf_idx);
3184                 bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;
3185 
3186                 /* update vf bulletin board */
3187                 bnx2x_post_vf_bulletin(bp, vf_idx);
3188         }
3189 }
3190 
3191 void bnx2x_iov_task(struct work_struct *work)
3192 {
3193         struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);
3194 
3195         if (!netif_running(bp->dev))
3196                 return;
3197 
3198         if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
3199                                &bp->iov_task_state))
3200                 bnx2x_vf_handle_flr_event(bp);
3201 
3202         if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
3203                                &bp->iov_task_state))
3204                 bnx2x_vf_mbx(bp);
3205 }
3206 
3207 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
3208 {
3209         smp_mb__before_atomic();
3210         set_bit(flag, &bp->iov_task_state);
3211         smp_mb__after_atomic();
3212         DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3213         queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
3214 }
