root/drivers/net/ethernet/qlogic/qed/qed_cxt.c

DEFINITIONS

This source file includes the following definitions.
  1. src_proto
  2. tm_cid_proto
  3. tm_tid_proto
  4. qed_cxt_cdu_iids
  5. qed_cxt_src_iids
  6. qed_cxt_tm_iids
  7. qed_cxt_qm_iids
  8. qed_cxt_tid_seg_info
  9. qed_cxt_set_srq_count
  10. qed_cxt_get_srq_count
  11. qed_cxt_set_proto_cid_count
  12. qed_cxt_get_proto_cid_count
  13. qed_cxt_get_proto_cid_start
  14. qed_cxt_get_proto_tid_count
  15. qed_cxt_set_proto_tid_count
  16. qed_ilt_cli_blk_fill
  17. qed_ilt_cli_adv_line
  18. qed_ilt_get_dynamic_line_cnt
  19. qed_cxt_set_cli
  20. qed_cxt_set_blk
  21. qed_cxt_cfg_ilt_compute
  22. qed_cxt_cfg_ilt_compute_excess
  23. qed_cxt_src_t2_free
  24. qed_cxt_src_t2_alloc
  25. qed_cxt_ilt_shadow_size
  26. qed_ilt_shadow_free
  27. qed_ilt_blk_alloc
  28. qed_ilt_shadow_alloc
  29. qed_cid_map_free
  30. qed_cid_map_alloc_single
  31. qed_cid_map_alloc
  32. qed_cxt_mngr_alloc
  33. qed_cxt_tables_alloc
  34. qed_cxt_mngr_free
  35. qed_cxt_mngr_setup
  36. qed_cdu_init_common
  37. qed_cdu_init_pf
  38. qed_qm_init_pf
  39. qed_cm_init_pf
  40. qed_dq_init_pf
  41. qed_ilt_bounds_init
  42. qed_ilt_vf_bounds_init
  43. qed_ilt_init_pf
  44. qed_src_init_pf
  45. qed_tm_init_pf
  46. qed_prs_init_common
  47. qed_prs_init_pf
  48. qed_cxt_hw_init_common
  49. qed_cxt_hw_init_pf
  50. _qed_cxt_acquire_cid
  51. qed_cxt_acquire_cid
  52. qed_cxt_test_cid_acquired
  53. _qed_cxt_release_cid
  54. qed_cxt_release_cid
  55. qed_cxt_get_cid_info
  56. qed_rdma_set_pf_params
  57. qed_cxt_set_pf_params
  58. qed_cxt_get_tid_mem_info
  59. qed_cxt_dynamic_ilt_alloc
  60. qed_cxt_free_ilt_range
  61. qed_cxt_free_proto_ilt
  62. qed_cxt_get_task_ctx

   1 /* QLogic qed NIC Driver
   2  * Copyright (c) 2015-2017  QLogic Corporation
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * OpenIB.org BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  */
  32 
  33 #include <linux/types.h>
  34 #include <linux/bitops.h>
  35 #include <linux/dma-mapping.h>
  36 #include <linux/errno.h>
  37 #include <linux/kernel.h>
  38 #include <linux/list.h>
  39 #include <linux/log2.h>
  40 #include <linux/pci.h>
  41 #include <linux/slab.h>
  42 #include <linux/string.h>
  43 #include "qed.h"
  44 #include "qed_cxt.h"
  45 #include "qed_dev_api.h"
  46 #include "qed_hsi.h"
  47 #include "qed_hw.h"
  48 #include "qed_init_ops.h"
  49 #include "qed_rdma.h"
  50 #include "qed_reg_addr.h"
  51 #include "qed_sriov.h"
  52 
  53 /* Max number of connection types in HW (DQ/CDU etc.) */
  54 #define MAX_CONN_TYPES          PROTOCOLID_COMMON
  55 #define NUM_TASK_TYPES          2
  56 #define NUM_TASK_PF_SEGMENTS    4
  57 #define NUM_TASK_VF_SEGMENTS    1
  58 
  59 /* QM constants */
  60 #define QM_PQ_ELEMENT_SIZE      4 /* in bytes */
  61 
  62 /* Doorbell-Queue constants */
  63 #define DQ_RANGE_SHIFT          4
  64 #define DQ_RANGE_ALIGN          BIT(DQ_RANGE_SHIFT)
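      /* CID counts are rounded up to DQ_RANGE_ALIGN (1 << 4 = 16) in
       * qed_cxt_set_proto_cid_count(), keeping each protocol's CID range
       * aligned for the doorbell-queue range registers (see qed_dq_init_pf()).
       */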
  65 
  66 /* Searcher constants */
  67 #define SRC_MIN_NUM_ELEMS 256
  68 
  69 /* Timers constants */
  70 #define TM_SHIFT        7
  71 #define TM_ALIGN        BIT(TM_SHIFT)
  72 #define TM_ELEM_SIZE    4
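      /* Timer-block iid counts are rounded up to TM_ALIGN (1 << 7 = 128)
       * entries, each occupying TM_ELEM_SIZE (4) bytes in the TM ILT client
       * (see qed_cxt_tm_iids()).
       */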
  73 
  74 #define ILT_DEFAULT_HW_P_SIZE   4
  75 
  76 #define ILT_PAGE_IN_BYTES(hw_p_size)    (1U << ((hw_p_size) + 12))
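      /* e.g., the default hw_p_size of 4 (ILT_DEFAULT_HW_P_SIZE) yields
       * ILT pages of 1 << (4 + 12) = 64 KiB.
       */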
  77 #define ILT_CFG_REG(cli, reg)   PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
  78 
  79 /* ILT entry structure */
  80 #define ILT_ENTRY_PHY_ADDR_MASK         (~0ULL >> 12)
  81 #define ILT_ENTRY_PHY_ADDR_SHIFT        0
  82 #define ILT_ENTRY_VALID_MASK            0x1ULL
  83 #define ILT_ENTRY_VALID_SHIFT           52
  84 #define ILT_ENTRY_IN_REGS               2
  85 #define ILT_REG_SIZE_IN_BYTES           4
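      /* Each 64-bit ILT entry is programmed as two 32-bit runtime registers;
       * it carries the page-aligned physical address right-shifted by 12 in
       * bits [51:0] and a valid flag in bit 52.
       */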
  86 
  87 /* connection context union */
  88 union conn_context {
  89         struct e4_core_conn_context core_ctx;
  90         struct e4_eth_conn_context eth_ctx;
  91         struct e4_iscsi_conn_context iscsi_ctx;
  92         struct e4_fcoe_conn_context fcoe_ctx;
  93         struct e4_roce_conn_context roce_ctx;
  94 };
  95 
  96 /* TYPE-0 task context - iSCSI, FCOE */
  97 union type0_task_context {
  98         struct e4_iscsi_task_context iscsi_ctx;
  99         struct e4_fcoe_task_context fcoe_ctx;
 100 };
 101 
 102 /* TYPE-1 task context - ROCE */
 103 union type1_task_context {
 104         struct e4_rdma_task_context roce_ctx;
 105 };
 106 
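      /* A searcher T2 entry: 56 opaque bytes followed by a 64-bit big-endian
       * 'next' pointer that chains entries into a free list across the T2
       * pages (see qed_cxt_src_t2_alloc()).
       */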
 107 struct src_ent {
 108         u8 opaque[56];
 109         u64 next;
 110 };
 111 
 112 #define CDUT_SEG_ALIGNMET               3 /* in 4k chunks */
 113 #define CDUT_SEG_ALIGNMET_IN_BYTES      BIT(CDUT_SEG_ALIGNMET + 12)
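      /* i.e., CDUT segments are aligned to 2^3 * 4 KiB = 32 KiB */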
 114 
 115 #define CONN_CXT_SIZE(p_hwfn) \
 116         ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
 117 
 118 #define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
 119 
 120 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
 121         ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
 122 
 123 /* Alignment is inherent to the type1_task_context structure */
 124 #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
 125 
  126 /* PF per protocol configuration object */
 127 #define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
 128 #define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
 129 
 130 struct qed_tid_seg {
 131         u32 count;
 132         u8 type;
 133         bool has_fl_mem;
 134 };
 135 
 136 struct qed_conn_type_cfg {
 137         u32 cid_count;
 138         u32 cids_per_vf;
 139         struct qed_tid_seg tid_seg[TASK_SEGMENTS];
 140 };
 141 
 142 /* ILT Client configuration, Per connection type (protocol) resources. */
 143 #define ILT_CLI_PF_BLOCKS       (1 + NUM_TASK_PF_SEGMENTS * 2)
 144 #define ILT_CLI_VF_BLOCKS       (1 + NUM_TASK_VF_SEGMENTS * 2)
 145 #define CDUC_BLK                (0)
 146 #define SRQ_BLK                 (0)
 147 #define CDUT_SEG_BLK(n)         (1 + (u8)(n))
 148 #define CDUT_FL_SEG_BLK(n, X)   (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
 149 
 150 enum ilt_clients {
 151         ILT_CLI_CDUC,
 152         ILT_CLI_CDUT,
 153         ILT_CLI_QM,
 154         ILT_CLI_TM,
 155         ILT_CLI_SRC,
 156         ILT_CLI_TSDM,
 157         ILT_CLI_MAX
 158 };
 159 
 160 struct ilt_cfg_pair {
 161         u32 reg;
 162         u32 val;
 163 };
 164 
 165 struct qed_ilt_cli_blk {
 166         u32 total_size; /* 0 means not active */
 167         u32 real_size_in_page;
 168         u32 start_line;
 169         u32 dynamic_line_cnt;
 170 };
 171 
 172 struct qed_ilt_client_cfg {
 173         bool active;
 174 
 175         /* ILT boundaries */
 176         struct ilt_cfg_pair first;
 177         struct ilt_cfg_pair last;
 178         struct ilt_cfg_pair p_size;
 179 
 180         /* ILT client blocks for PF */
 181         struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
 182         u32 pf_total_lines;
 183 
 184         /* ILT client blocks for VFs */
 185         struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
 186         u32 vf_total_lines;
 187 };
 188 
 189 /* Per Path -
 190  *      ILT shadow table
 191  *      Protocol acquired CID lists
 192  *      PF start line in ILT
 193  */
 194 struct qed_dma_mem {
 195         dma_addr_t p_phys;
 196         void *p_virt;
 197         size_t size;
 198 };
 199 
 200 struct qed_cid_acquired_map {
 201         u32             start_cid;
 202         u32             max_count;
 203         unsigned long   *cid_map;
 204 };
 205 
 206 struct qed_cxt_mngr {
  207         /* Per protocol configuration */
 208         struct qed_conn_type_cfg        conn_cfg[MAX_CONN_TYPES];
 209 
 210         /* computed ILT structure */
 211         struct qed_ilt_client_cfg       clients[ILT_CLI_MAX];
 212 
 213         /* Task type sizes */
 214         u32 task_type_size[NUM_TASK_TYPES];
 215 
 216         /* total number of VFs for this hwfn -
 217          * ALL VFs are symmetric in terms of HW resources
 218          */
 219         u32                             vf_count;
 220 
 221         /* Acquired CIDs */
 222         struct qed_cid_acquired_map     acquired[MAX_CONN_TYPES];
 223 
 224         struct qed_cid_acquired_map
 225         acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
 226 
  227         /* ILT shadow table */
 228         struct qed_dma_mem              *ilt_shadow;
 229         u32                             pf_start_line;
 230 
 231         /* Mutex for a dynamic ILT allocation */
 232         struct mutex mutex;
 233 
 234         /* SRC T2 */
 235         struct qed_dma_mem *t2;
 236         u32 t2_num_pages;
 237         u64 first_free;
 238         u64 last_free;
 239 
  240         /* total number of SRQs for this hwfn */
 241         u32 srq_count;
 242 
 243         /* Maximal number of L2 steering filters */
 244         u32 arfs_count;
 245 };
 246 static bool src_proto(enum protocol_type type)
 247 {
 248         return type == PROTOCOLID_ISCSI ||
 249                type == PROTOCOLID_FCOE ||
 250                type == PROTOCOLID_IWARP;
 251 }
 252 
 253 static bool tm_cid_proto(enum protocol_type type)
 254 {
 255         return type == PROTOCOLID_ISCSI ||
 256                type == PROTOCOLID_FCOE ||
 257                type == PROTOCOLID_ROCE ||
 258                type == PROTOCOLID_IWARP;
 259 }
 260 
 261 static bool tm_tid_proto(enum protocol_type type)
 262 {
 263         return type == PROTOCOLID_FCOE;
 264 }
 265 
 266 /* counts the iids for the CDU/CDUC ILT client configuration */
 267 struct qed_cdu_iids {
 268         u32 pf_cids;
 269         u32 per_vf_cids;
 270 };
 271 
 272 static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
 273                              struct qed_cdu_iids *iids)
 274 {
 275         u32 type;
 276 
 277         for (type = 0; type < MAX_CONN_TYPES; type++) {
 278                 iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
 279                 iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
 280         }
 281 }
 282 
 283 /* counts the iids for the Searcher block configuration */
 284 struct qed_src_iids {
 285         u32 pf_cids;
 286         u32 per_vf_cids;
 287 };
 288 
 289 static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
 290                              struct qed_src_iids *iids)
 291 {
 292         u32 i;
 293 
 294         for (i = 0; i < MAX_CONN_TYPES; i++) {
 295                 if (!src_proto(i))
 296                         continue;
 297 
 298                 iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
 299                 iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
 300         }
 301 
  302         /* Additionally account for the L2 steering (aRFS) filters */
 303         iids->pf_cids += p_mngr->arfs_count;
 304 }
 305 
 306 /* counts the iids for the Timers block configuration */
 307 struct qed_tm_iids {
 308         u32 pf_cids;
 309         u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
 310         u32 pf_tids_total;
 311         u32 per_vf_cids;
 312         u32 per_vf_tids;
 313 };
 314 
 315 static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
 316                             struct qed_cxt_mngr *p_mngr,
 317                             struct qed_tm_iids *iids)
 318 {
 319         bool tm_vf_required = false;
 320         bool tm_required = false;
 321         int i, j;
 322 
  323         /* Timers is a special case -> we don't count how many cids
  324          * require timers, but rather the max cid that will be used by
  325          * the timer block. Therefore we traverse in reverse order, and
  326          * once we hit a protocol that requires the timers memory, we
  327          * sum all the protocols up to that one.
  328          */
 329         for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
 330                 struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
 331 
 332                 if (tm_cid_proto(i) || tm_required) {
 333                         if (p_cfg->cid_count)
 334                                 tm_required = true;
 335 
 336                         iids->pf_cids += p_cfg->cid_count;
 337                 }
 338 
 339                 if (tm_cid_proto(i) || tm_vf_required) {
 340                         if (p_cfg->cids_per_vf)
 341                                 tm_vf_required = true;
 342 
 343                         iids->per_vf_cids += p_cfg->cids_per_vf;
 344                 }
 345 
 346                 if (tm_tid_proto(i)) {
 347                         struct qed_tid_seg *segs = p_cfg->tid_seg;
 348 
 349                         /* for each segment there is at most one
 350                          * protocol for which count is not 0.
 351                          */
 352                         for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
 353                                 iids->pf_tids[j] += segs[j].count;
 354 
  355                         /* The last array element is for the VFs. As for PF
 356                          * segments there can be only one protocol for
 357                          * which this value is not 0.
 358                          */
 359                         iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
 360                 }
 361         }
 362 
 363         iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
 364         iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
 365         iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
 366 
 367         for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
 368                 iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
 369                 iids->pf_tids_total += iids->pf_tids[j];
 370         }
 371 }
 372 
 373 static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
 374                             struct qed_qm_iids *iids)
 375 {
 376         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 377         struct qed_tid_seg *segs;
 378         u32 vf_cids = 0, type, j;
 379         u32 vf_tids = 0;
 380 
 381         for (type = 0; type < MAX_CONN_TYPES; type++) {
 382                 iids->cids += p_mngr->conn_cfg[type].cid_count;
 383                 vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
 384 
 385                 segs = p_mngr->conn_cfg[type].tid_seg;
 386                 /* for each segment there is at most one
 387                  * protocol for which count is not 0.
 388                  */
 389                 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
 390                         iids->tids += segs[j].count;
 391 
  392                 /* The last array element is for the VFs. As for PF
 393                  * segments there can be only one protocol for
 394                  * which this value is not 0.
 395                  */
 396                 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
 397         }
 398 
 399         iids->vf_cids += vf_cids * p_mngr->vf_count;
 400         iids->tids += vf_tids * p_mngr->vf_count;
 401 
 402         DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 403                    "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
 404                    iids->cids, iids->vf_cids, iids->tids, vf_tids);
 405 }
 406 
 407 static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
 408                                                 u32 seg)
 409 {
 410         struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
 411         u32 i;
 412 
 413         /* Find the protocol with tid count > 0 for this segment.
 414          * Note: there can only be one and this is already validated.
 415          */
 416         for (i = 0; i < MAX_CONN_TYPES; i++)
 417                 if (p_cfg->conn_cfg[i].tid_seg[seg].count)
 418                         return &p_cfg->conn_cfg[i].tid_seg[seg];
 419         return NULL;
 420 }
 421 
 422 static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
 423 {
 424         struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 425 
 426         p_mgr->srq_count = num_srqs;
 427 }
 428 
 429 u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
 430 {
 431         struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 432 
 433         return p_mgr->srq_count;
 434 }
 435 
 436 /* set the iids count per protocol */
 437 static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 438                                         enum protocol_type type,
 439                                         u32 cid_count, u32 vf_cid_cnt)
 440 {
 441         struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
 442         struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
 443 
 444         p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
 445         p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
 446 
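              /* RoCE CIDs are additionally rounded up to a whole ILT page
               * worth of contexts, so that dynamic ILT allocation (see
               * qed_cxt_dynamic_ilt_alloc()) can operate on full pages.
               */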
 447         if (type == PROTOCOLID_ROCE) {
 448                 u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
 449                 u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
 450                 u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
 451                 u32 align = elems_per_page * DQ_RANGE_ALIGN;
 452 
 453                 p_conn->cid_count = roundup(p_conn->cid_count, align);
 454         }
 455 }
 456 
 457 u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
 458                                 enum protocol_type type, u32 *vf_cid)
 459 {
 460         if (vf_cid)
 461                 *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
 462 
 463         return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
 464 }
 465 
 466 u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
 467                                 enum protocol_type type)
 468 {
 469         return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
 470 }
 471 
 472 u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
 473                                 enum protocol_type type)
 474 {
 475         u32 cnt = 0;
 476         int i;
 477 
 478         for (i = 0; i < TASK_SEGMENTS; i++)
 479                 cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
 480 
 481         return cnt;
 482 }
 483 
 484 static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
 485                                         enum protocol_type proto,
 486                                         u8 seg,
 487                                         u8 seg_type, u32 count, bool has_fl)
 488 {
 489         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 490         struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
 491 
 492         p_seg->count = count;
 493         p_seg->has_fl_mem = has_fl;
 494         p_seg->type = seg_type;
 495 }
 496 
 497 static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
 498                                  struct qed_ilt_cli_blk *p_blk,
 499                                  u32 start_line, u32 total_size, u32 elem_size)
 500 {
 501         u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 502 
  503         /* verify that it's called only once for each block */
 504         if (p_blk->total_size)
 505                 return;
 506 
 507         p_blk->total_size = total_size;
 508         p_blk->real_size_in_page = 0;
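              /* Usable bytes per ILT page: truncated to a whole number of
               * elements so that no element straddles a page boundary.
               */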
 509         if (elem_size)
 510                 p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
 511         p_blk->start_line = start_line;
 512 }
 513 
 514 static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
 515                                  struct qed_ilt_client_cfg *p_cli,
 516                                  struct qed_ilt_cli_blk *p_blk,
 517                                  u32 *p_line, enum ilt_clients client_id)
 518 {
 519         if (!p_blk->total_size)
 520                 return;
 521 
 522         if (!p_cli->active)
 523                 p_cli->first.val = *p_line;
 524 
 525         p_cli->active = true;
 526         *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
 527         p_cli->last.val = *p_line - 1;
 528 
 529         DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 530                    "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
 531                    client_id, p_cli->first.val,
 532                    p_cli->last.val, p_blk->total_size,
 533                    p_blk->real_size_in_page, p_blk->start_line);
 534 }
 535 
 536 static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
 537                                         enum ilt_clients ilt_client)
 538 {
 539         u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
 540         struct qed_ilt_client_cfg *p_cli;
 541         u32 lines_to_skip = 0;
 542         u32 cxts_per_p;
 543 
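              /* CDUC lines fully covered by RoCE CIDs are not backed with
               * memory up front; they are counted here and populated on
               * demand by qed_cxt_dynamic_ilt_alloc().
               */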
 544         if (ilt_client == ILT_CLI_CDUC) {
 545                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
 546 
 547                 cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
 548                     (u32) CONN_CXT_SIZE(p_hwfn);
 549 
 550                 lines_to_skip = cid_count / cxts_per_p;
 551         }
 552 
 553         return lines_to_skip;
 554 }
 555 
 556 static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
 557                                                   *p_cli)
 558 {
 559         p_cli->active = false;
 560         p_cli->first.val = 0;
 561         p_cli->last.val = 0;
 562         return p_cli;
 563 }
 564 
 565 static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
 566 {
 567         p_blk->total_size = 0;
 568         return p_blk;
 569 }
 570 
 571 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 572 {
 573         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 574         u32 curr_line, total, i, task_size, line;
 575         struct qed_ilt_client_cfg *p_cli;
 576         struct qed_ilt_cli_blk *p_blk;
 577         struct qed_cdu_iids cdu_iids;
 578         struct qed_src_iids src_iids;
 579         struct qed_qm_iids qm_iids;
 580         struct qed_tm_iids tm_iids;
 581         struct qed_tid_seg *p_seg;
 582 
 583         memset(&qm_iids, 0, sizeof(qm_iids));
 584         memset(&cdu_iids, 0, sizeof(cdu_iids));
 585         memset(&src_iids, 0, sizeof(src_iids));
 586         memset(&tm_iids, 0, sizeof(tm_iids));
 587 
 588         p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 589 
 590         DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 591                    "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
 592                    p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
 593 
 594         /* CDUC */
 595         p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
 596 
 597         curr_line = p_mngr->pf_start_line;
 598 
 599         /* CDUC PF */
 600         p_cli->pf_total_lines = 0;
 601 
 602         /* get the counters for the CDUC and QM clients  */
 603         qed_cxt_cdu_iids(p_mngr, &cdu_iids);
 604 
 605         p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
 606 
 607         total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
 608 
 609         qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 610                              total, CONN_CXT_SIZE(p_hwfn));
 611 
 612         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
 613         p_cli->pf_total_lines = curr_line - p_blk->start_line;
 614 
 615         p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
 616                                                                ILT_CLI_CDUC);
 617 
 618         /* CDUC VF */
 619         p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
 620         total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
 621 
 622         qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 623                              total, CONN_CXT_SIZE(p_hwfn));
 624 
 625         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
 626         p_cli->vf_total_lines = curr_line - p_blk->start_line;
 627 
 628         for (i = 1; i < p_mngr->vf_count; i++)
 629                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 630                                      ILT_CLI_CDUC);
 631 
 632         /* CDUT PF */
 633         p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
 634         p_cli->first.val = curr_line;
 635 
 636         /* first the 'working' task memory */
 637         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 638                 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
 639                 if (!p_seg || p_seg->count == 0)
 640                         continue;
 641 
 642                 p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
 643                 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 644                 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
 645                                      p_mngr->task_type_size[p_seg->type]);
 646 
 647                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 648                                      ILT_CLI_CDUT);
 649         }
 650 
 651         /* next the 'init' task memory (forced load memory) */
 652         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 653                 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
 654                 if (!p_seg || p_seg->count == 0)
 655                         continue;
 656 
 657                 p_blk =
 658                     qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
 659 
 660                 if (!p_seg->has_fl_mem) {
  661                         /* The segment is active (total size of 'working'
 662                          * memory is > 0) but has no FL (forced-load, Init)
 663                          * memory. Thus:
 664                          *
  665                          * 1.   The total-size in the corresponding FL block
  666                          *      of the ILT client is set to 0 - no ILT lines
  667                          *      are provisioned and no ILT memory allocated.
 668                          *
 669                          * 2.   The start-line of said block is set to the
 670                          *      start line of the matching working memory
 671                          *      block in the ILT client. This is later used to
 672                          *      configure the CDU segment offset registers and
  673                          *      results in FL commands for TIDs of this
  674                          *      segment behaving as regular load commands
 675                          *      (loading TIDs from the working memory).
 676                          */
 677                         line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
 678 
 679                         qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
 680                         continue;
 681                 }
 682                 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 683 
 684                 qed_ilt_cli_blk_fill(p_cli, p_blk,
 685                                      curr_line, total,
 686                                      p_mngr->task_type_size[p_seg->type]);
 687 
 688                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 689                                      ILT_CLI_CDUT);
 690         }
 691         p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
 692 
 693         /* CDUT VF */
 694         p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
 695         if (p_seg && p_seg->count) {
  696                 /* Strictly speaking we need to iterate over all VF
 697                  * task segment types, but a VF has only 1 segment
 698                  */
 699 
 700                 /* 'working' memory */
 701                 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
 702 
 703                 p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
 704                 qed_ilt_cli_blk_fill(p_cli, p_blk,
 705                                      curr_line, total,
 706                                      p_mngr->task_type_size[p_seg->type]);
 707 
 708                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 709                                      ILT_CLI_CDUT);
 710 
 711                 /* 'init' memory */
 712                 p_blk =
 713                     qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
 714                 if (!p_seg->has_fl_mem) {
 715                         /* see comment above */
 716                         line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
 717                         qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
 718                 } else {
 719                         task_size = p_mngr->task_type_size[p_seg->type];
 720                         qed_ilt_cli_blk_fill(p_cli, p_blk,
 721                                              curr_line, total, task_size);
 722                         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 723                                              ILT_CLI_CDUT);
 724                 }
 725                 p_cli->vf_total_lines = curr_line -
 726                     p_cli->vf_blks[0].start_line;
 727 
 728                 /* Now for the rest of the VFs */
 729                 for (i = 1; i < p_mngr->vf_count; i++) {
 730                         p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
 731                         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 732                                              ILT_CLI_CDUT);
 733 
 734                         p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
 735                         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 736                                              ILT_CLI_CDUT);
 737                 }
 738         }
 739 
 740         /* QM */
 741         p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
 742         p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 743 
 744         qed_cxt_qm_iids(p_hwfn, &qm_iids);
 745         total = qed_qm_pf_mem_size(qm_iids.cids,
 746                                    qm_iids.vf_cids, qm_iids.tids,
 747                                    p_hwfn->qm_info.num_pqs,
 748                                    p_hwfn->qm_info.num_vf_pqs);
 749 
 750         DP_VERBOSE(p_hwfn,
 751                    QED_MSG_ILT,
 752                    "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
 753                    qm_iids.cids,
 754                    qm_iids.vf_cids,
 755                    qm_iids.tids,
 756                    p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
 757 
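              /* qed_qm_pf_mem_size() returns the size in 4 KiB units, hence
               * the conversion to bytes below via total * 0x1000.
               */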
 758         qed_ilt_cli_blk_fill(p_cli, p_blk,
 759                              curr_line, total * 0x1000,
 760                              QM_PQ_ELEMENT_SIZE);
 761 
 762         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
 763         p_cli->pf_total_lines = curr_line - p_blk->start_line;
 764 
 765         /* SRC */
 766         p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
 767         qed_cxt_src_iids(p_mngr, &src_iids);
 768 
  769         /* Both the PF and VF searcher connections are stored in the per-PF
  770          * database. Thus sum the PF searcher cids and all the VF searcher
  771          * cids.
 772          */
 773         total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
 774         if (total) {
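                      /* Clamp the element count to at least SRC_MIN_NUM_ELEMS
                       * and round it up to a power of two.
                       */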
 775                 u32 local_max = max_t(u32, total,
 776                                       SRC_MIN_NUM_ELEMS);
 777 
 778                 total = roundup_pow_of_two(local_max);
 779 
 780                 p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 781                 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 782                                      total * sizeof(struct src_ent),
 783                                      sizeof(struct src_ent));
 784 
 785                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 786                                      ILT_CLI_SRC);
 787                 p_cli->pf_total_lines = curr_line - p_blk->start_line;
 788         }
 789 
 790         /* TM PF */
 791         p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
 792         qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
 793         total = tm_iids.pf_cids + tm_iids.pf_tids_total;
 794         if (total) {
 795                 p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 796                 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 797                                      total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 798 
 799                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 800                                      ILT_CLI_TM);
 801                 p_cli->pf_total_lines = curr_line - p_blk->start_line;
 802         }
 803 
 804         /* TM VF */
 805         total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
 806         if (total) {
 807                 p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
 808                 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 809                                      total * TM_ELEM_SIZE, TM_ELEM_SIZE);
 810 
 811                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 812                                      ILT_CLI_TM);
 813 
 814                 p_cli->vf_total_lines = curr_line - p_blk->start_line;
 815                 for (i = 1; i < p_mngr->vf_count; i++)
 816                         qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 817                                              ILT_CLI_TM);
 818         }
 819 
 820         /* TSDM (SRQ CONTEXT) */
 821         total = qed_cxt_get_srq_count(p_hwfn);
 822 
 823         if (total) {
 824                 p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
 825                 p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
 826                 qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
 827                                      total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
 828 
 829                 qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
 830                                      ILT_CLI_TSDM);
 831                 p_cli->pf_total_lines = curr_line - p_blk->start_line;
 832         }
 833 
 834         *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
 835 
 836         if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
 837             RESC_NUM(p_hwfn, QED_ILT))
 838                 return -EINVAL;
 839 
 840         return 0;
 841 }
 842 
 843 u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
 844 {
 845         struct qed_ilt_client_cfg *p_cli;
 846         u32 excess_lines, available_lines;
 847         struct qed_cxt_mngr *p_mngr;
 848         u32 ilt_page_size, elem_size;
 849         struct qed_tid_seg *p_seg;
 850         int i;
 851 
 852         available_lines = RESC_NUM(p_hwfn, QED_ILT);
 853         excess_lines = used_lines - available_lines;
 854 
 855         if (!excess_lines)
 856                 return 0;
 857 
 858         if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
 859                 return 0;
 860 
 861         p_mngr = p_hwfn->p_cxt_mngr;
 862         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
 863         ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
 864 
 865         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
 866                 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
 867                 if (!p_seg || p_seg->count == 0)
 868                         continue;
 869 
 870                 elem_size = p_mngr->task_type_size[p_seg->type];
 871                 if (!elem_size)
 872                         continue;
 873 
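                      /* Each excess ILT line holds (ilt_page_size / elem_size)
                       * tasks; convert the excess line count into the number
                       * of elements (TIDs) the caller must give up.
                       */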
 874                 return (ilt_page_size / elem_size) * excess_lines;
 875         }
 876 
 877         DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
 878         return 0;
 879 }
 880 
 881 static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
 882 {
 883         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 884         u32 i;
 885 
 886         if (!p_mngr->t2)
 887                 return;
 888 
 889         for (i = 0; i < p_mngr->t2_num_pages; i++)
 890                 if (p_mngr->t2[i].p_virt)
 891                         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 892                                           p_mngr->t2[i].size,
 893                                           p_mngr->t2[i].p_virt,
 894                                           p_mngr->t2[i].p_phys);
 895 
 896         kfree(p_mngr->t2);
 897         p_mngr->t2 = NULL;
 898 }
 899 
 900 static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 901 {
 902         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
 903         u32 conn_num, total_size, ent_per_page, psz, i;
 904         struct qed_ilt_client_cfg *p_src;
 905         struct qed_src_iids src_iids;
 906         struct qed_dma_mem *p_t2;
 907         int rc;
 908 
 909         memset(&src_iids, 0, sizeof(src_iids));
 910 
  911         /* If the SRC ILT client is inactive - there are no connections
  912          * requiring the searcher, leave.
  913          */
 914         p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
 915         if (!p_src->active)
 916                 return 0;
 917 
 918         qed_cxt_src_iids(p_mngr, &src_iids);
 919         conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
 920         total_size = conn_num * sizeof(struct src_ent);
 921 
 922         /* use the same page size as the SRC ILT client */
 923         psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
 924         p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
 925 
 926         /* allocate t2 */
 927         p_mngr->t2 = kcalloc(p_mngr->t2_num_pages, sizeof(struct qed_dma_mem),
 928                              GFP_KERNEL);
 929         if (!p_mngr->t2) {
 930                 rc = -ENOMEM;
 931                 goto t2_fail;
 932         }
 933 
 934         /* allocate t2 pages */
 935         for (i = 0; i < p_mngr->t2_num_pages; i++) {
 936                 u32 size = min_t(u32, total_size, psz);
 937                 void **p_virt = &p_mngr->t2[i].p_virt;
 938 
 939                 *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
 940                                              &p_mngr->t2[i].p_phys,
 941                                              GFP_KERNEL);
 942                 if (!p_mngr->t2[i].p_virt) {
 943                         rc = -ENOMEM;
 944                         goto t2_fail;
 945                 }
 946                 p_mngr->t2[i].size = size;
 947                 total_size -= size;
 948         }
 949 
 950         /* Set the t2 pointers */
 951 
 952         /* entries per page - must be a power of two */
 953         ent_per_page = psz / sizeof(struct src_ent);
 954 
 955         p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
 956 
 957         p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
 958         p_mngr->last_free = (u64) p_t2->p_phys +
 959             ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
 960 
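              /* Within a page each entry's 'next' points at the following
               * entry; the last entry of a page points at the first entry of
               * the next page, and the final entry is terminated with 0.
               * Pointers are stored big-endian via cpu_to_be64().
               */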
 961         for (i = 0; i < p_mngr->t2_num_pages; i++) {
 962                 u32 ent_num = min_t(u32,
 963                                     ent_per_page,
 964                                     conn_num);
 965                 struct src_ent *entries = p_mngr->t2[i].p_virt;
 966                 u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
 967                 u32 j;
 968 
 969                 for (j = 0; j < ent_num - 1; j++) {
 970                         val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
 971                         entries[j].next = cpu_to_be64(val);
 972                 }
 973 
 974                 if (i < p_mngr->t2_num_pages - 1)
 975                         val = (u64) p_mngr->t2[i + 1].p_phys;
 976                 else
 977                         val = 0;
 978                 entries[j].next = cpu_to_be64(val);
 979 
 980                 conn_num -= ent_num;
 981         }
 982 
 983         return 0;
 984 
 985 t2_fail:
 986         qed_cxt_src_t2_free(p_hwfn);
 987         return rc;
 988 }
 989 
 990 #define for_each_ilt_valid_client(pos, clients) \
 991         for (pos = 0; pos < ILT_CLI_MAX; pos++) \
 992                 if (!clients[pos].active) {     \
 993                         continue;               \
 994                 } else                          \
 995 
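      /* The dangling 'else' lets the macro swallow the statement or block
       * that follows its use site, so inactive clients are skipped while the
       * whole construct still parses as a single loop statement.
       */
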
 996 /* Total number of ILT lines used by this PF */
 997 static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
 998 {
 999         u32 size = 0;
1000         u32 i;
1001 
1002         for_each_ilt_valid_client(i, ilt_clients)
1003             size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
1004 
1005         return size;
1006 }
1007 
1008 static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
1009 {
1010         struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
1011         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1012         u32 ilt_size, i;
1013 
1014         ilt_size = qed_cxt_ilt_shadow_size(p_cli);
1015 
1016         for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
1017                 struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
1018 
1019                 if (p_dma->p_virt)
1020                         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1021                                           p_dma->size, p_dma->p_virt,
1022                                           p_dma->p_phys);
1023                 p_dma->p_virt = NULL;
1024         }
1025         kfree(p_mngr->ilt_shadow);
1026 }
1027 
1028 static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
1029                              struct qed_ilt_cli_blk *p_blk,
1030                              enum ilt_clients ilt_client,
1031                              u32 start_line_offset)
1032 {
1033         struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
1034         u32 lines, line, sz_left, lines_to_skip = 0;
1035 
1036         /* Special handling for RoCE that supports dynamic allocation */
1037         if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
1038             ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
1039                 return 0;
1040 
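              /* Lines reserved for dynamic allocation are skipped here and
               * backed with memory on demand by qed_cxt_dynamic_ilt_alloc().
               */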
1041         lines_to_skip = p_blk->dynamic_line_cnt;
1042 
1043         if (!p_blk->total_size)
1044                 return 0;
1045 
1046         sz_left = p_blk->total_size;
1047         lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
1048         line = p_blk->start_line + start_line_offset -
1049             p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
1050 
1051         for (; lines; lines--) {
1052                 dma_addr_t p_phys;
1053                 void *p_virt;
1054                 u32 size;
1055 
1056                 size = min_t(u32, sz_left, p_blk->real_size_in_page);
1057                 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
1058                                             &p_phys, GFP_KERNEL);
1059                 if (!p_virt)
1060                         return -ENOMEM;
1061 
1062                 ilt_shadow[line].p_phys = p_phys;
1063                 ilt_shadow[line].p_virt = p_virt;
1064                 ilt_shadow[line].size = size;
1065 
1066                 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1067                            "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
1068                             line, (u64)p_phys, p_virt, size);
1069 
1070                 sz_left -= size;
1071                 line++;
1072         }
1073 
1074         return 0;
1075 }
1076 
1077 static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
1078 {
1079         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1080         struct qed_ilt_client_cfg *clients = p_mngr->clients;
1081         struct qed_ilt_cli_blk *p_blk;
1082         u32 size, i, j, k;
1083         int rc;
1084 
1085         size = qed_cxt_ilt_shadow_size(clients);
1086         p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
1087                                      GFP_KERNEL);
1088         if (!p_mngr->ilt_shadow) {
1089                 rc = -ENOMEM;
1090                 goto ilt_shadow_fail;
1091         }
1092 
1093         DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1094                    "Allocated 0x%x bytes for ilt shadow\n",
1095                    (u32)(size * sizeof(struct qed_dma_mem)));
1096 
1097         for_each_ilt_valid_client(i, clients) {
1098                 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
1099                         p_blk = &clients[i].pf_blks[j];
1100                         rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
1101                         if (rc)
1102                                 goto ilt_shadow_fail;
1103                 }
1104                 for (k = 0; k < p_mngr->vf_count; k++) {
1105                         for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
1106                                 u32 lines = clients[i].vf_total_lines * k;
1107 
1108                                 p_blk = &clients[i].vf_blks[j];
1109                                 rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
1110                                 if (rc)
1111                                         goto ilt_shadow_fail;
1112                         }
1113                 }
1114         }
1115 
1116         return 0;
1117 
1118 ilt_shadow_fail:
1119         qed_ilt_shadow_free(p_hwfn);
1120         return rc;
1121 }
1122 
1123 static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
1124 {
1125         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1126         u32 type, vf;
1127 
1128         for (type = 0; type < MAX_CONN_TYPES; type++) {
1129                 kfree(p_mngr->acquired[type].cid_map);
1130                 p_mngr->acquired[type].max_count = 0;
1131                 p_mngr->acquired[type].start_cid = 0;
1132 
1133                 for (vf = 0; vf < MAX_NUM_VFS; vf++) {
1134                         kfree(p_mngr->acquired_vf[type][vf].cid_map);
1135                         p_mngr->acquired_vf[type][vf].max_count = 0;
1136                         p_mngr->acquired_vf[type][vf].start_cid = 0;
1137                 }
1138         }
1139 }
1140 
1141 static int
1142 qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
1143                          u32 type,
1144                          u32 cid_start,
1145                          u32 cid_count, struct qed_cid_acquired_map *p_map)
1146 {
1147         u32 size;
1148 
1149         if (!cid_count)
1150                 return 0;
1151 
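              /* One bit per CID, rounded up to whole unsigned longs -
               * equivalent to BITS_TO_LONGS(cid_count) * sizeof(unsigned long)
               * bytes.
               */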
1152         size = DIV_ROUND_UP(cid_count,
1153                             sizeof(unsigned long) * BITS_PER_BYTE) *
1154                sizeof(unsigned long);
1155         p_map->cid_map = kzalloc(size, GFP_KERNEL);
1156         if (!p_map->cid_map)
1157                 return -ENOMEM;
1158 
1159         p_map->max_count = cid_count;
1160         p_map->start_cid = cid_start;
1161 
1162         DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1163                    "Type %08x start: %08x count %08x\n",
1164                    type, p_map->start_cid, p_map->max_count);
1165 
1166         return 0;
1167 }
1168 
1169 static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
1170 {
1171         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1172         u32 start_cid = 0, vf_start_cid = 0;
1173         u32 type, vf;
1174 
1175         for (type = 0; type < MAX_CONN_TYPES; type++) {
1176                 struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
1177                 struct qed_cid_acquired_map *p_map;
1178 
1179                 /* Handle PF maps */
1180                 p_map = &p_mngr->acquired[type];
1181                 if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
1182                                              p_cfg->cid_count, p_map))
1183                         goto cid_map_fail;
1184 
1185                 /* Handle VF maps */
1186                 for (vf = 0; vf < MAX_NUM_VFS; vf++) {
1187                         p_map = &p_mngr->acquired_vf[type][vf];
1188                         if (qed_cid_map_alloc_single(p_hwfn, type,
1189                                                      vf_start_cid,
1190                                                      p_cfg->cids_per_vf, p_map))
1191                                 goto cid_map_fail;
1192                 }
1193 
1194                 start_cid += p_cfg->cid_count;
1195                 vf_start_cid += p_cfg->cids_per_vf;
1196         }
1197 
1198         return 0;
1199 
1200 cid_map_fail:
1201         qed_cid_map_free(p_hwfn);
1202         return -ENOMEM;
1203 }
1204 
1205 int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
1206 {
1207         struct qed_ilt_client_cfg *clients;
1208         struct qed_cxt_mngr *p_mngr;
1209         u32 i;
1210 
1211         p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
1212         if (!p_mngr)
1213                 return -ENOMEM;
1214 
1215         /* Initialize ILT client registers */
1216         clients = p_mngr->clients;
1217         clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
1218         clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
1219         clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
1220 
1221         clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
1222         clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
1223         clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
1224 
1225         clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
1226         clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
1227         clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
1228 
1229         clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
1230         clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
1231         clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1232 
1233         clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1234         clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
1235         clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1236 
1237         clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1238         clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1239         clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
1240         /* default ILT page size for all clients is 64K */
1241         for (i = 0; i < ILT_CLI_MAX; i++)
1242                 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1243 
1244         /* Initialize task sizes */
1245         p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
1246         p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
1247 
1248         if (p_hwfn->cdev->p_iov_info)
1249                 p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
1250         /* Initialize the dynamic ILT allocation mutex */
1251         mutex_init(&p_mngr->mutex);
1252 
 1253         /* Set the cxt manager pointer prior to further allocations */
1254         p_hwfn->p_cxt_mngr = p_mngr;
1255 
1256         return 0;
1257 }
1258 
1259 int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
1260 {
1261         int rc;
1262 
1263         /* Allocate the ILT shadow table */
1264         rc = qed_ilt_shadow_alloc(p_hwfn);
1265         if (rc)
1266                 goto tables_alloc_fail;
1267 
1268         /* Allocate the T2  table */
1269         rc = qed_cxt_src_t2_alloc(p_hwfn);
1270         if (rc)
1271                 goto tables_alloc_fail;
1272 
1273         /* Allocate and initialize the acquired cids bitmaps */
1274         rc = qed_cid_map_alloc(p_hwfn);
1275         if (rc)
1276                 goto tables_alloc_fail;
1277 
1278         return 0;
1279 
1280 tables_alloc_fail:
1281         qed_cxt_mngr_free(p_hwfn);
1282         return rc;
1283 }
1284 
1285 void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
1286 {
1287         if (!p_hwfn->p_cxt_mngr)
1288                 return;
1289 
1290         qed_cid_map_free(p_hwfn);
1291         qed_cxt_src_t2_free(p_hwfn);
1292         qed_ilt_shadow_free(p_hwfn);
1293         kfree(p_hwfn->p_cxt_mngr);
1294 
1295         p_hwfn->p_cxt_mngr = NULL;
1296 }
1297 
1298 void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
1299 {
1300         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1301         struct qed_cid_acquired_map *p_map;
1302         struct qed_conn_type_cfg *p_cfg;
1303         int type;
1304         u32 len;
1305 
1306         /* Reset acquired cids */
1307         for (type = 0; type < MAX_CONN_TYPES; type++) {
1308                 u32 vf;
1309 
1310                 p_cfg = &p_mngr->conn_cfg[type];
1311                 if (p_cfg->cid_count) {
1312                         p_map = &p_mngr->acquired[type];
1313                         len = DIV_ROUND_UP(p_map->max_count,
1314                                            sizeof(unsigned long) *
1315                                            BITS_PER_BYTE) *
1316                               sizeof(unsigned long);
1317                         memset(p_map->cid_map, 0, len);
1318                 }
1319 
1320                 if (!p_cfg->cids_per_vf)
1321                         continue;
1322 
1323                 for (vf = 0; vf < MAX_NUM_VFS; vf++) {
1324                         p_map = &p_mngr->acquired_vf[type][vf];
1325                         len = DIV_ROUND_UP(p_map->max_count,
1326                                            sizeof(unsigned long) *
1327                                            BITS_PER_BYTE) *
1328                               sizeof(unsigned long);
1329                         memset(p_map->cid_map, 0, len);
1330                 }
1331         }
1332 }
1333 
1334 /* CDU Common */
1335 #define CDUC_CXT_SIZE_SHIFT \
1336         CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
1337 
1338 #define CDUC_CXT_SIZE_MASK \
1339         (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
1340 
1341 #define CDUC_BLOCK_WASTE_SHIFT \
1342         CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
1343 
1344 #define CDUC_BLOCK_WASTE_MASK \
1345         (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
1346 
1347 #define CDUC_NCIB_SHIFT \
1348         CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
1349 
1350 #define CDUC_NCIB_MASK \
1351         (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
1352 
1353 #define CDUT_TYPE0_CXT_SIZE_SHIFT \
1354         CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1355 
1356 #define CDUT_TYPE0_CXT_SIZE_MASK                \
1357         (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
1358          CDUT_TYPE0_CXT_SIZE_SHIFT)
1359 
1360 #define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
1361         CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1362 
1363 #define CDUT_TYPE0_BLOCK_WASTE_MASK                    \
1364         (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
1365          CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1366 
1367 #define CDUT_TYPE0_NCIB_SHIFT \
1368         CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1369 
1370 #define CDUT_TYPE0_NCIB_MASK                             \
1371         (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
1372          CDUT_TYPE0_NCIB_SHIFT)
1373 
1374 #define CDUT_TYPE1_CXT_SIZE_SHIFT \
1375         CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1376 
1377 #define CDUT_TYPE1_CXT_SIZE_MASK                \
1378         (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
1379          CDUT_TYPE1_CXT_SIZE_SHIFT)
1380 
1381 #define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
1382         CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1383 
1384 #define CDUT_TYPE1_BLOCK_WASTE_MASK                    \
1385         (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
1386          CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1387 
1388 #define CDUT_TYPE1_NCIB_SHIFT \
1389         CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1390 
1391 #define CDUT_TYPE1_NCIB_MASK                             \
1392         (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
1393          CDUT_TYPE1_NCIB_SHIFT)
1394 
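     /* Program the CDU runtime registers common to all PFs: the connection
      * context (CDUC) and the two task-context (CDUT type-0/1) element sizes,
      * elements-per-ILT-page counts and per-page block waste.
      */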
1395 static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
1396 {
1397         u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1398 
1399         /* CDUC - connection configuration */
1400         page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1401         cxt_size = CONN_CXT_SIZE(p_hwfn);
1402         elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1403         block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
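             /* block_waste is the unused tail of each ILT page, i.e. the bytes
              * left after packing whole contexts. For example (illustrative
              * numbers only), a 4096-byte page with a 320-byte context gives
              * elems_per_page = 12 and block_waste = 256.
              */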
1404 
1405         SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1406         SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1407         SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1408         STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
1409 
1410         /* CDUT - type-0 tasks configuration */
1411         page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1412         cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1413         elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1414         block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1415 
1416         /* cxt size and block-waste are multiples of 8 */
1417         cdu_params = 0;
1418         SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1419         SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1420         SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1421         STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1422 
1423         /* CDUT - type-1 tasks configuration */
1424         cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1425         elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1426         block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1427 
1428         /* cxt size and block-waste are multiples of 8 */
1429         cdu_params = 0;
1430         SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1431         SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1432         SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1433         STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1434 }
1435 
1436 /* CDU PF */
1437 #define CDU_SEG_REG_TYPE_SHIFT          CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1438 #define CDU_SEG_REG_TYPE_MASK           0x1
1439 #define CDU_SEG_REG_OFFSET_SHIFT        0
1440 #define CDU_SEG_REG_OFFSET_MASK         CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1441 
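     /* Program, for each PF task segment, the CDU segment type and ILT offset
      * of both the working-memory and the FL CDUT blocks.
      */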
1442 static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
1443 {
1444         struct qed_ilt_client_cfg *p_cli;
1445         struct qed_tid_seg *p_seg;
1446         u32 cdu_seg_params, offset;
1447         int i;
1448 
1449         static const u32 rt_type_offset_arr[] = {
1450                 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1451                 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1452                 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1453                 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1454         };
1455 
1456         static const u32 rt_type_offset_fl_arr[] = {
1457                 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1458                 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1459                 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1460                 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1461         };
1462 
1463         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1464 
1465         /* Only the CDUT client requires initialization during the PF phase */
1466         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1467                 /* Per-segment CDUT configuration */
1468                 p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
1469                 if (!p_seg)
1470                         continue;
1471 
1472                 /* Note: start_line is already adjusted for the CDU
1473                  * segment register granularity, so we just need to
1474                  * divide. Adjustment is implicit as we assume ILT
1475                  * Page size is larger than 32K!
1476                  */
1477                 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1478                           (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1479                            p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1480 
1481                 cdu_seg_params = 0;
1482                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1483                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1484                 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1485 
1486                 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1487                           (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1488                            p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1489 
1490                 cdu_seg_params = 0;
1491                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1492                 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1493                 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
1494         }
1495 }
1496 
1497 void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
1498                     struct qed_ptt *p_ptt, bool is_pf_loading)
1499 {
1500         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1501         struct qed_qm_pf_rt_init_params params;
1502         struct qed_mcp_link_state *p_link;
1503         struct qed_qm_iids iids;
1504 
1505         memset(&iids, 0, sizeof(iids));
1506         qed_cxt_qm_iids(p_hwfn, &iids);
1507 
1508         p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
1509 
1510         memset(&params, 0, sizeof(params));
1511         params.port_id = p_hwfn->port_id;
1512         params.pf_id = p_hwfn->rel_pf_id;
1513         params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1514         params.is_pf_loading = is_pf_loading;
1515         params.num_pf_cids = iids.cids;
1516         params.num_vf_cids = iids.vf_cids;
1517         params.num_tids = iids.tids;
1518         params.start_pq = qm_info->start_pq;
1519         params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
1520         params.num_vf_pqs = qm_info->num_vf_pqs;
1521         params.start_vport = qm_info->start_vport;
1522         params.num_vports = qm_info->num_vports;
1523         params.pf_wfq = qm_info->pf_wfq;
1524         params.pf_rl = qm_info->pf_rl;
1525         params.link_speed = p_link->speed;
1526         params.pq_params = qm_info->qm_pq_params;
1527         params.vport_params = qm_info->qm_vport_params;
1528 
1529         qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
1530 }
1531 
1532 /* CM PF */
1533 static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
1534 {
1535         /* XCM pure-LB queue */
1536         STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1537                      qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
1538 }
1539 
1540 /* DQ PF */
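     /* Program the DORQ with the running (cumulative) maximum ICID per
      * connection-type range, separately for the PF and for its VFs.
      */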
1541 static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
1542 {
1543         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1544         u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1545 
1546         dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1547         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1548 
1549         dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1550         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1551 
1552         dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1553         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1554 
1555         dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1556         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1557 
1558         dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1559         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1560 
1561         dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1562         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1563 
1564         dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1565         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1566 
1567         dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1568         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1569 
1570         dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1571         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1572 
1573         dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1574         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1575 
1576         dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1577         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1578 
1579         dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1580         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1581 
1582         /* Connection types 6 & 7 are not in use, yet they must be configured
1583          * with the highest possible connection limit. Leaving them unconfigured
1584          * means the defaults are used, and with a large number of cids a bug
1585          * may occur if the defaults are smaller than dq_pf_max_cid /
1586          * dq_vf_max_cid.
1587          */
1588         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1589         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1590 
1591         STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1592         STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
1593 }
1594 
1595 static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
1596 {
1597         struct qed_ilt_client_cfg *ilt_clients;
1598         int i;
1599 
1600         ilt_clients = p_hwfn->p_cxt_mngr->clients;
1601         for_each_ilt_valid_client(i, ilt_clients) {
1602                 STORE_RT_REG(p_hwfn,
1603                              ilt_clients[i].first.reg,
1604                              ilt_clients[i].first.val);
1605                 STORE_RT_REG(p_hwfn,
1606                              ilt_clients[i].last.reg, ilt_clients[i].last.val);
1607                 STORE_RT_REG(p_hwfn,
1608                              ilt_clients[i].p_size.reg,
1609                              ilt_clients[i].p_size.val);
1610         }
1611 }
1612 
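     /* When SR-IOV is present, program the VF ILT range and, per active
      * client (CDUC/CDUT/TM), the block factor and the number of PF/VF
      * blocks, where a 'block' is simply an ILT page.
      */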
1613 static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
1614 {
1615         struct qed_ilt_client_cfg *p_cli;
1616         u32 blk_factor;
1617 
1618         /* For simplicity we set the 'block' to be an ILT page */
1619         if (p_hwfn->cdev->p_iov_info) {
1620                 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
1621 
1622                 STORE_RT_REG(p_hwfn,
1623                              PSWRQ2_REG_VF_BASE_RT_OFFSET,
1624                              p_iov->first_vf_in_pf);
1625                 STORE_RT_REG(p_hwfn,
1626                              PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1627                              p_iov->first_vf_in_pf + p_iov->total_vfs);
1628         }
1629 
1630         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1631         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1632         if (p_cli->active) {
1633                 STORE_RT_REG(p_hwfn,
1634                              PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1635                              blk_factor);
1636                 STORE_RT_REG(p_hwfn,
1637                              PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1638                              p_cli->pf_total_lines);
1639                 STORE_RT_REG(p_hwfn,
1640                              PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1641                              p_cli->vf_total_lines);
1642         }
1643 
1644         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1645         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1646         if (p_cli->active) {
1647                 STORE_RT_REG(p_hwfn,
1648                              PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1649                              blk_factor);
1650                 STORE_RT_REG(p_hwfn,
1651                              PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1652                              p_cli->pf_total_lines);
1653                 STORE_RT_REG(p_hwfn,
1654                              PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1655                              p_cli->vf_total_lines);
1656         }
1657 
1658         p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1659         blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1660         if (p_cli->active) {
1661                 STORE_RT_REG(p_hwfn,
1662                              PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1663                 STORE_RT_REG(p_hwfn,
1664                              PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1665                              p_cli->pf_total_lines);
1666                 STORE_RT_REG(p_hwfn,
1667                              PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1668                              p_cli->vf_total_lines);
1669         }
1670 }
1671 
1672 /* ILT (PSWRQ2) PF */
1673 static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
1674 {
1675         struct qed_ilt_client_cfg *clients;
1676         struct qed_cxt_mngr *p_mngr;
1677         struct qed_dma_mem *p_shdw;
1678         u32 line, rt_offst, i;
1679 
1680         qed_ilt_bounds_init(p_hwfn);
1681         qed_ilt_vf_bounds_init(p_hwfn);
1682 
1683         p_mngr = p_hwfn->p_cxt_mngr;
1684         p_shdw = p_mngr->ilt_shadow;
1685         clients = p_hwfn->p_cxt_mngr->clients;
1686 
1687         for_each_ilt_valid_client(i, clients) {
1688                 /* Client's first val and RT array are absolute, ILT shadow
1689                  * lines are relative.
1690                  */
1691                 line = clients[i].first.val - p_mngr->pf_start_line;
1692                 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1693                            clients[i].first.val * ILT_ENTRY_IN_REGS;
1694 
1695                 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1696                      line++, rt_offst += ILT_ENTRY_IN_REGS) {
1697                         u64 ilt_hw_entry = 0;
1698 
1699                         /* p_virt could be NULL in case of dynamic
1700                          * allocation
1701                          */
1702                         if (p_shdw[line].p_virt) {
1703                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1704                                 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1705                                           (p_shdw[line].p_phys >> 12));
1706 
1707                                 DP_VERBOSE(p_hwfn, QED_MSG_ILT,
1708                                            "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
1709                                            rt_offst, line, i,
1710                                            (u64)(p_shdw[line].p_phys >> 12));
1711                         }
1712 
1713                         STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1714                 }
1715         }
1716 }
1717 
1718 /* SRC (Searcher) PF */
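     /* Program the searcher with the number of free connection elements and
      * with the number of hash bits, i.e. log2 of the connection count rounded
      * up to a power of two (but no less than SRC_MIN_NUM_ELEMS).
      */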
1719 static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
1720 {
1721         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1722         u32 rounded_conn_num, conn_num, conn_max;
1723         struct qed_src_iids src_iids;
1724 
1725         memset(&src_iids, 0, sizeof(src_iids));
1726         qed_cxt_src_iids(p_mngr, &src_iids);
1727         conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1728         if (!conn_num)
1729                 return;
1730 
1731         conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
1732         rounded_conn_num = roundup_pow_of_two(conn_max);
1733 
1734         STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1735         STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1736                      ilog2(rounded_conn_num));
1737 
1738         STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1739                          p_hwfn->p_cxt_mngr->first_free);
1740         STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1741                          p_hwfn->p_cxt_mngr->last_free);
1742 }
1743 
1744 /* Timers PF */
1745 #define TM_CFG_NUM_IDS_SHIFT            0
1746 #define TM_CFG_NUM_IDS_MASK             0xFFFFULL
1747 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT    16
1748 #define TM_CFG_PRE_SCAN_OFFSET_MASK     0x1FFULL
1749 #define TM_CFG_PARENT_PF_SHIFT          25
1750 #define TM_CFG_PARENT_PF_MASK           0x7ULL
1751 
1752 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT  30
1753 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK   0x1FFULL
1754 
1755 #define TM_CFG_TID_OFFSET_SHIFT         30
1756 #define TM_CFG_TID_OFFSET_MASK          0x7FFFFULL
1757 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT  49
1758 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK   0x1FFULL
1759 
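     /* Program the timers (TM) connection and task memory: one connection
      * entry per VF (VFs are assumed consecutive) followed by the PF entry,
      * and one task entry per VF and per PF task segment.
      */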
1760 static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
1761 {
1762         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1763         u32 active_seg_mask = 0, tm_offset, rt_reg;
1764         struct qed_tm_iids tm_iids;
1765         u64 cfg_word;
1766         u8 i;
1767 
1768         memset(&tm_iids, 0, sizeof(tm_iids));
1769         qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
1770 
1771         /* @@@TBD No pre-scan for now */
1772 
1773         /* Note: We assume consecutive VFs for a PF */
1774         for (i = 0; i < p_mngr->vf_count; i++) {
1775                 cfg_word = 0;
1776                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1777                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1778                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1779                 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
1780                 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1781                     (sizeof(cfg_word) / sizeof(u32)) *
1782                     (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1783                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1784         }
1785 
1786         cfg_word = 0;
1787         SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1788         SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1789         SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
1790         SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);       /* scan all   */
1791 
1792         rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1793             (sizeof(cfg_word) / sizeof(u32)) *
1794             (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
1795         STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1796 
1797         /* enable scan */
1798         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1799                      tm_iids.pf_cids ? 0x1 : 0x0);
1800 
1801         /* @@@TBD how to enable the scan for the VFs */
1802 
1803         tm_offset = tm_iids.per_vf_cids;
1804 
1805         /* Note: We assume consecutive VFs for a PF */
1806         for (i = 0; i < p_mngr->vf_count; i++) {
1807                 cfg_word = 0;
1808                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1809                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1810                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1811                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1812                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1813 
1814                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1815                     (sizeof(cfg_word) / sizeof(u32)) *
1816                     (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
1817 
1818                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1819         }
1820 
1821         tm_offset = tm_iids.pf_cids;
1822         for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1823                 cfg_word = 0;
1824                 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1825                 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1826                 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1827                 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1828                 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
1829 
1830                 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1831                     (sizeof(cfg_word) / sizeof(u32)) *
1832                     (NUM_OF_VFS(p_hwfn->cdev) +
1833                      p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1834 
1835                 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1836                 active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
1837 
1838                 tm_offset += tm_iids.pf_tids[i];
1839         }
1840 
1841         if (QED_IS_RDMA_PERSONALITY(p_hwfn))
1842                 active_seg_mask = 0;
1843 
1844         STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1845 
1846         /* @@@TBD how to enable the scan for the VFs */
1847 }
1848 
1849 static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
1850 {
1851         if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
1852             p_hwfn->pf_params.fcoe_pf_params.is_target)
1853                 STORE_RT_REG(p_hwfn,
1854                              PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
1855 }
1856 
1857 static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
1858 {
1859         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1860         struct qed_conn_type_cfg *p_fcoe;
1861         struct qed_tid_seg *p_tid;
1862 
1863         p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1864 
1865         /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1866         if (!p_fcoe->cid_count)
1867                 return;
1868 
1869         p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
1870         if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
1871                 STORE_RT_REG_AGG(p_hwfn,
1872                                  PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
1873                                  p_tid->count);
1874         } else {
1875                 STORE_RT_REG_AGG(p_hwfn,
1876                                  PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1877                                  p_tid->count);
1878         }
1879 }
1880 
1881 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
1882 {
1883         qed_cdu_init_common(p_hwfn);
1884         qed_prs_init_common(p_hwfn);
1885 }
1886 
1887 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1888 {
1889         qed_qm_init_pf(p_hwfn, p_ptt, true);
1890         qed_cm_init_pf(p_hwfn);
1891         qed_dq_init_pf(p_hwfn);
1892         qed_cdu_init_pf(p_hwfn);
1893         qed_ilt_init_pf(p_hwfn);
1894         qed_src_init_pf(p_hwfn);
1895         qed_tm_init_pf(p_hwfn);
1896         qed_prs_init_pf(p_hwfn);
1897 }
1898 
1899 int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1900                          enum protocol_type type, u32 *p_cid, u8 vfid)
1901 {
1902         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1903         struct qed_cid_acquired_map *p_map;
1904         u32 rel_cid;
1905 
1906         if (type >= MAX_CONN_TYPES) {
1907                 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1908                 return -EINVAL;
1909         }
1910 
1911         if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
1912                 DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
1913                 return -EINVAL;
1914         }
1915 
1916         /* Determine the right map to take this CID from */
1917         if (vfid == QED_CXT_PF_CID)
1918                 p_map = &p_mngr->acquired[type];
1919         else
1920                 p_map = &p_mngr->acquired_vf[type][vfid];
1921 
1922         if (!p_map->cid_map) {
1923                 DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
1924                 return -EINVAL;
1925         }
1926 
1927         rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
1928 
1929         if (rel_cid >= p_map->max_count) {
1930                 DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
1931                 return -EINVAL;
1932         }
1933 
1934         __set_bit(rel_cid, p_map->cid_map);
1935 
1936         *p_cid = rel_cid + p_map->start_cid;
1937 
1938         DP_VERBOSE(p_hwfn, QED_MSG_CXT,
1939                    "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1940                    *p_cid, rel_cid, vfid, type);
1941 
1942         return 0;
1943 }
1944 
1945 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
1946                         enum protocol_type type, u32 *p_cid)
1947 {
1948         return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
1949 }
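
     /* Typical acquire/release pairing (illustrative sketch only):
      *
      *     u32 cid;
      *
      *     if (!qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
      *             ... use the connection ...
      *             qed_cxt_release_cid(p_hwfn, cid);
      *     }
      */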
1950 
1951 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
1952                                       u32 cid,
1953                                       u8 vfid,
1954                                       enum protocol_type *p_type,
1955                                       struct qed_cid_acquired_map **pp_map)
1956 {
1957         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1958         u32 rel_cid;
1959 
1960         /* Iterate over protocols and find matching cid range */
1961         for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1962                 if (vfid == QED_CXT_PF_CID)
1963                         *pp_map = &p_mngr->acquired[*p_type];
1964                 else
1965                         *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
1966 
1967                 if (!((*pp_map)->cid_map))
1968                         continue;
1969                 if (cid >= (*pp_map)->start_cid &&
1970                     cid < (*pp_map)->start_cid + (*pp_map)->max_count)
1971                         break;
1972         }
1973 
1974         if (*p_type == MAX_CONN_TYPES) {
1975                 DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
1976                 goto fail;
1977         }
1978 
1979         rel_cid = cid - (*pp_map)->start_cid;
1980         if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
1981                 DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
1982                           cid, vfid);
1983                 goto fail;
1984         }
1985 
1986         return true;
1987 fail:
1988         *p_type = MAX_CONN_TYPES;
1989         *pp_map = NULL;
1990         return false;
1991 }
1992 
1993 void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
1994 {
1995         struct qed_cid_acquired_map *p_map = NULL;
1996         enum protocol_type type;
1997         bool b_acquired;
1998         u32 rel_cid;
1999 
2000         if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
2001                 DP_NOTICE(p_hwfn,
2002                           "Trying to return incorrect CID belonging to VF %02x\n",
2003                           vfid);
2004                 return;
2005         }
2006 
2007         /* Test acquired and find matching per-protocol map */
2008         b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
2009                                                &type, &p_map);
2010 
2011         if (!b_acquired)
2012                 return;
2013 
2014         rel_cid = cid - p_map->start_cid;
2015         clear_bit(rel_cid, p_map->cid_map);
2016 
2017         DP_VERBOSE(p_hwfn, QED_MSG_CXT,
2018                    "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
2019                    cid, rel_cid, vfid, type);
2020 }
2021 
2022 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
2023 {
2024         _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
2025 }
2026 
2027 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
2028 {
2029         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2030         struct qed_cid_acquired_map *p_map = NULL;
2031         u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
2032         enum protocol_type type;
2033         bool b_acquired;
2034 
2035         /* Test acquired and find matching per-protocol map */
2036         b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
2037                                                QED_CXT_PF_CID, &type, &p_map);
2038 
2039         if (!b_acquired)
2040                 return -EINVAL;
2041 
2042         /* set the protocol type */
2043         p_info->type = type;
2044 
2045         /* compute context virtual pointer */
2046         hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
2047 
2048         conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
2049         cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
2050         line = p_info->iid / cxts_per_p;
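             /* Each ILT page holds cxts_per_p connection contexts; 'line' is
              * therefore the shadow page holding this iid, and the remainder
              * below selects the context within that page.
              */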
2051 
2052         /* Make sure context is allocated (dynamic allocation) */
2053         if (!p_mngr->ilt_shadow[line].p_virt)
2054                 return -EINVAL;
2055 
2056         p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
2057                         p_info->iid % cxts_per_p * conn_cxt_size;
2058 
2059         DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
2060                    "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
2061                    p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
2062 
2063         return 0;
2064 }
2065 
2066 static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
2067                                    struct qed_rdma_pf_params *p_params,
2068                                    u32 num_tasks)
2069 {
2070         u32 num_cons, num_qps, num_srqs;
2071         enum protocol_type proto;
2072 
2073         num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
2074 
2075         if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
2076                 DP_NOTICE(p_hwfn,
2077                           "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
2078                 p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
2079         }
2080 
2081         switch (p_hwfn->hw_info.personality) {
2082         case QED_PCI_ETH_IWARP:
2083                 /* Each QP requires one connection */
2084                 num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
2085                 proto = PROTOCOLID_IWARP;
2086                 break;
2087         case QED_PCI_ETH_ROCE:
2088                 num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
2089                 num_cons = num_qps * 2; /* each QP requires two connections */
2090                 proto = PROTOCOLID_ROCE;
2091                 break;
2092         default:
2093                 return;
2094         }
2095 
2096         if (num_cons && num_tasks) {
2097                 qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
2098 
2099                 /* Deliberately passing ROCE for the task id. This is because
2100                  * iWARP / RoCE share the task id.
2101                  */
2102                 qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
2103                                             QED_CXT_ROCE_TID_SEG, 1,
2104                                             num_tasks, false);
2105                 qed_cxt_set_srq_count(p_hwfn, num_srqs);
2106         } else {
2107                 DP_INFO(p_hwfn->cdev,
2108                         "RDMA personality used without setting params!\n");
2109         }
2110 }
2111 
2112 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
2113 {
2114         /* Set the number of required CORE connections */
2115         u32 core_cids = 1; /* SPQ */
2116 
2117         if (p_hwfn->using_ll2)
2118                 core_cids += 4;
2119         qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
2120 
2121         switch (p_hwfn->hw_info.personality) {
2122         case QED_PCI_ETH_RDMA:
2123         case QED_PCI_ETH_IWARP:
2124         case QED_PCI_ETH_ROCE:
2125         {
2126                 qed_rdma_set_pf_params(p_hwfn,
2127                                        &p_hwfn->pf_params.rdma_pf_params,
2128                                        rdma_tasks);
2129 
2130                 /* no need for break since RoCE coexists with Ethernet */
2131         }
2132         /* fall through */
2133         case QED_PCI_ETH:
2134         {
2135                 struct qed_eth_pf_params *p_params =
2136                     &p_hwfn->pf_params.eth_pf_params;
2137 
2138                 if (!p_params->num_vf_cons)
2139                         p_params->num_vf_cons =
2140                             ETH_PF_PARAMS_VF_CONS_DEFAULT;
2141                 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2142                                             p_params->num_cons,
2143                                             p_params->num_vf_cons);
2144                 p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
2145                 break;
2146         }
2147         case QED_PCI_FCOE:
2148         {
2149                 struct qed_fcoe_pf_params *p_params;
2150 
2151                 p_params = &p_hwfn->pf_params.fcoe_pf_params;
2152 
2153                 if (p_params->num_cons && p_params->num_tasks) {
2154                         qed_cxt_set_proto_cid_count(p_hwfn,
2155                                                     PROTOCOLID_FCOE,
2156                                                     p_params->num_cons,
2157                                                     0);
2158 
2159                         qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
2160                                                     QED_CXT_FCOE_TID_SEG, 0,
2161                                                     p_params->num_tasks, true);
2162                 } else {
2163                         DP_INFO(p_hwfn->cdev,
2164                                 "Fcoe personality used without setting params!\n");
2165                 }
2166                 break;
2167         }
2168         case QED_PCI_ISCSI:
2169         {
2170                 struct qed_iscsi_pf_params *p_params;
2171 
2172                 p_params = &p_hwfn->pf_params.iscsi_pf_params;
2173 
2174                 if (p_params->num_cons && p_params->num_tasks) {
2175                         qed_cxt_set_proto_cid_count(p_hwfn,
2176                                                     PROTOCOLID_ISCSI,
2177                                                     p_params->num_cons,
2178                                                     0);
2179 
2180                         qed_cxt_set_proto_tid_count(p_hwfn,
2181                                                     PROTOCOLID_ISCSI,
2182                                                     QED_CXT_ISCSI_TID_SEG,
2183                                                     0,
2184                                                     p_params->num_tasks,
2185                                                     true);
2186                 } else {
2187                         DP_INFO(p_hwfn->cdev,
2188                                 "Iscsi personality used without setting params!\n");
2189                 }
2190                 break;
2191         }
2192         default:
2193                 return -EINVAL;
2194         }
2195 
2196         return 0;
2197 }
2198 
2199 int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
2200                              struct qed_tid_mem *p_info)
2201 {
2202         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2203         u32 proto, seg, total_lines, i, shadow_line;
2204         struct qed_ilt_client_cfg *p_cli;
2205         struct qed_ilt_cli_blk *p_fl_seg;
2206         struct qed_tid_seg *p_seg_info;
2207 
2208         /* Verify the personality */
2209         switch (p_hwfn->hw_info.personality) {
2210         case QED_PCI_FCOE:
2211                 proto = PROTOCOLID_FCOE;
2212                 seg = QED_CXT_FCOE_TID_SEG;
2213                 break;
2214         case QED_PCI_ISCSI:
2215                 proto = PROTOCOLID_ISCSI;
2216                 seg = QED_CXT_ISCSI_TID_SEG;
2217                 break;
2218         default:
2219                 return -EINVAL;
2220         }
2221 
2222         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2223         if (!p_cli->active)
2224                 return -EINVAL;
2225 
2226         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2227         if (!p_seg_info->has_fl_mem)
2228                 return -EINVAL;
2229 
2230         p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2231         total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
2232                                    p_fl_seg->real_size_in_page);
2233 
2234         for (i = 0; i < total_lines; i++) {
2235                 shadow_line = i + p_fl_seg->start_line -
2236                     p_hwfn->p_cxt_mngr->pf_start_line;
2237                 p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
2238         }
2239         p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
2240             p_fl_seg->real_size_in_page;
2241         p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
2242         p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
2243             p_info->tid_size;
2244 
2245         return 0;
2246 }
2247 
2248 /* This function is very RoCE-oriented; if another protocol wants this
2249  * feature in the future, the function will need to be made more generic.
2250  */
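     /* On demand, allocate the ILT page backing element 'iid' of the given
      * type, record it in the ILT shadow and point the matching PSWRQ2 ILT
      * entry at it (written via DMAE, as the ILT memory is a wide-bus
      * register).
      */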
2251 int
2252 qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2253                           enum qed_cxt_elem_type elem_type, u32 iid)
2254 {
2255         u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2256         struct qed_ilt_client_cfg *p_cli;
2257         struct qed_ilt_cli_blk *p_blk;
2258         struct qed_ptt *p_ptt;
2259         dma_addr_t p_phys;
2260         u64 ilt_hw_entry;
2261         void *p_virt;
2262         int rc = 0;
2263 
2264         switch (elem_type) {
2265         case QED_ELEM_CXT:
2266                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2267                 elem_size = CONN_CXT_SIZE(p_hwfn);
2268                 p_blk = &p_cli->pf_blks[CDUC_BLK];
2269                 break;
2270         case QED_ELEM_SRQ:
2271                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2272                 elem_size = SRQ_CXT_SIZE;
2273                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2274                 break;
2275         case QED_ELEM_TASK:
2276                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2277                 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2278                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2279                 break;
2280         default:
2281                 DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2282                 return -EINVAL;
2283         }
2284 
2285         /* Calculate line in ilt */
2286         hw_p_size = p_cli->p_size.val;
2287         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2288         line = p_blk->start_line + (iid / elems_per_p);
2289         shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2290 
2291         /* If line is already allocated, do nothing, otherwise allocate it and
2292          * write it to the PSWRQ2 registers.
2293          * This section can be run in parallel from different contexts and thus
2294          * a mutex protection is needed.
2295          */
2296 
2297         mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
2298 
2299         if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2300                 goto out0;
2301 
2302         p_ptt = qed_ptt_acquire(p_hwfn);
2303         if (!p_ptt) {
2304                 DP_NOTICE(p_hwfn,
2305                           "QED_TIME_OUT on ptt acquire - dynamic allocation");
2306                 rc = -EBUSY;
2307                 goto out0;
2308         }
2309 
2310         p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2311                                     p_blk->real_size_in_page, &p_phys,
2312                                     GFP_KERNEL);
2313         if (!p_virt) {
2314                 rc = -ENOMEM;
2315                 goto out1;
2316         }
2317 
2318         /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
2319          * to compensate for a HW bug, but it is configured even if DIF is not
2320          * enabled. This is harmless and allows us to avoid a dedicated API. We
2321          * configure the field for all of the contexts on the newly allocated
2322          * page.
2323          */
2324         if (elem_type == QED_ELEM_TASK) {
2325                 u32 elem_i;
2326                 u8 *elem_start = (u8 *)p_virt;
2327                 union type1_task_context *elem;
2328 
2329                 for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
2330                         elem = (union type1_task_context *)elem_start;
2331                         SET_FIELD(elem->roce_ctx.tdif_context.flags1,
2332                                   TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
2333                         elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
2334                 }
2335         }
2336 
2337         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2338         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2339         p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2340             p_blk->real_size_in_page;
2341 
2342         /* compute absolute offset */
2343         reg_offset = PSWRQ2_REG_ILT_MEMORY +
2344             (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2345 
2346         ilt_hw_entry = 0;
2347         SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2348         SET_FIELD(ilt_hw_entry,
2349                   ILT_ENTRY_PHY_ADDR,
2350                   (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2351 
2352         /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2353         qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
2354                           reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
2355                           NULL);
2356 
2357         if (elem_type == QED_ELEM_CXT) {
2358                 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2359                     elems_per_p;
2360 
2361                 /* Update the relevant register in the parser */
2362                 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2363                        last_cid_allocated - 1);
2364 
2365                 if (!p_hwfn->b_rdma_enabled_in_prs) {
2366                         /* Enable RDMA search */
2367                         qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2368                         p_hwfn->b_rdma_enabled_in_prs = true;
2369                 }
2370         }
2371 
2372 out1:
2373         qed_ptt_release(p_hwfn, p_ptt);
2374 out0:
2375         mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
2376 
2377         return rc;
2378 }
2379 
2380 /* This function is very RoCE-oriented; if another protocol wants this
2381  * feature in the future, the function will need to be made more generic.
2382  */
2383 static int
2384 qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
2385                        enum qed_cxt_elem_type elem_type,
2386                        u32 start_iid, u32 count)
2387 {
2388         u32 start_line, end_line, shadow_start_line, shadow_end_line;
2389         u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2390         struct qed_ilt_client_cfg *p_cli;
2391         struct qed_ilt_cli_blk *p_blk;
2392         u32 end_iid = start_iid + count;
2393         struct qed_ptt *p_ptt;
2394         u64 ilt_hw_entry = 0;
2395         u32 i;
2396 
2397         switch (elem_type) {
2398         case QED_ELEM_CXT:
2399                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2400                 elem_size = CONN_CXT_SIZE(p_hwfn);
2401                 p_blk = &p_cli->pf_blks[CDUC_BLK];
2402                 break;
2403         case QED_ELEM_SRQ:
2404                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2405                 elem_size = SRQ_CXT_SIZE;
2406                 p_blk = &p_cli->pf_blks[SRQ_BLK];
2407                 break;
2408         case QED_ELEM_TASK:
2409                 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2410                 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2411                 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
2412                 break;
2413         default:
2414                 DP_NOTICE(p_hwfn, "Invalid elem type = %d", elem_type);
2415                 return -EINVAL;
2416         }
2417 
2418         /* Calculate line in ilt */
2419         hw_p_size = p_cli->p_size.val;
2420         elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2421         start_line = p_blk->start_line + (start_iid / elems_per_p);
2422         end_line = p_blk->start_line + (end_iid / elems_per_p);
2423         if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2424                 end_line--;
2425 
2426         shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2427         shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2428 
2429         p_ptt = qed_ptt_acquire(p_hwfn);
2430         if (!p_ptt) {
2431                 DP_NOTICE(p_hwfn,
2432                           "QED_TIME_OUT on ptt acquire - dynamic allocation");
2433                 return -EBUSY;
2434         }
2435 
2436         for (i = shadow_start_line; i < shadow_end_line; i++) {
2437                 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2438                         continue;
2439 
2440                 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2441                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
2442                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2443                                   p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
2444 
2445                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
2446                 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2447                 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2448 
2449                 /* compute absolute offset */
2450                 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2451                     ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2452                      ILT_ENTRY_IN_REGS);
2453 
2454                 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2455                  * wide-bus.
2456                  */
2457                 qed_dmae_host2grc(p_hwfn, p_ptt,
2458                                   (u64) (uintptr_t) &ilt_hw_entry,
2459                                   reg_offset,
2460                                   sizeof(ilt_hw_entry) / sizeof(u32),
2461                                   NULL);
2462         }
2463 
2464         qed_ptt_release(p_hwfn, p_ptt);
2465 
2466         return 0;
2467 }
2468 
2469 int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2470 {
2471         int rc;
2472         u32 cid;
2473 
2474         /* Free Connection CXT */
2475         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
2476                                     qed_cxt_get_proto_cid_start(p_hwfn,
2477                                                                 proto),
2478                                     qed_cxt_get_proto_cid_count(p_hwfn,
2479                                                                 proto, &cid));
2480 
2481         if (rc)
2482                 return rc;
2483 
2484         /* Free Task CXT (intentionally RoCE, as the task-id is shared between
2485          * RoCE and iWARP)
2486          */
2487         proto = PROTOCOLID_ROCE;
2488         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2489                                     qed_cxt_get_proto_tid_count(p_hwfn, proto));
2490         if (rc)
2491                 return rc;
2492 
2493         /* Free TSDM CXT */
2494         rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
2495                                     qed_cxt_get_srq_count(p_hwfn));
2496 
2497         return rc;
2498 }
2499 
2500 int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
2501                          u32 tid, u8 ctx_type, void **pp_task_ctx)
2502 {
2503         struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
2504         struct qed_ilt_client_cfg *p_cli;
2505         struct qed_tid_seg *p_seg_info;
2506         struct qed_ilt_cli_blk *p_seg;
2507         u32 num_tids_per_block;
2508         u32 tid_size, ilt_idx;
2509         u32 total_lines;
2510         u32 proto, seg;
2511 
2512         /* Verify the personality */
2513         switch (p_hwfn->hw_info.personality) {
2514         case QED_PCI_FCOE:
2515                 proto = PROTOCOLID_FCOE;
2516                 seg = QED_CXT_FCOE_TID_SEG;
2517                 break;
2518         case QED_PCI_ISCSI:
2519                 proto = PROTOCOLID_ISCSI;
2520                 seg = QED_CXT_ISCSI_TID_SEG;
2521                 break;
2522         default:
2523                 return -EINVAL;
2524         }
2525 
2526         p_cli = &p_mngr->clients[ILT_CLI_CDUT];
2527         if (!p_cli->active)
2528                 return -EINVAL;
2529 
2530         p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
2531 
2532         if (ctx_type == QED_CTX_WORKING_MEM) {
2533                 p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
2534         } else if (ctx_type == QED_CTX_FL_MEM) {
2535                 if (!p_seg_info->has_fl_mem)
2536                         return -EINVAL;
2537                 p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
2538         } else {
2539                 return -EINVAL;
2540         }
2541         total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
2542         tid_size = p_mngr->task_type_size[p_seg_info->type];
2543         num_tids_per_block = p_seg->real_size_in_page / tid_size;
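             /* Each ILT line carries num_tids_per_block task contexts, so the
              * requested tid maps to a shadow line plus an offset within that
              * page.
              */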
2544 
2545         if (total_lines < tid / num_tids_per_block)
2546                 return -EINVAL;
2547 
2548         ilt_idx = tid / num_tids_per_block + p_seg->start_line -
2549                   p_mngr->pf_start_line;
2550         *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
2551                        (tid % num_tids_per_block) * tid_size;
2552 
2553         return 0;
2554 }
