root/drivers/net/ethernet/qlogic/qed/qed_roce.c


DEFINITIONS

This source file includes the following definitions.
  1. qed_roce_async_event
  2. qed_roce_stop
  3. qed_rdma_copy_gids
  4. qed_roce_mode_to_flavor
  5. qed_roce_free_cid_pair
  6. qed_roce_alloc_cid
  7. qed_roce_set_real_cid
  8. qed_roce_get_qp_tc
  9. qed_roce_sp_create_responder
  10. qed_roce_sp_create_requester
  11. qed_roce_sp_modify_responder
  12. qed_roce_sp_modify_requester
  13. qed_roce_sp_destroy_qp_responder
  14. qed_roce_sp_destroy_qp_requester
  15. qed_roce_query_qp
  16. qed_roce_destroy_qp
  17. qed_roce_modify_qp
  18. qed_roce_free_real_icid
  19. qed_roce_dpm_dcbx
  20. qed_roce_setup
  21. qed_roce_init_hw

   1 /* QLogic qed NIC Driver
   2  * Copyright (c) 2015-2017  QLogic Corporation
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * OpenIB.org BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  */
  32 #include <linux/types.h>
  33 #include <asm/byteorder.h>
  34 #include <linux/bitops.h>
  35 #include <linux/delay.h>
  36 #include <linux/dma-mapping.h>
  37 #include <linux/errno.h>
  38 #include <linux/io.h>
  39 #include <linux/kernel.h>
  40 #include <linux/list.h>
  41 #include <linux/module.h>
  42 #include <linux/mutex.h>
  43 #include <linux/pci.h>
  44 #include <linux/slab.h>
  45 #include <linux/spinlock.h>
  46 #include <linux/string.h>
  47 #include <linux/if_vlan.h>
  48 #include "qed.h"
  49 #include "qed_cxt.h"
  50 #include "qed_dcbx.h"
  51 #include "qed_hsi.h"
  52 #include "qed_hw.h"
  53 #include "qed_init_ops.h"
  54 #include "qed_int.h"
  55 #include "qed_ll2.h"
  56 #include "qed_mcp.h"
  57 #include "qed_reg_addr.h"
  58 #include <linux/qed/qed_rdma_if.h>
  59 #include "qed_rdma.h"
  60 #include "qed_roce.h"
  61 #include "qed_sp.h"
  62 
  63 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
  64 
  65 static int
  66 qed_roce_async_event(struct qed_hwfn *p_hwfn,
  67                      u8 fw_event_code,
  68                      u16 echo, union event_ring_data *data, u8 fw_return_code)
  69 {
  70         struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
  71 
  72         if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
  73                 u16 icid =
  74                     (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
  75 
  76                 /* icid release in this async event can occur only if the icid
  77                  * was offloaded to the FW. In case it wasn't offloaded this is
  78                  * handled in qed_roce_sp_destroy_qp.
  79                  */
  80                 qed_roce_free_real_icid(p_hwfn, icid);
  81         } else {
  82                 if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
  83                     fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
  84                         u16 srq_id = (u16)data->rdma_data.async_handle.lo;
  85 
  86                         events.affiliated_event(events.context, fw_event_code,
  87                                                 &srq_id);
  88                 } else {
  89                         union rdma_eqe_data rdata = data->rdma_data;
  90 
  91                         events.affiliated_event(events.context, fw_event_code,
  92                                                 (void *)&rdata.async_handle);
  93                 }
  94         }
  95 
  96         return 0;
  97 }
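
A standalone sketch of the demux above: a DESTROY_QP_DONE completion releases
the real icid locally, the two SRQ events pass a 16-bit srq_id to the
affiliated-event callback, and anything else forwards the raw async handle.
The event codes and the handle_event() helper are illustrative placeholders,
not FW or driver symbols.

#include <stdint.h>
#include <stdio.h>

enum { EV_DESTROY_QP_DONE = 1, EV_SRQ_EMPTY = 2, EV_SRQ_LIMIT = 3 };

static void handle_event(uint8_t code, uint32_t data)
{
        if (code == EV_DESTROY_QP_DONE)
                printf("free real icid 0x%04x\n", (uint16_t)data);
        else if (code == EV_SRQ_EMPTY || code == EV_SRQ_LIMIT)
                printf("affiliated event, srq_id %u\n",
                       (unsigned)(uint16_t)data);
        else
                printf("affiliated event, handle 0x%08x\n", (unsigned)data);
}

int main(void)
{
        handle_event(EV_DESTROY_QP_DONE, 0x00018004);   /* icid 0x8004 */
        handle_event(EV_SRQ_EMPTY, 5);
        return 0;
}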
  98 
  99 void qed_roce_stop(struct qed_hwfn *p_hwfn)
 100 {
 101         struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
 102         int wait_count = 0;
 103 
  104         /* When destroying a RoCE QP, control is returned to the user after
 105          * the synchronous part. The asynchronous part may take a little longer.
 106          * We delay for a short while if an async destroy QP is still expected.
 107          * Beyond the added delay we clear the bitmap anyway.
 108          */
 109         while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
 110                 msleep(100);
 111                 if (wait_count++ > 20) {
 112                         DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
 113                         break;
 114                 }
 115         }
 116         qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 117 }
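
The loop above gives pending async destroy completions roughly two seconds
(20-odd iterations of msleep(100)) before giving up and clearing the bitmap
anyway. A minimal user-space sketch of the same bounded-wait idiom, with
cids_pending() standing in for the bitmap_weight() test:

#include <stdbool.h>
#include <stdio.h>

static int pending = 35;

static bool cids_pending(void)  /* stand-in for bitmap_weight() != 0 */
{
        return pending-- > 0;
}

int main(void)
{
        int wait_count = 0;

        while (cids_pending()) {
                /* msleep(100) would go here in kernel context */
                if (wait_count++ > 20) {
                        fprintf(stderr, "cid bitmap wait timed out\n");
                        break;
                }
        }
        return 0;
}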
 118 
 119 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 120                                __le32 *dst_gid)
 121 {
 122         u32 i;
 123 
 124         if (qp->roce_mode == ROCE_V2_IPV4) {
 125                 /* The IPv4 addresses shall be aligned to the highest word.
 126                  * The lower words must be zero.
 127                  */
 128                 memset(src_gid, 0, sizeof(union qed_gid));
 129                 memset(dst_gid, 0, sizeof(union qed_gid));
 130                 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
 131                 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
 132         } else {
 133                 /* GIDs and IPv6 addresses coincide in location and size */
 134                 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
 135                         src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
 136                         dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
 137                 }
 138         }
 139 }
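
A self-contained illustration of the IPv4 placement rule described in the
comment above: a GID is four 32-bit words, and for RoCEv2 over IPv4 only the
highest word carries the address while the lower words stay zero. The
cpu_to_le32() conversion is deliberately omitted; the value here is plain
host order.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint32_t gid[4];
        uint32_t ipv4_addr = 0xc0a80101;        /* 192.168.1.1 */

        memset(gid, 0, sizeof(gid));            /* lower words must be zero */
        gid[3] = ipv4_addr;                     /* highest word only */

        printf("%08x %08x %08x %08x\n",
               (unsigned)gid[0], (unsigned)gid[1],
               (unsigned)gid[2], (unsigned)gid[3]);
        return 0;
}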
 140 
 141 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 142 {
 143         switch (roce_mode) {
 144         case ROCE_V1:
 145                 return PLAIN_ROCE;
 146         case ROCE_V2_IPV4:
 147                 return RROCE_IPV4;
 148         case ROCE_V2_IPV6:
 149                 return RROCE_IPV6;
 150         default:
 151                 return MAX_ROCE_FLAVOR;
 152         }
 153 }
 154 
 155 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
 156 {
 157         spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 158         qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 159         qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
 160         spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 161 }
 162 
 163 int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 164 {
 165         struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 166         u32 responder_icid;
 167         u32 requester_icid;
 168         int rc;
 169 
 170         spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 171         rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
 172                                     &responder_icid);
 173         if (rc) {
 174                 spin_unlock_bh(&p_rdma_info->lock);
 175                 return rc;
 176         }
 177 
 178         rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
 179                                     &requester_icid);
 180 
 181         spin_unlock_bh(&p_rdma_info->lock);
 182         if (rc)
 183                 goto err;
 184 
  185         /* the two icids should be adjacent */
 186         if ((requester_icid - responder_icid) != 1) {
  187                 DP_NOTICE(p_hwfn, "Failed to allocate two adjacent icids\n");
 188                 rc = -EINVAL;
 189                 goto err;
 190         }
 191 
 192         responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
 193                                                       p_rdma_info->proto);
 194         requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
 195                                                       p_rdma_info->proto);
 196 
  197         /* If these icids require a new ILT line, allocate a DMA-able
  198          * context for an ILT page
 199          */
 200         rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
 201         if (rc)
 202                 goto err;
 203 
 204         rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
 205         if (rc)
 206                 goto err;
 207 
 208         *cid = (u16)responder_icid;
 209         return rc;
 210 
 211 err:
 212         spin_lock_bh(&p_rdma_info->lock);
 213         qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
 214         qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
 215 
 216         spin_unlock_bh(&p_rdma_info->lock);
 217         DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 218                    "Allocate CID - failed, rc = %d\n", rc);
 219         return rc;
 220 }
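
The allocator above leans on a pairing invariant: the responder gets the
first icid and the requester must land on the very next one, otherwise
everything is rolled back. A toy sketch of that contract over a 32-bit
bitmap, with alloc_id()/release_id() as stand-ins for
qed_rdma_bmap_alloc_id() and qed_bmap_release_id() (locking omitted):

#include <stdint.h>
#include <stdio.h>

static uint32_t bmap;

static int alloc_id(uint32_t *id)       /* grab the lowest clear bit */
{
        uint32_t i;

        for (i = 0; i < 32; i++) {
                if (!(bmap & (1u << i))) {
                        bmap |= 1u << i;
                        *id = i;
                        return 0;
                }
        }
        return -1;
}

static void release_id(uint32_t id)
{
        bmap &= ~(1u << id);
}

int main(void)
{
        uint32_t resp = 0, req = 0;

        if (alloc_id(&resp))
                return 1;
        if (alloc_id(&req) || req - resp != 1)
                goto err;

        printf("responder %u, requester %u\n", resp, req);
        return 0;

err:
        release_id(resp);               /* mirror the source: free both */
        release_id(req);
        return 1;
}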
 221 
 222 static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
 223 {
 224         spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 225         qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
 226         spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 227 }
 228 
 229 static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 230 {
 231         u8 pri, tc = 0;
 232 
 233         if (qp->vlan_id) {
 234                 pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 235                 tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
 236         }
 237 
 238         DP_VERBOSE(p_hwfn, QED_MSG_SP,
 239                    "qp icid %u tc: %u (vlan priority %s)\n",
 240                    qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
 241 
 242         return tc;
 243 }
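
The priority extraction above assumes qp->vlan_id holds a full 802.1Q TCI:
PCP in bits 15:13, DEI in bit 12, VID in bits 11:0. VLAN_PRIO_MASK (0xe000)
and VLAN_PRIO_SHIFT (13) below match the definitions in <linux/if_vlan.h>;
the sample TCI value is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK  0xe000
#define VLAN_PRIO_SHIFT 13

int main(void)
{
        uint16_t vlan_tci = 0xa064;     /* PCP 5, DEI 0, VID 100 */
        uint8_t pri = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

        printf("vlan priority %u\n", pri);      /* prints 5 */
        return 0;
}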
 244 
 245 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 246                                         struct qed_rdma_qp *qp)
 247 {
 248         struct roce_create_qp_resp_ramrod_data *p_ramrod;
 249         u16 regular_latency_queue, low_latency_queue;
 250         struct qed_sp_init_data init_data;
 251         enum roce_flavor roce_flavor;
 252         struct qed_spq_entry *p_ent;
 253         enum protocol_type proto;
 254         int rc;
 255         u8 tc;
 256 
 257         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 258 
 259         /* Allocate DMA-able memory for IRQ */
 260         qp->irq_num_pages = 1;
 261         qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 262                                      RDMA_RING_PAGE_SIZE,
 263                                      &qp->irq_phys_addr, GFP_KERNEL);
 264         if (!qp->irq) {
 265                 rc = -ENOMEM;
 266                 DP_NOTICE(p_hwfn,
 267                           "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
 268                           rc);
 269                 return rc;
 270         }
 271 
 272         /* Get SPQ entry */
 273         memset(&init_data, 0, sizeof(init_data));
 274         init_data.cid = qp->icid;
 275         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 276         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 277 
 278         rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
 279                                  PROTOCOLID_ROCE, &init_data);
 280         if (rc)
 281                 goto err;
 282 
 283         p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
 284 
 285         p_ramrod->flags = 0;
 286 
 287         roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
 288         SET_FIELD(p_ramrod->flags,
 289                   ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
 290 
 291         SET_FIELD(p_ramrod->flags,
 292                   ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
 293                   qp->incoming_rdma_read_en);
 294 
 295         SET_FIELD(p_ramrod->flags,
 296                   ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
 297                   qp->incoming_rdma_write_en);
 298 
 299         SET_FIELD(p_ramrod->flags,
 300                   ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
 301                   qp->incoming_atomic_en);
 302 
 303         SET_FIELD(p_ramrod->flags,
 304                   ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
 305                   qp->e2e_flow_control_en);
 306 
 307         SET_FIELD(p_ramrod->flags,
 308                   ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 309 
 310         SET_FIELD(p_ramrod->flags,
 311                   ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
 312                   qp->fmr_and_reserved_lkey);
 313 
 314         SET_FIELD(p_ramrod->flags,
 315                   ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
 316                   qp->min_rnr_nak_timer);
 317 
 318         p_ramrod->max_ird = qp->max_rd_atomic_resp;
 319         p_ramrod->traffic_class = qp->traffic_class_tos;
 320         p_ramrod->hop_limit = qp->hop_limit_ttl;
 321         p_ramrod->irq_num_pages = qp->irq_num_pages;
 322         p_ramrod->p_key = cpu_to_le16(qp->pkey);
 323         p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 324         p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
 325         p_ramrod->mtu = cpu_to_le16(qp->mtu);
 326         p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
 327         p_ramrod->pd = cpu_to_le16(qp->pd);
 328         p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
 329         DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
 330         DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
 331         qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 332         p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
 333         p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 334         p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 335         p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 336         p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
 337                                        qp->rq_cq_id);
 338 
 339         tc = qed_roce_get_qp_tc(p_hwfn, qp);
 340         regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
 341         low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
 342         DP_VERBOSE(p_hwfn, QED_MSG_SP,
 343                    "qp icid %u pqs: regular_latency %u low_latency %u\n",
 344                    qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
 345                    low_latency_queue - CM_TX_PQ_BASE);
 346         p_ramrod->regular_latency_phy_queue =
 347             cpu_to_le16(regular_latency_queue);
 348         p_ramrod->low_latency_phy_queue =
 349             cpu_to_le16(low_latency_queue);
 350 
 351         p_ramrod->dpi = cpu_to_le16(qp->dpi);
 352 
 353         qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
 354         qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 355 
 356         p_ramrod->udp_src_port = qp->udp_src_port;
 357         p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
 358         p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
 359         p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
 360 
 361         p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
 362                                      qp->stats_queue;
 363 
 364         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 365         if (rc)
 366                 goto err;
 367 
 368         qp->resp_offloaded = true;
 369         qp->cq_prod = 0;
 370 
 371         proto = p_hwfn->p_rdma_info->proto;
 372         qed_roce_set_real_cid(p_hwfn, qp->icid -
 373                               qed_cxt_get_proto_cid_start(p_hwfn, proto));
 374 
 375         return rc;
 376 
 377 err:
 378         DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
 379         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 380                           qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
 381                           qp->irq, qp->irq_phys_addr);
 382 
 383         return rc;
 384 }
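
Most of the ramrod setup above is the SET_FIELD() mask-and-shift idiom: each
field name expands, via token pasting, to a <NAME>_MASK/<NAME>_SHIFT pair.
Below is a self-contained re-creation of that idiom with made-up demo
fields; the kernel's actual SET_FIELD/GET_FIELD definitions live in the qed
HSI headers and may differ in detail.

#include <stdint.h>
#include <stdio.h>

#define SET_FIELD(value, name, flag)                                         \
        do {                                                                 \
                (value) &= ~((uint32_t)name##_MASK << name##_SHIFT);         \
                (value) |= ((uint32_t)(flag) & name##_MASK) << name##_SHIFT; \
        } while (0)

#define GET_FIELD(value, name) (((value) >> name##_SHIFT) & name##_MASK)

/* Hypothetical layout: a 1-bit enable at bit 3, a 2-bit flavor at bits 1:0 */
#define DEMO_RD_EN_MASK   0x1
#define DEMO_RD_EN_SHIFT  3
#define DEMO_FLAVOR_MASK  0x3
#define DEMO_FLAVOR_SHIFT 0

int main(void)
{
        uint32_t flags = 0;

        SET_FIELD(flags, DEMO_RD_EN, 1);
        SET_FIELD(flags, DEMO_FLAVOR, 2);

        printf("flags=0x%x rd_en=%u flavor=%u\n", (unsigned)flags,
               (unsigned)GET_FIELD(flags, DEMO_RD_EN),
               (unsigned)GET_FIELD(flags, DEMO_FLAVOR));
        return 0;
}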
 385 
 386 static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 387                                         struct qed_rdma_qp *qp)
 388 {
 389         struct roce_create_qp_req_ramrod_data *p_ramrod;
 390         u16 regular_latency_queue, low_latency_queue;
 391         struct qed_sp_init_data init_data;
 392         enum roce_flavor roce_flavor;
 393         struct qed_spq_entry *p_ent;
 394         enum protocol_type proto;
 395         int rc;
 396         u8 tc;
 397 
 398         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 399 
 400         /* Allocate DMA-able memory for ORQ */
 401         qp->orq_num_pages = 1;
 402         qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 403                                      RDMA_RING_PAGE_SIZE,
 404                                      &qp->orq_phys_addr, GFP_KERNEL);
 405         if (!qp->orq) {
 406                 rc = -ENOMEM;
 407                 DP_NOTICE(p_hwfn,
 408                           "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
 409                           rc);
 410                 return rc;
 411         }
 412 
 413         /* Get SPQ entry */
 414         memset(&init_data, 0, sizeof(init_data));
 415         init_data.cid = qp->icid + 1;
 416         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 417         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 418 
 419         rc = qed_sp_init_request(p_hwfn, &p_ent,
 420                                  ROCE_RAMROD_CREATE_QP,
 421                                  PROTOCOLID_ROCE, &init_data);
 422         if (rc)
 423                 goto err;
 424 
 425         p_ramrod = &p_ent->ramrod.roce_create_qp_req;
 426 
 427         p_ramrod->flags = 0;
 428 
 429         roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
 430         SET_FIELD(p_ramrod->flags,
 431                   ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
 432 
 433         SET_FIELD(p_ramrod->flags,
 434                   ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
 435                   qp->fmr_and_reserved_lkey);
 436 
 437         SET_FIELD(p_ramrod->flags,
 438                   ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 439 
 440         SET_FIELD(p_ramrod->flags,
 441                   ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
 442 
 443         SET_FIELD(p_ramrod->flags,
 444                   ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
 445                   qp->rnr_retry_cnt);
 446 
 447         p_ramrod->max_ord = qp->max_rd_atomic_req;
 448         p_ramrod->traffic_class = qp->traffic_class_tos;
 449         p_ramrod->hop_limit = qp->hop_limit_ttl;
 450         p_ramrod->orq_num_pages = qp->orq_num_pages;
 451         p_ramrod->p_key = cpu_to_le16(qp->pkey);
 452         p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 453         p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
 454         p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
 455         p_ramrod->mtu = cpu_to_le16(qp->mtu);
 456         p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
 457         p_ramrod->pd = cpu_to_le16(qp->pd);
 458         p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
 459         DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
 460         DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
 461         qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 462         p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
 463         p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 464         p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 465         p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 466         p_ramrod->cq_cid =
 467             cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 468 
 469         tc = qed_roce_get_qp_tc(p_hwfn, qp);
 470         regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
 471         low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
 472         DP_VERBOSE(p_hwfn, QED_MSG_SP,
 473                    "qp icid %u pqs: regular_latency %u low_latency %u\n",
 474                    qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
 475                    low_latency_queue - CM_TX_PQ_BASE);
 476         p_ramrod->regular_latency_phy_queue =
 477             cpu_to_le16(regular_latency_queue);
 478         p_ramrod->low_latency_phy_queue =
 479             cpu_to_le16(low_latency_queue);
 480 
 481         p_ramrod->dpi = cpu_to_le16(qp->dpi);
 482 
 483         qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
 484         qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 485 
 486         p_ramrod->udp_src_port = qp->udp_src_port;
 487         p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
 488         p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
 489                                      qp->stats_queue;
 490 
 491         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 492         if (rc)
 493                 goto err;
 494 
 495         qp->req_offloaded = true;
 496         proto = p_hwfn->p_rdma_info->proto;
 497         qed_roce_set_real_cid(p_hwfn,
 498                               qp->icid + 1 -
 499                               qed_cxt_get_proto_cid_start(p_hwfn, proto));
 500 
 501         return rc;
 502 
 503 err:
  504         DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
 505         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 506                           qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
 507                           qp->orq, qp->orq_phys_addr);
 508         return rc;
 509 }
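
Both create ramrods build cq_cid the same way: the function's opaque_fid in
the high 16 bits and the CQ id in the low 16. A one-liner sketch of the
packing (values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t opaque_fid = 0x0001, cq_id = 0x0007;
        uint32_t cq_cid = ((uint32_t)opaque_fid << 16) | cq_id;

        printf("cq_cid = 0x%08x\n", (unsigned)cq_cid);  /* 0x00010007 */
        return 0;
}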
 510 
 511 static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
 512                                         struct qed_rdma_qp *qp,
 513                                         bool move_to_err, u32 modify_flags)
 514 {
 515         struct roce_modify_qp_resp_ramrod_data *p_ramrod;
 516         struct qed_sp_init_data init_data;
 517         struct qed_spq_entry *p_ent;
 518         int rc;
 519 
 520         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 521 
 522         if (move_to_err && !qp->resp_offloaded)
 523                 return 0;
 524 
 525         /* Get SPQ entry */
 526         memset(&init_data, 0, sizeof(init_data));
 527         init_data.cid = qp->icid;
 528         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 529         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 530 
 531         rc = qed_sp_init_request(p_hwfn, &p_ent,
 532                                  ROCE_EVENT_MODIFY_QP,
 533                                  PROTOCOLID_ROCE, &init_data);
 534         if (rc) {
 535                 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
 536                 return rc;
 537         }
 538 
 539         p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
 540 
 541         p_ramrod->flags = 0;
 542 
 543         SET_FIELD(p_ramrod->flags,
 544                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
 545 
 546         SET_FIELD(p_ramrod->flags,
 547                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
 548                   qp->incoming_rdma_read_en);
 549 
 550         SET_FIELD(p_ramrod->flags,
 551                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
 552                   qp->incoming_rdma_write_en);
 553 
 554         SET_FIELD(p_ramrod->flags,
 555                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
 556                   qp->incoming_atomic_en);
 557 
 558         SET_FIELD(p_ramrod->flags,
 559                   ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
 560                   qp->e2e_flow_control_en);
 561 
 562         SET_FIELD(p_ramrod->flags,
 563                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
 564                   GET_FIELD(modify_flags,
 565                             QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
 566 
 567         SET_FIELD(p_ramrod->flags,
 568                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
 569                   GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 570 
 571         SET_FIELD(p_ramrod->flags,
 572                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
 573                   GET_FIELD(modify_flags,
 574                             QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 575 
 576         SET_FIELD(p_ramrod->flags,
 577                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
 578                   GET_FIELD(modify_flags,
 579                             QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
 580 
 581         SET_FIELD(p_ramrod->flags,
 582                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
 583                   GET_FIELD(modify_flags,
 584                             QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
 585 
 586         p_ramrod->fields = 0;
 587         SET_FIELD(p_ramrod->fields,
 588                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
 589                   qp->min_rnr_nak_timer);
 590 
 591         p_ramrod->max_ird = qp->max_rd_atomic_resp;
 592         p_ramrod->traffic_class = qp->traffic_class_tos;
 593         p_ramrod->hop_limit = qp->hop_limit_ttl;
 594         p_ramrod->p_key = cpu_to_le16(qp->pkey);
 595         p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 596         p_ramrod->mtu = cpu_to_le16(qp->mtu);
 597         qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 598         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 599 
 600         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
 601         return rc;
 602 }
 603 
 604 static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 605                                         struct qed_rdma_qp *qp,
 606                                         bool move_to_sqd,
 607                                         bool move_to_err, u32 modify_flags)
 608 {
 609         struct roce_modify_qp_req_ramrod_data *p_ramrod;
 610         struct qed_sp_init_data init_data;
 611         struct qed_spq_entry *p_ent;
 612         int rc;
 613 
 614         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 615 
 616         if (move_to_err && !(qp->req_offloaded))
 617                 return 0;
 618 
 619         /* Get SPQ entry */
 620         memset(&init_data, 0, sizeof(init_data));
 621         init_data.cid = qp->icid + 1;
 622         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 623         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 624 
 625         rc = qed_sp_init_request(p_hwfn, &p_ent,
 626                                  ROCE_EVENT_MODIFY_QP,
 627                                  PROTOCOLID_ROCE, &init_data);
 628         if (rc) {
 629                 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
 630                 return rc;
 631         }
 632 
 633         p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
 634 
 635         p_ramrod->flags = 0;
 636 
 637         SET_FIELD(p_ramrod->flags,
 638                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
 639 
 640         SET_FIELD(p_ramrod->flags,
 641                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
 642 
 643         SET_FIELD(p_ramrod->flags,
 644                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
 645                   qp->sqd_async);
 646 
 647         SET_FIELD(p_ramrod->flags,
 648                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
 649                   GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 650 
 651         SET_FIELD(p_ramrod->flags,
 652                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
 653                   GET_FIELD(modify_flags,
 654                             QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 655 
 656         SET_FIELD(p_ramrod->flags,
 657                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
 658                   GET_FIELD(modify_flags,
 659                             QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
 660 
 661         SET_FIELD(p_ramrod->flags,
 662                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
 663                   GET_FIELD(modify_flags,
 664                             QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
 665 
 666         SET_FIELD(p_ramrod->flags,
 667                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
 668                   GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
 669 
 670         SET_FIELD(p_ramrod->flags,
 671                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
 672                   GET_FIELD(modify_flags,
 673                             QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
 674 
 675         p_ramrod->fields = 0;
 676         SET_FIELD(p_ramrod->fields,
 677                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
 678 
 679         SET_FIELD(p_ramrod->fields,
 680                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
 681                   qp->rnr_retry_cnt);
 682 
 683         p_ramrod->max_ord = qp->max_rd_atomic_req;
 684         p_ramrod->traffic_class = qp->traffic_class_tos;
 685         p_ramrod->hop_limit = qp->hop_limit_ttl;
 686         p_ramrod->p_key = cpu_to_le16(qp->pkey);
 687         p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 688         p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
 689         p_ramrod->mtu = cpu_to_le16(qp->mtu);
 690         qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 691         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 692 
 693         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
 694         return rc;
 695 }
 696 
 697 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 698                                             struct qed_rdma_qp *qp,
 699                                             u32 *cq_prod)
 700 {
 701         struct roce_destroy_qp_resp_output_params *p_ramrod_res;
 702         struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
 703         struct qed_sp_init_data init_data;
 704         struct qed_spq_entry *p_ent;
 705         dma_addr_t ramrod_res_phys;
 706         int rc;
 707 
 708         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 709         *cq_prod = qp->cq_prod;
 710 
 711         if (!qp->resp_offloaded) {
  712                 /* If a responder was never offloaded, we need to free the cids
 713                  * allocated in create_qp as a FW async event will never arrive
 714                  */
 715                 u32 cid;
 716 
 717                 cid = qp->icid -
 718                       qed_cxt_get_proto_cid_start(p_hwfn,
 719                                                   p_hwfn->p_rdma_info->proto);
 720                 qed_roce_free_cid_pair(p_hwfn, (u16)cid);
 721 
 722                 return 0;
 723         }
 724 
 725         /* Get SPQ entry */
 726         memset(&init_data, 0, sizeof(init_data));
 727         init_data.cid = qp->icid;
 728         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 729         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 730 
 731         rc = qed_sp_init_request(p_hwfn, &p_ent,
 732                                  ROCE_RAMROD_DESTROY_QP,
 733                                  PROTOCOLID_ROCE, &init_data);
 734         if (rc)
 735                 return rc;
 736 
 737         p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
 738 
 739         p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
 740             dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
 741                                &ramrod_res_phys, GFP_KERNEL);
 742 
 743         if (!p_ramrod_res) {
 744                 rc = -ENOMEM;
 745                 DP_NOTICE(p_hwfn,
 746                           "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
 747                           rc);
 748                 qed_sp_destroy_request(p_hwfn, p_ent);
 749                 return rc;
 750         }
 751 
 752         DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 753 
 754         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 755         if (rc)
 756                 goto err;
 757 
 758         *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
 759         qp->cq_prod = *cq_prod;
 760 
  761         /* Free IRQ only if the ramrod succeeded; otherwise FW may still use it */
 762         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 763                           qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
 764                           qp->irq, qp->irq_phys_addr);
 765 
 766         qp->resp_offloaded = false;
 767 
 768         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
 769 
 770 err:
 771         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 772                           sizeof(struct roce_destroy_qp_resp_output_params),
 773                           p_ramrod_res, ramrod_res_phys);
 774 
 775         return rc;
 776 }
 777 
 778 static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
 779                                             struct qed_rdma_qp *qp)
 780 {
 781         struct roce_destroy_qp_req_output_params *p_ramrod_res;
 782         struct roce_destroy_qp_req_ramrod_data *p_ramrod;
 783         struct qed_sp_init_data init_data;
 784         struct qed_spq_entry *p_ent;
 785         dma_addr_t ramrod_res_phys;
 786         int rc = -ENOMEM;
 787 
 788         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 789 
 790         if (!qp->req_offloaded)
 791                 return 0;
 792 
 793         p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
 794                        dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 795                                           sizeof(*p_ramrod_res),
 796                                           &ramrod_res_phys, GFP_KERNEL);
 797         if (!p_ramrod_res) {
 798                 DP_NOTICE(p_hwfn,
 799                           "qed destroy requester failed: cannot allocate memory (ramrod)\n");
 800                 return rc;
 801         }
 802 
 803         /* Get SPQ entry */
 804         memset(&init_data, 0, sizeof(init_data));
 805         init_data.cid = qp->icid + 1;
 806         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 807         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 808 
 809         rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
 810                                  PROTOCOLID_ROCE, &init_data);
 811         if (rc)
 812                 goto err;
 813 
 814         p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
 815         DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 816 
 817         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 818         if (rc)
 819                 goto err;
 820 
 821 
  822         /* Free ORQ only if the ramrod succeeded; otherwise FW may still use it */
 823         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 824                           qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
 825                           qp->orq, qp->orq_phys_addr);
 826 
 827         qp->req_offloaded = false;
 828 
 829         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
 830 
 831 err:
 832         dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
 833                           p_ramrod_res, ramrod_res_phys);
 834 
 835         return rc;
 836 }
 837 
 838 int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 839                       struct qed_rdma_qp *qp,
 840                       struct qed_rdma_query_qp_out_params *out_params)
 841 {
 842         struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
 843         struct roce_query_qp_req_output_params *p_req_ramrod_res;
 844         struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
 845         struct roce_query_qp_req_ramrod_data *p_req_ramrod;
 846         struct qed_sp_init_data init_data;
 847         dma_addr_t resp_ramrod_res_phys;
 848         dma_addr_t req_ramrod_res_phys;
 849         struct qed_spq_entry *p_ent;
 850         bool rq_err_state;
 851         bool sq_err_state;
 852         bool sq_draining;
 853         int rc = -ENOMEM;
 854 
 855         if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
 856                 /* We can't send ramrod to the fw since this qp wasn't offloaded
 857                  * to the fw yet
 858                  */
 859                 out_params->draining = false;
 860                 out_params->rq_psn = qp->rq_psn;
 861                 out_params->sq_psn = qp->sq_psn;
 862                 out_params->state = qp->cur_state;
 863 
  864                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP not offloaded - returning SW state\n");
 865                 return 0;
 866         }
 867 
 868         if (!(qp->resp_offloaded)) {
 869                 DP_NOTICE(p_hwfn,
 870                           "The responder's qp should be offloaded before requester's\n");
 871                 return -EINVAL;
 872         }
 873 
 874         /* Send a query responder ramrod to FW to get RQ-PSN and state */
 875         p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
 876             dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 877                                sizeof(*p_resp_ramrod_res),
 878                                &resp_ramrod_res_phys, GFP_KERNEL);
 879         if (!p_resp_ramrod_res) {
 880                 DP_NOTICE(p_hwfn,
 881                           "qed query qp failed: cannot allocate memory (ramrod)\n");
 882                 return rc;
 883         }
 884 
 885         /* Get SPQ entry */
 886         memset(&init_data, 0, sizeof(init_data));
 887         init_data.cid = qp->icid;
 888         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 889         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 890         rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
 891                                  PROTOCOLID_ROCE, &init_data);
 892         if (rc)
 893                 goto err_resp;
 894 
 895         p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
 896         DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
 897 
 898         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 899         if (rc)
 900                 goto err_resp;
 901 
 902         out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
 903         rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
 904                                  ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
 905 
 906         dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
 907                           p_resp_ramrod_res, resp_ramrod_res_phys);
 908 
 909         if (!(qp->req_offloaded)) {
 910                 /* Don't send query qp for the requester */
 911                 out_params->sq_psn = qp->sq_psn;
 912                 out_params->draining = false;
 913 
 914                 if (rq_err_state)
 915                         qp->cur_state = QED_ROCE_QP_STATE_ERR;
 916 
 917                 out_params->state = qp->cur_state;
 918 
 919                 return 0;
 920         }
 921 
 922         /* Send a query requester ramrod to FW to get SQ-PSN and state */
 923         p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
 924                            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 925                                               sizeof(*p_req_ramrod_res),
 926                                               &req_ramrod_res_phys,
 927                                               GFP_KERNEL);
 928         if (!p_req_ramrod_res) {
 929                 rc = -ENOMEM;
 930                 DP_NOTICE(p_hwfn,
 931                           "qed query qp failed: cannot allocate memory (ramrod)\n");
 932                 return rc;
 933         }
 934 
 935         /* Get SPQ entry */
 936         init_data.cid = qp->icid + 1;
 937         rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
 938                                  PROTOCOLID_ROCE, &init_data);
 939         if (rc)
 940                 goto err_req;
 941 
 942         p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
 943         DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
 944 
 945         rc = qed_spq_post(p_hwfn, p_ent, NULL);
 946         if (rc)
 947                 goto err_req;
 948 
 949         out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
 950         sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
 951                                  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
 952         sq_draining =
 953                 GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
 954                           ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
 955 
 956         dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
 957                           p_req_ramrod_res, req_ramrod_res_phys);
 958 
 959         out_params->draining = false;
 960 
 961         if (rq_err_state || sq_err_state)
 962                 qp->cur_state = QED_ROCE_QP_STATE_ERR;
 963         else if (sq_draining)
 964                 out_params->draining = true;
 965         out_params->state = qp->cur_state;
 966 
 967         return 0;
 968 
 969 err_req:
 970         dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
 971                           p_req_ramrod_res, req_ramrod_res_phys);
 972         return rc;
 973 err_resp:
 974         dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
 975                           p_resp_ramrod_res, resp_ramrod_res_phys);
 976         return rc;
 977 }
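
A compact sketch of how the two query results fold into what the caller
sees: either error flag forces the ERR state, otherwise the requester's
draining flag is surfaced and the cached state is reported. The enum values
and the resolve_state() helper are illustrative, not driver definitions.

#include <stdbool.h>
#include <stdio.h>

enum qp_state { QP_STATE_RTS, QP_STATE_ERR };

static enum qp_state resolve_state(enum qp_state cur, bool rq_err,
                                   bool sq_err, bool sq_draining,
                                   bool *draining)
{
        *draining = false;
        if (rq_err || sq_err)
                return QP_STATE_ERR;
        if (sq_draining)
                *draining = true;
        return cur;
}

int main(void)
{
        bool draining;
        enum qp_state s;

        s = resolve_state(QP_STATE_RTS, false, false, true, &draining);
        printf("state=%d draining=%d\n", s, draining);  /* RTS, draining */
        return 0;
}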
 978 
 979 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 980 {
 981         u32 cq_prod;
 982         int rc;
 983 
 984         /* Destroys the specified QP */
 985         if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
 986             (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
 987             (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
 988                 DP_NOTICE(p_hwfn,
 989                           "QP must be in error, reset or init state before destroying it\n");
 990                 return -EINVAL;
 991         }
 992 
 993         if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
 994                 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
 995                                                       &cq_prod);
 996                 if (rc)
 997                         return rc;
 998 
 999                 /* Send destroy requester ramrod */
1000                 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
1001                 if (rc)
1002                         return rc;
1003         }
1004 
1005         return 0;
1006 }
1007 
1008 int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
1009                        struct qed_rdma_qp *qp,
1010                        enum qed_roce_qp_state prev_state,
1011                        struct qed_rdma_modify_qp_in_params *params)
1012 {
1013         int rc = 0;
1014 
1015         /* Perform additional operations according to the current state and the
1016          * next state
1017          */
1018         if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
1019              (prev_state == QED_ROCE_QP_STATE_RESET)) &&
1020             (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
1021                 /* Init->RTR or Reset->RTR */
1022                 rc = qed_roce_sp_create_responder(p_hwfn, qp);
1023                 return rc;
1024         } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
1025                    (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1026                 /* RTR-> RTS */
1027                 rc = qed_roce_sp_create_requester(p_hwfn, qp);
1028                 if (rc)
1029                         return rc;
1030 
1031                 /* Send modify responder ramrod */
1032                 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1033                                                   params->modify_flags);
1034                 return rc;
1035         } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
1036                    (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1037                 /* RTS->RTS */
1038                 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1039                                                   params->modify_flags);
1040                 if (rc)
1041                         return rc;
1042 
1043                 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1044                                                   params->modify_flags);
1045                 return rc;
1046         } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
1047                    (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
1048                 /* RTS->SQD */
1049                 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
1050                                                   params->modify_flags);
1051                 return rc;
1052         } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
1053                    (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
1054                 /* SQD->SQD */
1055                 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1056                                                   params->modify_flags);
1057                 if (rc)
1058                         return rc;
1059 
1060                 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1061                                                   params->modify_flags);
1062                 return rc;
1063         } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
1064                    (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1065                 /* SQD->RTS */
1066                 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1067                                                   params->modify_flags);
1068                 if (rc)
1069                         return rc;
1070 
1071                 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1072                                                   params->modify_flags);
1073 
1074                 return rc;
1075         } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
1076                 /* ->ERR */
1077                 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
1078                                                   params->modify_flags);
1079                 if (rc)
1080                         return rc;
1081 
1082                 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
1083                                                   params->modify_flags);
1084                 return rc;
1085         } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
1086                 /* Any state -> RESET */
1087                 u32 cq_prod;
1088 
1089                 /* Send destroy responder ramrod */
1090                 rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
1091                                                       qp,
1092                                                       &cq_prod);
1093 
1094                 if (rc)
1095                         return rc;
1096 
1097                 qp->cq_prod = cq_prod;
1098 
1099                 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
1100         } else {
 1101                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No ramrod required\n");
1102         }
1103 
1104         return rc;
1105 }
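
The chain of if/else blocks above is really a (prev_state, cur_state)
dispatch table. A standalone restatement of which ramrods each handled
transition issues; the enum and strings summarize the source and are not
driver symbols.

#include <stdio.h>

enum st { ST_RESET, ST_INIT, ST_RTR, ST_RTS, ST_SQD, ST_ERR };

static const char *ramrods_for(enum st prev, enum st cur)
{
        if ((prev == ST_INIT || prev == ST_RESET) && cur == ST_RTR)
                return "create responder";
        if (prev == ST_RTR && cur == ST_RTS)
                return "create requester + modify responder";
        if (prev == ST_RTS && cur == ST_RTS)
                return "modify responder + modify requester";
        if (prev == ST_RTS && cur == ST_SQD)
                return "modify requester (move_to_sqd)";
        if (prev == ST_SQD && (cur == ST_SQD || cur == ST_RTS))
                return "modify responder + modify requester";
        if (cur == ST_ERR)
                return "modify responder/requester (move_to_err)";
        if (cur == ST_RESET)
                return "destroy responder + destroy requester";
        return "no ramrod required";
}

int main(void)
{
        printf("RTR->RTS: %s\n", ramrods_for(ST_RTR, ST_RTS));
        printf("RTS->SQD: %s\n", ramrods_for(ST_RTS, ST_SQD));
        return 0;
}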
1106 
1107 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
1108 {
1109         struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1110         u32 start_cid, cid, xcid;
1111 
1112         /* an even icid belongs to a responder while an odd icid belongs to a
1113          * requester. The 'cid' received as an input can be either. We calculate
 1114          * "partner" icid and call it xcid. Only if both are free can the
 1115          * "cid" map be cleared.
1116          */
1117         start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
1118         cid = icid - start_cid;
1119         xcid = cid ^ 1;
1120 
1121         spin_lock_bh(&p_rdma_info->lock);
1122 
1123         qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
1124         if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
1125                 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
1126                 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
1127         }
1128 
1129         spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1130 }
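
The partner-cid trick above relies on XOR with 1 mapping an even (responder)
cid to the odd (requester) cid right after it, and back again. A two-line
demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t cid;

        for (cid = 4; cid < 8; cid++)
                printf("cid %u <-> xcid %u\n", (unsigned)cid,
                       (unsigned)(cid ^ 1));
        /* 4<->5, 5<->4, 6<->7, 7<->6 */
        return 0;
}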
1131 
1132 void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1133 {
1134         u8 val;
1135 
 1136         /* If any QPs are already active, we want to disable DPM, since their
 1137          * context information predates the latest DCBx update. Otherwise
 1138          * enable it.
1139          */
 1140         val = !!qed_rdma_allocated_qps(p_hwfn);
1141         p_hwfn->dcbx_no_edpm = (u8)val;
1142 
1143         qed_rdma_dpm_conf(p_hwfn, p_ptt);
1144 }
1145 
1146 int qed_roce_setup(struct qed_hwfn *p_hwfn)
1147 {
1148         return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
1149                                          qed_roce_async_event);
1150 }
1151 
1152 int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1153 {
1154         u32 ll2_ethertype_en;
1155 
1156         qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
1157 
1158         p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
1159 
1160         ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
1161         qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
1162                (ll2_ethertype_en | 0x01));
1163 
1164         if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
1165                 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
1166                 return -EINVAL;
1167         }
1168 
1169         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
1170         return 0;
1171 }
