root/drivers/infiniband/hw/qedr/qedr_roce_cm.c


DEFINITIONS

This source file includes the following definitions:
  1. qedr_inc_sw_gsi_cons
  2. qedr_store_gsi_qp_cq
  3. qedr_ll2_complete_tx_packet
  4. qedr_ll2_complete_rx_packet
  5. qedr_ll2_release_rx_packet
  6. qedr_destroy_gsi_cq
  7. qedr_check_gsi_qp_attrs
  8. qedr_ll2_post_tx
  9. qedr_ll2_stop
  10. qedr_ll2_start
  11. qedr_create_gsi_qp
  12. qedr_destroy_gsi_qp
  13. qedr_gsi_build_header
  14. qedr_gsi_build_packet
  15. qedr_gsi_post_send
  16. qedr_gsi_post_recv
  17. qedr_gsi_poll_cq

/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/qed/qed_if.h>
#include <linux/qed/qed_rdma_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
#include "qedr_roce_cm.h"

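/*
 * Advance the software GSI consumer index of a queue, wrapping at
 * max_wr: the GSI queues are managed as circular buffers.
 */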
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
{
        info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
}

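/*
 * Cache the GSI QP and its send/recv CQs in the device structure so that
 * the LL2 completion callbacks and the CQ poll path can reach them.
 */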
void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
                          struct ib_qp_init_attr *attrs)
{
        dev->gsi_qp_created = 1;
        dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
        dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
        dev->gsi_qp = qp;
}

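/*
 * LL2 TX completion callback: free the DMA-coherent UD header buffer and
 * the packet descriptor, advance the SQ GSI consumer index and, if one is
 * registered, invoke the send CQ's completion handler.
 */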
static void qedr_ll2_complete_tx_packet(void *cxt, u8 connection_handle,
                                        void *cookie,
                                        dma_addr_t first_frag_addr,
                                        bool b_last_fragment,
                                        bool b_last_packet)
{
        struct qedr_dev *dev = (struct qedr_dev *)cxt;
        struct qed_roce_ll2_packet *pkt = cookie;
        struct qedr_cq *cq = dev->gsi_sqcq;
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;

        DP_DEBUG(dev, QEDR_MSG_GSI,
                 "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
                 dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
                 cq->ibcq.comp_handler ? "Yes" : "No");

        dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
                          pkt->header.baddr);
        kfree(pkt);

        spin_lock_irqsave(&qp->q_lock, flags);
        qedr_inc_sw_gsi_cons(&qp->sq);
        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}

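/*
 * LL2 RX completion callback: record the completion status, VLAN, data
 * length and source MAC of the received packet, advance the RQ GSI
 * consumer index and, if one is registered, invoke the recv CQ's
 * completion handler.
 */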
static void qedr_ll2_complete_rx_packet(void *cxt,
                                        struct qed_ll2_comp_rx_data *data)
{
        struct qedr_dev *dev = (struct qedr_dev *)cxt;
        struct qedr_cq *cq = dev->gsi_rqcq;
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;

        spin_lock_irqsave(&qp->q_lock, flags);

        qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
                -EINVAL : 0;
        qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
        /* note: length is the data length only, i.e. the GRH is excluded */
        qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
                data->length.data_length;
        *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) =
                ntohl(data->opaque_data_0);
        *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) =
                ntohs((u16)data->opaque_data_1);

        qedr_inc_sw_gsi_cons(&qp->rq);

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
}

static void qedr_ll2_release_rx_packet(void *cxt, u8 connection_handle,
                                       void *cookie, dma_addr_t rx_buf_addr,
                                       bool b_last_packet)
{
        /* Do nothing... */
}

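/*
 * Destroy the firmware CQ objects backing the GSI QP: always the send CQ,
 * and the recv CQ as well when it is a separate CQ.
 */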
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
                                struct ib_qp_init_attr *attrs)
{
        struct qed_rdma_destroy_cq_in_params iparams;
        struct qed_rdma_destroy_cq_out_params oparams;
        struct qedr_cq *cq;

        cq = get_qedr_cq(attrs->send_cq);
        iparams.icid = cq->icid;
        dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
        dev->ops->common->chain_free(dev->cdev, &cq->pbl);

        cq = get_qedr_cq(attrs->recv_cq);
        /* if a dedicated recv_cq was used, delete it too */
        if (iparams.icid != cq->icid) {
                iparams.icid = cq->icid;
                dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
                dev->ops->common->chain_free(dev->cdev, &cq->pbl);
        }
}

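/* Validate the requested GSI QP capabilities against the driver limits. */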
static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
                                          struct ib_qp_init_attr *attrs)
{
        if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
                DP_ERR(dev,
                       "create gsi qp: failed. max_recv_sge is larger than the max %d>%d\n",
                       attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
                return -EINVAL;
        }

        if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
                DP_ERR(dev,
                       "create gsi qp: failed. max_recv_wr is too large %d>%d\n",
                       attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
                return -EINVAL;
        }

        if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
                DP_ERR(dev,
                       "create gsi qp: failed. max_send_wr is too large %d>%d\n",
                       attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
                return -EINVAL;
        }

        return 0;
}

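/*
 * Post a GSI packet to the LL2 TX queue: the UD header goes out as the
 * first BD, followed by one fragment per payload SGE. If posting the
 * header fails, the packet resources are freed here; if a later fragment
 * fails, the partially posted packet must wait for its TX completion.
 */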
static int qedr_ll2_post_tx(struct qedr_dev *dev,
                            struct qed_roce_ll2_packet *pkt)
{
        enum qed_ll2_roce_flavor_type roce_flavor;
        struct qed_ll2_tx_pkt_info ll2_tx_pkt;
        int rc;
        int i;

        memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt));

        roce_flavor = (pkt->roce_mode == ROCE_V1) ?
            QED_LL2_ROCE : QED_LL2_RROCE;

        if (pkt->roce_mode == ROCE_V2_IPV4)
                ll2_tx_pkt.enable_ip_cksum = 1;

        ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg;
        ll2_tx_pkt.vlan = 0;
        ll2_tx_pkt.tx_dest = pkt->tx_dest;
        ll2_tx_pkt.qed_roce_flavor = roce_flavor;
        ll2_tx_pkt.first_frag = pkt->header.baddr;
        ll2_tx_pkt.first_frag_len = pkt->header.len;
        ll2_tx_pkt.cookie = pkt;

        /* tx header */
        rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx,
                                             dev->gsi_ll2_handle,
                                             &ll2_tx_pkt, 1);
        if (rc) {
                /* TX failed while posting header - release resources */
                dma_free_coherent(&dev->pdev->dev, pkt->header.len,
                                  pkt->header.vaddr, pkt->header.baddr);
                kfree(pkt);

                DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc);
                return rc;
        }

        /* tx payload */
        for (i = 0; i < pkt->n_seg; i++) {
                rc = dev->ops->ll2_set_fragment_of_tx_packet(
                        dev->rdma_ctx,
                        dev->gsi_ll2_handle,
                        pkt->payload[i].baddr,
                        pkt->payload[i].len);

                if (rc) {
                        /* if this fails there is not much to do: part of the
                         * packet has already been posted, so the memory cannot
                         * be freed until the TX completion arrives
                         */
                        DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc);
                        return rc;
                }
        }

        return 0;
}

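/*
 * Tear down the GSI LL2 connection: remove the MAC filter, terminate and
 * release the connection, and mark the handle as unused.
 */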
static int qedr_ll2_stop(struct qedr_dev *dev)
{
        int rc;

        if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE)
                return 0;

        /* remove LL2 MAC address filter */
        rc = dev->ops->ll2_set_mac_filter(dev->cdev,
                                          dev->gsi_ll2_mac_address, NULL);

        rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx,
                                                dev->gsi_ll2_handle);
        if (rc)
                DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc);

        dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE;

        return rc;
}

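/*
 * Acquire and establish an LL2 connection for GSI traffic, wire up the
 * RX/TX completion callbacks and add a MAC filter for the netdev address.
 */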
static int qedr_ll2_start(struct qedr_dev *dev,
                          struct ib_qp_init_attr *attrs, struct qedr_qp *qp)
{
        struct qed_ll2_acquire_data data;
        struct qed_ll2_cbs cbs;
        int rc;

        /* configure and start LL2 */
        cbs.rx_comp_cb = qedr_ll2_complete_rx_packet;
        cbs.tx_comp_cb = qedr_ll2_complete_tx_packet;
        cbs.rx_release_cb = qedr_ll2_release_rx_packet;
        cbs.tx_release_cb = qedr_ll2_complete_tx_packet;
        cbs.cookie = dev;

        memset(&data, 0, sizeof(data));
        data.input.conn_type = QED_LL2_TYPE_ROCE;
        data.input.mtu = dev->ndev->mtu;
        data.input.rx_num_desc = attrs->cap.max_recv_wr;
        data.input.rx_drop_ttl0_flg = true;
        data.input.rx_vlan_removal_en = false;
        data.input.tx_num_desc = attrs->cap.max_send_wr;
        data.input.tx_tc = 0;
        data.input.tx_dest = QED_LL2_TX_DEST_NW;
        data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
        data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
        data.input.gsi_enable = 1;
        data.p_connection_handle = &dev->gsi_ll2_handle;
        data.cbs = &cbs;

        rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data);
        if (rc) {
                DP_ERR(dev,
                       "ll2 start: failed to acquire LL2 connection (rc=%d)\n",
                       rc);
                return rc;
        }

        rc = dev->ops->ll2_establish_connection(dev->rdma_ctx,
                                                dev->gsi_ll2_handle);
        if (rc) {
                DP_ERR(dev,
                       "ll2 start: failed to establish LL2 connection (rc=%d)\n",
                       rc);
                goto err1;
        }

        rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr);
        if (rc)
                goto err2;

        return 0;

err2:
        dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle);
err1:
        dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle);

        return rc;
}

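/*
 * Create the GSI (QP1) QP: start the LL2 connection, allocate the software
 * arrays that track outstanding send/recv work requests, cache the QP and
 * its CQs in the device, and take the GSI CQs over from the firmware.
 */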
struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
                                 struct ib_qp_init_attr *attrs,
                                 struct qedr_qp *qp)
{
        int rc;

        rc = qedr_check_gsi_qp_attrs(dev, attrs);
        if (rc)
                return ERR_PTR(rc);

        rc = qedr_ll2_start(dev, attrs, qp);
        if (rc) {
                DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
                return ERR_PTR(rc);
        }

        /* create QP */
        qp->ibqp.qp_num = 1;
        qp->rq.max_wr = attrs->cap.max_recv_wr;
        qp->sq.max_wr = attrs->cap.max_send_wr;

        qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
                                GFP_KERNEL);
        if (!qp->rqe_wr_id)
                goto err;
        qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
                                GFP_KERNEL);
        if (!qp->wqe_wr_id)
                goto err;

        qedr_store_gsi_qp_cq(dev, qp, attrs);
        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

        /* the GSI CQs are handled by the driver so remove them from the FW */
        qedr_destroy_gsi_cq(dev, attrs);
        dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
        dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;

        DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);

        return &qp->ibqp;

err:
        kfree(qp->rqe_wr_id);

        rc = qedr_ll2_stop(dev);
        if (rc)
                DP_ERR(dev, "create gsi qp: failed destroy on create\n");

        return ERR_PTR(-ENOMEM);
}

int qedr_destroy_gsi_qp(struct qedr_dev *dev)
{
        return qedr_ll2_stop(dev);
}

#define QEDR_MAX_UD_HEADER_SIZE (100)
#define QEDR_GSI_QPN            (1)
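/*
 * Build the UD header for a GSI send WR: Ethernet/VLAN, then GRH (RoCE v1
 * and v2 IPv6) or IPv4 (RoCE v2 IPv4), BTH, DETH and, for RoCE v2, UDP.
 * The detected RoCE mode is returned through roce_mode.
 */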
static inline int qedr_gsi_build_header(struct qedr_dev *dev,
                                        struct qedr_qp *qp,
                                        const struct ib_send_wr *swr,
                                        struct ib_ud_header *udh,
                                        int *roce_mode)
{
        bool has_vlan = false, has_grh_ipv6 = true;
        struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
        const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
        const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
        int send_size = 0;
        u16 vlan_id = 0;
        u16 ether_type;
        int rc;
        int ip_ver = 0;
        bool has_udp = false;
        int i;

        rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
        if (rc)
                return rc;

        if (vlan_id < VLAN_CFI_MASK)
                has_vlan = true;

        send_size = 0;
        for (i = 0; i < swr->num_sge; ++i)
                send_size += swr->sg_list[i].length;

        has_udp = (sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
        if (!has_udp) {
                /* RoCE v1 */
                ether_type = ETH_P_IBOE;
                *roce_mode = ROCE_V1;
        } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
                /* RoCE v2 IPv4 */
                ip_ver = 4;
                ether_type = ETH_P_IP;
                has_grh_ipv6 = false;
                *roce_mode = ROCE_V2_IPV4;
        } else {
                /* RoCE v2 IPv6 */
                ip_ver = 6;
                ether_type = ETH_P_IPV6;
                *roce_mode = ROCE_V2_IPV6;
        }

        rc = ib_ud_header_init(send_size, false, true, has_vlan,
                               has_grh_ipv6, ip_ver, has_udp, 0, udh);
        if (rc) {
                DP_ERR(dev, "gsi post send: failed to init header\n");
                return rc;
        }

        /* ENET + VLAN headers */
        ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
        ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
        if (has_vlan) {
                udh->eth.type = htons(ETH_P_8021Q);
                udh->vlan.tag = htons(vlan_id);
                udh->vlan.type = htons(ether_type);
        } else {
                udh->eth.type = htons(ether_type);
        }

        /* BTH */
        udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
        udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
        udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
        udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
        udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;

        /* DETH */
        udh->deth.qkey = htonl(0x80010000);
        udh->deth.source_qpn = htonl(QEDR_GSI_QPN);

        if (has_grh_ipv6) {
                /* GRH / IPv6 header */
                udh->grh.traffic_class = grh->traffic_class;
                udh->grh.flow_label = grh->flow_label;
                udh->grh.hop_limit = grh->hop_limit;
                udh->grh.destination_gid = grh->dgid;
                memcpy(&udh->grh.source_gid.raw, sgid_attr->gid.raw,
                       sizeof(udh->grh.source_gid.raw));
        } else {
                /* IPv4 header */
                u32 ipv4_addr;

                udh->ip4.protocol = IPPROTO_UDP;
                udh->ip4.tos = htonl(grh->flow_label);
                udh->ip4.frag_off = htons(IP_DF);
                udh->ip4.ttl = grh->hop_limit;

                ipv4_addr = qedr_get_ipv4_from_gid(sgid_attr->gid.raw);
                udh->ip4.saddr = ipv4_addr;
                ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
                udh->ip4.daddr = ipv4_addr;
                /* note: checksum is calculated by the device */
        }

        /* UDP */
        if (has_udp) {
                udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
                udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
                udh->udp.csum = 0;
                /* the UDP length field is left untouched, hence zero */
        }
        return 0;
}

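/*
 * Build an LL2 packet for a GSI send WR: pack the UD header into a
 * DMA-coherent buffer, choose loopback vs. network TX by comparing the
 * source and destination MACs, and attach the payload SGEs by reference.
 */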
static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
                                        struct qedr_qp *qp,
                                        const struct ib_send_wr *swr,
                                        struct qed_roce_ll2_packet **p_packet)
{
        u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
        struct qed_roce_ll2_packet *packet;
        struct pci_dev *pdev = dev->pdev;
        int roce_mode, header_size;
        struct ib_ud_header udh;
        int i, rc;

        *p_packet = NULL;

        rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
        if (rc)
                return rc;

        header_size = ib_ud_header_pack(&udh, &ud_header_buffer);

        packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
        if (!packet)
                return -ENOMEM;

        packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
                                                  &packet->header.baddr,
                                                  GFP_ATOMIC);
        if (!packet->header.vaddr) {
                kfree(packet);
                return -ENOMEM;
        }

        if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
                packet->tx_dest = QED_LL2_TX_DEST_LB;
        else
                packet->tx_dest = QED_LL2_TX_DEST_NW;

        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
        packet->header.len = header_size;
        packet->n_seg = swr->num_sge;
        for (i = 0; i < packet->n_seg; i++) {
                packet->payload[i].baddr = swr->sg_list[i].addr;
                packet->payload[i].len = swr->sg_list[i].length;
        }

        *p_packet = packet;

        return 0;
}

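/*
 * Post a send WR on the GSI QP. Only a single IB_WR_SEND WR per call is
 * supported; a chained WR is rejected and returned through bad_wr.
 */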
int qedr_gsi_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                       const struct ib_send_wr **bad_wr)
{
        struct qed_roce_ll2_packet *pkt = NULL;
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        struct qedr_dev *dev = qp->dev;
        unsigned long flags;
        int rc;

        if (qp->state != QED_ROCE_QP_STATE_RTS) {
                *bad_wr = wr;
                DP_ERR(dev,
                       "gsi post send: failed to post. QP state is %d, not QED_ROCE_QP_STATE_RTS\n",
                       qp->state);
                return -EINVAL;
        }

        if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
                DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
                       wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
                rc = -EINVAL;
                goto err;
        }

        if (wr->opcode != IB_WR_SEND) {
                DP_ERR(dev,
                       "gsi post send: failed due to unsupported opcode %d\n",
                       wr->opcode);
                rc = -EINVAL;
                goto err;
        }

        spin_lock_irqsave(&qp->q_lock, flags);

        rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
        if (rc) {
                spin_unlock_irqrestore(&qp->q_lock, flags);
                goto err;
        }

        rc = qedr_ll2_post_tx(dev, pkt);

        if (!rc) {
                qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
                qedr_inc_sw_prod(&qp->sq);
                DP_DEBUG(qp->dev, QEDR_MSG_GSI,
                         "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
                         wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
        } else {
                DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
                rc = -EAGAIN;
                *bad_wr = wr;
        }

        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (wr->next) {
                DP_ERR(dev,
                       "gsi post send: failed second WR. Only one WR may be passed at a time\n");
                *bad_wr = wr->next;
                rc = -EINVAL;
        }

        return rc;

err:
        *bad_wr = wr;
        return rc;
}

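/* Post the receive buffers of a WR chain directly to the LL2 RX queue. */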
int qedr_gsi_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                       const struct ib_recv_wr **bad_wr)
{
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        unsigned long flags;
        int rc = 0;

        if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
            (qp->state != QED_ROCE_QP_STATE_RTS)) {
                *bad_wr = wr;
                DP_ERR(dev,
                       "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
                       qp->state);
                return -EINVAL;
        }

        spin_lock_irqsave(&qp->q_lock, flags);

        while (wr) {
                if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
                        DP_ERR(dev,
                               "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
                               wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
                        goto err;
                }

                rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
                                                  dev->gsi_ll2_handle,
                                                  wr->sg_list[0].addr,
                                                  wr->sg_list[0].length,
                                                  NULL /* cookie */,
                                                  1 /* notify_fw */);
                if (rc) {
                        DP_ERR(dev,
                               "gsi post recv: failed to post rx buffer (rc=%d)\n",
                               rc);
                        goto err;
                }

                memset(&qp->rqe_wr_id[qp->rq.prod], 0,
                       sizeof(qp->rqe_wr_id[qp->rq.prod]));
                qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
                qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;

                qedr_inc_sw_prod(&qp->rq);

                wr = wr->next;
        }

        spin_unlock_irqrestore(&qp->q_lock, flags);

        return rc;
err:
        spin_unlock_irqrestore(&qp->q_lock, flags);
        *bad_wr = wr;
        return -ENOMEM;
}

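/*
 * Poll the GSI CQ: drain RX completions first, then TX completions,
 * translating the software-tracked queue state into IB work completions.
 */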
int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qedr_cq *cq = get_qedr_cq(ibcq);
        struct qedr_qp *qp = dev->gsi_qp;
        unsigned long flags;
        u16 vlan_id;
        int i = 0;

        spin_lock_irqsave(&cq->cq_lock, flags);

        while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
                memset(&wc[i], 0, sizeof(*wc));

                wc[i].qp = &qp->ibqp;
                wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
                wc[i].opcode = IB_WC_RECV;
                wc[i].pkey_index = 0;
                wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
                    IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
                /* 0 - currently only one recv sg is supported */
                wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
                wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
                ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
                wc[i].wc_flags |= IB_WC_WITH_SMAC;

                vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
                          VLAN_VID_MASK;
                if (vlan_id) {
                        wc[i].wc_flags |= IB_WC_WITH_VLAN;
                        wc[i].vlan_id = vlan_id;
                        wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
                                    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                }

                qedr_inc_sw_cons(&qp->rq);
                i++;
        }

        while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
                memset(&wc[i], 0, sizeof(*wc));

                wc[i].qp = &qp->ibqp;
                wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
                wc[i].opcode = IB_WC_SEND;
                wc[i].status = IB_WC_SUCCESS;

                qedr_inc_sw_cons(&qp->sq);
                i++;
        }

        spin_unlock_irqrestore(&cq->cq_lock, flags);

        DP_DEBUG(dev, QEDR_MSG_GSI,
                 "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%d, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
                 num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
                 qp->sq.gsi_cons, qp->ibqp.qp_num);

        return i;
}
