root/drivers/infiniband/hw/i40iw/i40iw_puda.c


DEFINITIONS

This source file includes the following definitions:
  1. i40iw_puda_get_listbuf
  2. i40iw_puda_get_bufpool
  3. i40iw_puda_ret_bufpool
  4. i40iw_puda_post_recvbuf
  5. i40iw_puda_replenish_rq
  6. i40iw_puda_alloc_buf
  7. i40iw_puda_dele_buf
  8. i40iw_puda_get_next_send_wqe
  9. i40iw_puda_poll_info
  10. i40iw_puda_poll_completion
  11. i40iw_puda_send
  12. i40iw_puda_send_buf
  13. i40iw_puda_qp_setctx
  14. i40iw_puda_qp_wqe
  15. i40iw_puda_qp_create
  16. i40iw_puda_cq_wqe
  17. i40iw_puda_cq_create
  18. i40iw_puda_free_qp
  19. i40iw_puda_free_cq
  20. i40iw_puda_dele_resources
  21. i40iw_puda_allocbufs
  22. i40iw_puda_create_rsrc
  23. i40iw_ilq_putback_rcvbuf
  24. i40iw_ieq_get_fpdu_length
  25. i40iw_ieq_copy_to_txbuf
  26. i40iw_ieq_setup_tx_buf
  27. i40iw_ieq_check_first_buf
  28. i40iw_ieq_compl_pfpdu
  29. i40iw_ieq_create_pbufl
  30. i40iw_ieq_handle_partial
  31. i40iw_ieq_process_buf
  32. i40iw_ieq_process_fpdus
  33. i40iw_ieq_handle_exception
  34. i40iw_ieq_receive
  35. i40iw_ieq_tx_compl
  36. i40iw_ieq_cleanup_qp

   1 /*******************************************************************************
   2 *
   3 * Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
   4 *
   5 * This software is available to you under a choice of one of two
   6 * licenses.  You may choose to be licensed under the terms of the GNU
   7 * General Public License (GPL) Version 2, available from the file
   8 * COPYING in the main directory of this source tree, or the
   9 * OpenFabrics.org BSD license below:
  10 *
  11 *   Redistribution and use in source and binary forms, with or
  12 *   without modification, are permitted provided that the following
  13 *   conditions are met:
  14 *
  15 *    - Redistributions of source code must retain the above
  16 *       copyright notice, this list of conditions and the following
  17 *       disclaimer.
  18 *
  19 *    - Redistributions in binary form must reproduce the above
  20 *       copyright notice, this list of conditions and the following
  21 *       disclaimer in the documentation and/or other materials
  22 *       provided with the distribution.
  23 *
  24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31 * SOFTWARE.
  32 *
  33 *******************************************************************************/
  34 
  35 #include "i40iw_osdep.h"
  36 #include "i40iw_register.h"
  37 #include "i40iw_status.h"
  38 #include "i40iw_hmc.h"
  39 
  40 #include "i40iw_d.h"
  41 #include "i40iw_type.h"
  42 #include "i40iw_p.h"
  43 #include "i40iw_puda.h"
  44 
  45 static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
  46                               struct i40iw_puda_buf *buf);
  47 static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
  48 static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
  49 static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
  50                                                       *rsrc, bool initial);
  51 /**
  52  * i40iw_puda_get_listbuf - get buffer from puda list
  53  * @list: list to use for buffers (ILQ or IEQ)
  54  */
  55 static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
  56 {
  57         struct i40iw_puda_buf *buf = NULL;
  58 
  59         if (!list_empty(list)) {
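                      /* the cast assumes 'list' is the first member of i40iw_puda_buf */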
  60                 buf = (struct i40iw_puda_buf *)list->next;
   61                 list_del(&buf->list);
  62         }
  63         return buf;
  64 }
  65 
  66 /**
   67  * i40iw_puda_get_bufpool - get a buffer from the resource's free pool
  68  * @rsrc: resource to use for buffer
  69  */
  70 struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
  71 {
  72         struct i40iw_puda_buf *buf = NULL;
  73         struct list_head *list = &rsrc->bufpool;
  74         unsigned long   flags;
  75 
  76         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  77         buf = i40iw_puda_get_listbuf(list);
  78         if (buf)
  79                 rsrc->avail_buf_count--;
  80         else
  81                 rsrc->stats_buf_alloc_fail++;
  82         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  83         return buf;
  84 }
  85 
  86 /**
  87  * i40iw_puda_ret_bufpool - return buffer to rsrc list
  88  * @rsrc: resource to use for buffer
   89  * @buf: buffer to return to the resource pool
  90  */
  91 void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
  92                             struct i40iw_puda_buf *buf)
  93 {
  94         unsigned long   flags;
  95 
  96         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  97         list_add(&buf->list, &rsrc->bufpool);
  98         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
  99         rsrc->avail_buf_count++;
 100 }
 101 
 102 /**
 103  * i40iw_puda_post_recvbuf - set wqe for rcv buffer
 104  * @rsrc: resource ptr
 105  * @wqe_idx: wqe index to use
 106  * @buf: puda buffer for rcv q
 107  * @initial: flag if during init time
 108  */
 109 static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
 110                                     struct i40iw_puda_buf *buf, bool initial)
 111 {
 112         u64 *wqe;
 113         struct i40iw_sc_qp *qp = &rsrc->qp;
 114         u64 offset24 = 0;
 115 
 116         qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
 117         wqe = qp->qp_uk.rq_base[wqe_idx].elem;
 118         i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 119                     "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
 120                     wqe_idx, buf, wqe);
 121         if (!initial)
 122                 get_64bit_val(wqe, 24, &offset24);
 123 
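              /* toggle the valid bit; on the initial post it is simply set */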
 124         offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
 125 
 126         set_64bit_val(wqe, 0, buf->mem.pa);
 127         set_64bit_val(wqe, 8,
 128                       LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
 129         i40iw_insert_wqe_hdr(wqe, offset24);
 130 }
 131 
 132 /**
 133  * i40iw_puda_replenish_rq - post rcv buffers
 134  * @rsrc: resource to use for buffer
 135  * @initial: flag if during init time
 136  */
 137 static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
 138                                                       bool initial)
 139 {
 140         u32 i;
 141         u32 invalid_cnt = rsrc->rxq_invalid_cnt;
 142         struct i40iw_puda_buf *buf = NULL;
 143 
 144         for (i = 0; i < invalid_cnt; i++) {
 145                 buf = i40iw_puda_get_bufpool(rsrc);
 146                 if (!buf)
 147                         return I40IW_ERR_list_empty;
 148                 i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
 149                                         initial);
 150                 rsrc->rx_wqe_idx =
 151                     ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
 152                 rsrc->rxq_invalid_cnt--;
 153         }
 154         return 0;
 155 }
 156 
 157 /**
 158  * i40iw_puda_alloc_buf - allocate mem for buffer
 159  * @dev: iwarp device
 160  * @length: length of buffer
 161  */
 162 static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
 163                                                    u32 length)
 164 {
 165         struct i40iw_puda_buf *buf = NULL;
 166         struct i40iw_virt_mem buf_mem;
 167         enum i40iw_status_code ret;
 168 
 169         ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
 170                                       sizeof(struct i40iw_puda_buf));
 171         if (ret) {
 172                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
 173                             "%s: error mem for buf\n", __func__);
 174                 return NULL;
 175         }
 176         buf = (struct i40iw_puda_buf *)buf_mem.va;
 177         ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
 178         if (ret) {
 179                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
 180                             "%s: error dma mem for buf\n", __func__);
 181                 i40iw_free_virt_mem(dev->hw, &buf_mem);
 182                 return NULL;
 183         }
 184         buf->buf_mem.va = buf_mem.va;
 185         buf->buf_mem.size = buf_mem.size;
 186         return buf;
 187 }
 188 
 189 /**
 190  * i40iw_puda_dele_buf - delete buffer back to system
 191  * @dev: iwarp device
 192  * @buf: buffer to free
 193  */
 194 static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
 195                                 struct i40iw_puda_buf *buf)
 196 {
 197         i40iw_free_dma_mem(dev->hw, &buf->mem);
 198         i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
 199 }
 200 
 201 /**
 202  * i40iw_puda_get_next_send_wqe - return next wqe for processing
 203  * @qp: puda qp for wqe
 204  * @wqe_idx: wqe index for caller
 205  */
 206 static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
 207 {
 208         u64 *wqe = NULL;
 209         enum i40iw_status_code ret_code = 0;
 210 
 211         *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
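              /* a head index of 0 means the ring wrapped; flip the SQ polarity */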
 212         if (!*wqe_idx)
 213                 qp->swqe_polarity = !qp->swqe_polarity;
 214         I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
 215         if (ret_code)
 216                 return wqe;
 217         wqe = qp->sq_base[*wqe_idx].elem;
 218 
 219         return wqe;
 220 }
 221 
 222 /**
 223  * i40iw_puda_poll_info - poll cq for completion
 224  * @cq: cq for poll
 225  * @info: info return for successful completion
 226  */
 227 static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
 228                                                    struct i40iw_puda_completion_info *info)
 229 {
 230         u64 qword0, qword2, qword3;
 231         u64 *cqe;
 232         u64 comp_ctx;
 233         bool valid_bit;
 234         u32 major_err, minor_err;
 235         bool error;
 236 
 237         cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
 238         get_64bit_val(cqe, 24, &qword3);
 239         valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
 240 
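              /* a CQE is new only if its valid bit matches the expected polarity */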
 241         if (valid_bit != cq->cq_uk.polarity)
 242                 return I40IW_ERR_QUEUE_EMPTY;
 243 
 244         i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
 245         error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
 246         if (error) {
 247                 i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
 248                 major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
 249                 minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
 250                 info->compl_error = major_err << 16 | minor_err;
 251                 return I40IW_ERR_CQ_COMPL_ERROR;
 252         }
 253 
 254         get_64bit_val(cqe, 0, &qword0);
 255         get_64bit_val(cqe, 16, &qword2);
 256 
 257         info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
 258         info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
 259 
 260         get_64bit_val(cqe, 8, &comp_ctx);
 261         info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
 262         info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
 263 
 264         if (info->q_type == I40IW_CQE_QTYPE_RQ) {
 265                 info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
 266                 info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
 267                 info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
 268                 info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
 269         }
 270 
 271         return 0;
 272 }
 273 
 274 /**
 275  * i40iw_puda_poll_completion - processes completion for cq
 276  * @dev: iwarp device
 277  * @cq: cq getting interrupt
 278  * @compl_err: return any completion err
 279  */
 280 enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
 281                                                   struct i40iw_sc_cq *cq, u32 *compl_err)
 282 {
 283         struct i40iw_qp_uk *qp;
 284         struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
 285         struct i40iw_puda_completion_info info;
 286         enum i40iw_status_code ret = 0;
 287         struct i40iw_puda_buf *buf;
 288         struct i40iw_puda_rsrc *rsrc;
 289         void *sqwrid;
 290         u8 cq_type = cq->cq_type;
 291         unsigned long   flags;
 292 
 293         if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
 294                 rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq;
 295         } else {
  296                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s cq_type error\n", __func__);
 297                 return I40IW_ERR_BAD_PTR;
 298         }
 299         memset(&info, 0, sizeof(info));
 300         ret = i40iw_puda_poll_info(cq, &info);
 301         *compl_err = info.compl_error;
 302         if (ret == I40IW_ERR_QUEUE_EMPTY)
 303                 return ret;
 304         if (ret)
 305                 goto done;
 306 
 307         qp = info.qp;
 308         if (!qp || !rsrc) {
 309                 ret = I40IW_ERR_BAD_PTR;
 310                 goto done;
 311         }
 312 
 313         if (qp->qp_id != rsrc->qp_id) {
 314                 ret = I40IW_ERR_BAD_PTR;
 315                 goto done;
 316         }
 317 
 318         if (info.q_type == I40IW_CQE_QTYPE_RQ) {
 319                 buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
 320                 /* Get all the tcpip information in the buf header */
 321                 ret = i40iw_puda_get_tcpip_info(&info, buf);
 322                 if (ret) {
 323                         rsrc->stats_rcvd_pkt_err++;
 324                         if (cq_type == I40IW_CQ_TYPE_ILQ) {
 325                                 i40iw_ilq_putback_rcvbuf(&rsrc->qp,
 326                                                          info.wqe_idx);
 327                         } else {
 328                                 i40iw_puda_ret_bufpool(rsrc, buf);
 329                                 i40iw_puda_replenish_rq(rsrc, false);
 330                         }
 331                         goto done;
 332                 }
 333 
 334                 rsrc->stats_pkt_rcvd++;
 335                 rsrc->compl_rxwqe_idx = info.wqe_idx;
 336                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
 337                 rsrc->receive(rsrc->vsi, buf);
 338                 if (cq_type == I40IW_CQ_TYPE_ILQ)
 339                         i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
 340                 else
 341                         i40iw_puda_replenish_rq(rsrc, false);
 342 
 343         } else {
 344                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
 345                 sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
 346                 I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
 347                 rsrc->xmit_complete(rsrc->vsi, sqwrid);
 348                 spin_lock_irqsave(&rsrc->bufpool_lock, flags);
 349                 rsrc->tx_wqe_avail_cnt++;
 350                 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 351                 if (!list_empty(&rsrc->txpend))
 352                         i40iw_puda_send_buf(rsrc, NULL);
 353         }
 354 
 355 done:
 356         I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
 357         if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
 358                 cq_uk->polarity = !cq_uk->polarity;
 359         /* update cq tail in cq shadow memory also */
 360         I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
 361         set_64bit_val(cq_uk->shadow_area, 0,
 362                       I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
 363         return 0;
 364 }
 365 
 366 /**
  367  * i40iw_puda_send - build and post a send wqe for transmit
 368  * @qp: puda qp for send
 369  * @info: buffer information for transmit
 370  */
 371 enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 372                                        struct i40iw_puda_send_info *info)
 373 {
 374         u64 *wqe;
 375         u32 iplen, l4len;
 376         u64 header[2];
 377         u32 wqe_idx;
 378         u8 iipt;
 379 
  380         /* l4len = TCP header length in 32-bit words */
 381         l4len = info->tcplen >> 2;
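              /* iplen = IP header length in 32-bit words (5 = 20-byte IPv4,
               * 10 = 40-byte IPv6); iipt = hardware IP-type encoding
               */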
 382         if (info->ipv4) {
 383                 iipt = 3;
 384                 iplen = 5;
 385         } else {
 386                 iipt = 1;
 387                 iplen = 10;
 388         }
 389 
 390         wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
 391         if (!wqe)
 392                 return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
 393         qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
 394         /* Third line of WQE descriptor */
  395         /* the MACLEN field takes the MAC header length in 16-bit words */
 396         header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
 397                     LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
 398                     LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
 399                     LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
  400         /* Fourth line of WQE descriptor */
 401         header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
 402                     LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
 403                     LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
 404                     LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
 405 
 406         set_64bit_val(wqe, 0, info->paddr);
 407         set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 408         set_64bit_val(wqe, 16, header[0]);
 409 
 410         i40iw_insert_wqe_hdr(wqe, header[1]);
 411 
 412         i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
 413         i40iw_qp_post_wr(&qp->qp_uk);
 414         return 0;
 415 }
 416 
 417 /**
 418  * i40iw_puda_send_buf - transmit puda buffer
 419  * @rsrc: resource to use for buffer
  420  * @buf: puda buffer to transmit, or NULL to send a queued pending buffer
 421  */
 422 void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
 423 {
 424         struct i40iw_puda_send_info info;
 425         enum i40iw_status_code ret = 0;
 426         unsigned long   flags;
 427 
 428         spin_lock_irqsave(&rsrc->bufpool_lock, flags);
  429         /* if no wqe is available, or this is a new buffer and others
  430          * are already pending, the new buffer must be queued
  431          */
 432         if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
 433                 list_add_tail(&buf->list, &rsrc->txpend);
 434                 spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 435                 rsrc->stats_sent_pkt_q++;
 436                 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
 437                         i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 438                                     "%s: adding to txpend\n", __func__);
 439                 return;
 440         }
 441         rsrc->tx_wqe_avail_cnt--;
 442         /* if we are coming from a completion and have pending buffers
  443          * then get one from the pending list
 444          */
 445         if (!buf) {
 446                 buf = i40iw_puda_get_listbuf(&rsrc->txpend);
 447                 if (!buf)
 448                         goto done;
 449         }
 450 
 451         info.scratch = (void *)buf;
 452         info.paddr = buf->mem.pa;
 453         info.len = buf->totallen;
 454         info.tcplen = buf->tcphlen;
 455         info.maclen = buf->maclen;
 456         info.ipv4 = buf->ipv4;
 457         info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
 458 
 459         ret = i40iw_puda_send(&rsrc->qp, &info);
 460         if (ret) {
 461                 rsrc->tx_wqe_avail_cnt++;
 462                 rsrc->stats_sent_pkt_q++;
 463                 list_add(&buf->list, &rsrc->txpend);
 464                 if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
 465                         i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
 466                                     "%s: adding to puda_send\n", __func__);
 467         } else {
 468                 rsrc->stats_pkt_sent++;
 469         }
 470 done:
 471         spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
 472 }
 473 
 474 /**
 475  * i40iw_puda_qp_setctx - during init, set qp's context
 476  * @rsrc: qp's resource
 477  */
 478 static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
 479 {
 480         struct i40iw_sc_qp *qp = &rsrc->qp;
 481         u64 *qp_ctx = qp->hw_host_ctx;
 482 
 483         set_64bit_val(qp_ctx, 8, qp->sq_pa);
 484         set_64bit_val(qp_ctx, 16, qp->rq_pa);
 485 
 486         set_64bit_val(qp_ctx, 24,
 487                       LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
 488                       LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
 489 
 490         set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE));
 491         set_64bit_val(qp_ctx, 56, 0);
 492         set_64bit_val(qp_ctx, 64, 1);
 493 
 494         set_64bit_val(qp_ctx, 136,
 495                       LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
 496                       LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
 497 
 498         set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
 499 
 500         set_64bit_val(qp_ctx, 168,
 501                       LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
 502 
 503         set_64bit_val(qp_ctx, 176,
 504                       LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
 505                       LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
 506                       LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
 507 
 508         i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
 509                         qp_ctx, I40IW_QP_CTX_SIZE);
 510 }
 511 
 512 /**
 513  * i40iw_puda_qp_wqe - setup wqe for qp create
  514  * @dev: iwarp device
       * @qp: qp being created
 515  */
 516 static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
 517 {
 518         struct i40iw_sc_cqp *cqp;
 519         u64 *wqe;
 520         u64 header;
 521         struct i40iw_ccq_cqe_info compl_info;
 522         enum i40iw_status_code status = 0;
 523 
 524         cqp = dev->cqp;
 525         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
 526         if (!wqe)
 527                 return I40IW_ERR_RING_FULL;
 528 
 529         set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
 530         set_64bit_val(wqe, 40, qp->shadow_area_pa);
 531         header = qp->qp_uk.qp_id |
 532                  LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
 533                  LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
 534                  LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
 535                  LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
 536                  LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 537 
 538         i40iw_insert_wqe_hdr(wqe, header);
 539 
  540         i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA QP CREATE WQE", wqe, 32);
 541         i40iw_sc_cqp_post_sq(cqp);
 542         status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 543                                                     I40IW_CQP_OP_CREATE_QP,
 544                                                     &compl_info);
 545         return status;
 546 }
 547 
 548 /**
 549  * i40iw_puda_qp_create - create qp for resource
 550  * @rsrc: resource to use for buffer
 551  */
 552 static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
 553 {
 554         struct i40iw_sc_qp *qp = &rsrc->qp;
 555         struct i40iw_qp_uk *ukqp = &qp->qp_uk;
 556         enum i40iw_status_code ret = 0;
 557         u32 sq_size, rq_size, t_size;
 558         struct i40iw_dma_mem *mem;
 559 
 560         sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
 561         rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
 562         t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
 563                   I40IW_QP_CTX_SIZE);
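              /* the single allocation is laid out as SQ | RQ | shadow area | QP context */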
 564         /* Get page aligned memory */
  565         ret = i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
  566                                      I40IW_HW_PAGE_SIZE);
 568         if (ret) {
 569                 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
 570                 return ret;
 571         }
 572 
 573         mem = &rsrc->qpmem;
 574         memset(mem->va, 0, t_size);
 575         qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
 576         qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
 577         qp->pd = &rsrc->sc_pd;
 578         qp->qp_type = I40IW_QP_TYPE_UDA;
 579         qp->dev = rsrc->dev;
 580         qp->back_qp = (void *)rsrc;
 581         qp->sq_pa = mem->pa;
 582         qp->rq_pa = qp->sq_pa + sq_size;
 583         qp->vsi = rsrc->vsi;
 584         ukqp->sq_base = mem->va;
 585         ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
 586         ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
 587         qp->shadow_area_pa = qp->rq_pa + rq_size;
 588         qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
 589         qp->hw_host_ctx_pa =
 590                 qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
 591         ukqp->qp_id = rsrc->qp_id;
 592         ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
 593         ukqp->rq_wrid_array = rsrc->rq_wrid_array;
 594 
 596         ukqp->sq_size = rsrc->sq_size;
 597         ukqp->rq_size = rsrc->rq_size;
 598 
 599         I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
 600         I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
 601         I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
 602 
 603         if (qp->pd->dev->is_pf)
 604                 ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
 605                                                     I40E_PFPE_WQEALLOC);
 606         else
 607                 ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
 608                                                     I40E_VFPE_WQEALLOC1);
 609 
 610         qp->user_pri = 0;
 611         i40iw_qp_add_qos(qp);
 612         i40iw_puda_qp_setctx(rsrc);
 613         if (rsrc->dev->ceq_valid)
 614                 ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp);
 615         else
 616                 ret = i40iw_puda_qp_wqe(rsrc->dev, qp);
 617         if (ret) {
 618                 i40iw_qp_rem_qos(qp);
 619                 i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
 620         }
 621         return ret;
 622 }
 623 
 624 /**
 625  * i40iw_puda_cq_wqe - setup wqe for cq create
  626  * @dev: iwarp device
       * @cq: cq being created
 627  */
 628 static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
 629 {
 630         u64 *wqe;
 631         struct i40iw_sc_cqp *cqp;
 632         u64 header;
 633         struct i40iw_ccq_cqe_info compl_info;
 634         enum i40iw_status_code status = 0;
 635 
 636         cqp = dev->cqp;
 637         wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
 638         if (!wqe)
 639                 return I40IW_ERR_RING_FULL;
 640 
 641         set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
 642         set_64bit_val(wqe, 8, RS_64_1(cq, 1));
 643         set_64bit_val(wqe, 16,
 644                       LS_64(cq->shadow_read_threshold,
 645                             I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
 646         set_64bit_val(wqe, 32, cq->cq_pa);
 647 
 648         set_64bit_val(wqe, 40, cq->shadow_area_pa);
 649 
 650         header = cq->cq_uk.cq_id |
 651             LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
 652             LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
 653             LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
 654             LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
 655             LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 656         i40iw_insert_wqe_hdr(wqe, header);
 657 
  658         i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQ CREATE WQE",
 659                         wqe, I40IW_CQP_WQE_SIZE * 8);
 660 
 661         i40iw_sc_cqp_post_sq(dev->cqp);
 662         status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 663                                                  I40IW_CQP_OP_CREATE_CQ,
 664                                                  &compl_info);
 665         return status;
 666 }
 667 
 668 /**
 669  * i40iw_puda_cq_create - create cq for resource
 670  * @rsrc: resource for which cq to create
 671  */
 672 static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 673 {
 674         struct i40iw_sc_dev *dev = rsrc->dev;
 675         struct i40iw_sc_cq *cq = &rsrc->cq;
 676         enum i40iw_status_code ret = 0;
 677         u32 tsize, cqsize;
 678         struct i40iw_dma_mem *mem;
 679         struct i40iw_cq_init_info info;
 680         struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
 681 
 682         cq->vsi = rsrc->vsi;
 683         cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 684         tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
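              /* one DMA allocation holds the CQ ring followed by its shadow area */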
 685         ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
 686                                      I40IW_CQ0_ALIGNMENT);
 687         if (ret)
 688                 return ret;
 689 
 690         mem = &rsrc->cqmem;
 691         memset(&info, 0, sizeof(info));
 692         info.dev = dev;
 693         info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
 694                          I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
 695         info.shadow_read_threshold = rsrc->cq_size >> 2;
 696         info.ceq_id_valid = true;
 697         info.cq_base_pa = mem->pa;
 698         info.shadow_area_pa = mem->pa + cqsize;
 699         init_info->cq_base = mem->va;
 700         init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
 701         init_info->cq_size = rsrc->cq_size;
 702         init_info->cq_id = rsrc->cq_id;
 703         info.ceqe_mask = true;
 705         ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
 706         if (ret)
 707                 goto error;
 708         if (rsrc->dev->ceq_valid)
 709                 ret = i40iw_cqp_cq_create_cmd(dev, cq);
 710         else
 711                 ret = i40iw_puda_cq_wqe(dev, cq);
 712 error:
 713         if (ret)
 714                 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 715         return ret;
 716 }
 717 
 718 /**
 719  * i40iw_puda_free_qp - free qp for resource
 720  * @rsrc: resource for which qp to free
 721  */
 722 static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc)
 723 {
 724         enum i40iw_status_code ret;
 725         struct i40iw_ccq_cqe_info compl_info;
 726         struct i40iw_sc_dev *dev = rsrc->dev;
 727 
 728         if (rsrc->dev->ceq_valid) {
 729                 i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp);
 730                 return;
 731         }
 732 
 733         ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
 734                         0, false, true, true);
 735         if (ret)
 736                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
 737                             "%s error puda qp destroy wqe\n",
 738                             __func__);
 739 
 740         if (!ret) {
 741                 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 742                                 I40IW_CQP_OP_DESTROY_QP,
 743                                 &compl_info);
 744                 if (ret)
 745                         i40iw_debug(dev, I40IW_DEBUG_PUDA,
 746                                     "%s error puda qp destroy failed\n",
 747                                     __func__);
 748         }
 749 }
 750 
 751 /**
 752  * i40iw_puda_free_cq - free cq for resource
 753  * @rsrc: resource for which cq to free
 754  */
 755 static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc)
 756 {
 757         enum i40iw_status_code ret;
 758         struct i40iw_ccq_cqe_info compl_info;
 759         struct i40iw_sc_dev *dev = rsrc->dev;
 760 
 761         if (rsrc->dev->ceq_valid) {
 762                 i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq);
 763                 return;
 764         }
 765         ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
 766 
 767         if (ret)
 768                 i40iw_debug(dev, I40IW_DEBUG_PUDA,
 769                             "%s error ieq cq destroy\n",
 770                             __func__);
 771 
 772         if (!ret) {
 773                 ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
 774                                 I40IW_CQP_OP_DESTROY_CQ,
 775                                 &compl_info);
 776                 if (ret)
 777                         i40iw_debug(dev, I40IW_DEBUG_PUDA,
  778                                     "%s error ieq cq destroy done\n",
 779                                     __func__);
 780         }
 781 }
 782 
 783 /**
 784  * i40iw_puda_dele_resources - delete all resources during close
  785  * @vsi: pointer to the vsi structure
  786  * @type: type of resource to delete
 787  * @reset: true if reset chip
 788  */
 789 void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi,
 790                                enum puda_resource_type type,
 791                                bool reset)
 792 {
 793         struct i40iw_sc_dev *dev = vsi->dev;
 794         struct i40iw_puda_rsrc *rsrc;
 795         struct i40iw_puda_buf *buf = NULL;
 796         struct i40iw_puda_buf *nextbuf = NULL;
 797         struct i40iw_virt_mem *vmem;
 798 
 799         switch (type) {
 800         case I40IW_PUDA_RSRC_TYPE_ILQ:
 801                 rsrc = vsi->ilq;
 802                 vmem = &vsi->ilq_mem;
 803                 break;
 804         case I40IW_PUDA_RSRC_TYPE_IEQ:
 805                 rsrc = vsi->ieq;
 806                 vmem = &vsi->ieq_mem;
 807                 break;
 808         default:
 809                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
 810                             __func__, type);
 811                 return;
 812         }
 813 
 814         switch (rsrc->completion) {
 815         case PUDA_HASH_CRC_COMPLETE:
 816                 i40iw_free_hash_desc(rsrc->hash_desc);
 817                 /* fall through */
 818         case PUDA_QP_CREATED:
 819                 if (!reset)
 820                         i40iw_puda_free_qp(rsrc);
 821 
 822                 i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
  823                 /* fall through */
 824         case PUDA_CQ_CREATED:
 825                 if (!reset)
 826                         i40iw_puda_free_cq(rsrc);
 827 
 828                 i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
 829                 break;
 830         default:
 831                 i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
 832                 break;
 833         }
 834         /* Free all allocated puda buffers for both tx and rx */
 835         buf = rsrc->alloclist;
 836         while (buf) {
 837                 nextbuf = buf->next;
 838                 i40iw_puda_dele_buf(dev, buf);
 839                 buf = nextbuf;
 840                 rsrc->alloc_buf_count--;
 841         }
 842         i40iw_free_virt_mem(dev->hw, vmem);
 843 }
 844 
 845 /**
 846  * i40iw_puda_allocbufs - allocate buffers for resource
 847  * @rsrc: resource for buffer allocation
 848  * @count: number of buffers to create
 849  */
 850 static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
 851                                                    u32 count)
 852 {
 853         u32 i;
 854         struct i40iw_puda_buf *buf;
 855         struct i40iw_puda_buf *nextbuf;
 856 
 857         for (i = 0; i < count; i++) {
 858                 buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
 859                 if (!buf) {
 860                         rsrc->stats_buf_alloc_fail++;
 861                         return I40IW_ERR_NO_MEMORY;
 862                 }
 863                 i40iw_puda_ret_bufpool(rsrc, buf);
 864                 rsrc->alloc_buf_count++;
 865                 if (!rsrc->alloclist) {
 866                         rsrc->alloclist = buf;
 867                 } else {
 868                         nextbuf = rsrc->alloclist;
 869                         rsrc->alloclist = buf;
 870                         buf->next = nextbuf;
 871                 }
 872         }
 873         rsrc->avail_buf_count = rsrc->alloc_buf_count;
 874         return 0;
 875 }
 876 
 877 /**
  878  * i40iw_puda_create_rsrc - create resource (ilq or ieq)
  879  * @vsi: pointer to the vsi structure
 880  * @info: resource information
 881  */
 882 enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi,
 883                                               struct i40iw_puda_rsrc_info *info)
 884 {
 885         struct i40iw_sc_dev *dev = vsi->dev;
 886         enum i40iw_status_code ret = 0;
 887         struct i40iw_puda_rsrc *rsrc;
 888         u32 pudasize;
 889         u32 sqwridsize, rqwridsize;
 890         struct i40iw_virt_mem *vmem;
 891 
 892         info->count = 1;
 893         pudasize = sizeof(struct i40iw_puda_rsrc);
 894         sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
 895         rqwridsize = info->rq_size * 8;
 896         switch (info->type) {
 897         case I40IW_PUDA_RSRC_TYPE_ILQ:
 898                 vmem = &vsi->ilq_mem;
 899                 break;
 900         case I40IW_PUDA_RSRC_TYPE_IEQ:
 901                 vmem = &vsi->ieq_mem;
 902                 break;
 903         default:
 904                 return I40IW_NOT_SUPPORTED;
 905         }
  906         ret = i40iw_allocate_virt_mem(dev->hw, vmem,
  907                                       pudasize + sqwridsize + rqwridsize);
 909         if (ret)
 910                 return ret;
 911         rsrc = (struct i40iw_puda_rsrc *)vmem->va;
 912         spin_lock_init(&rsrc->bufpool_lock);
 913         if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
 914                 vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va;
 915                 vsi->ilq_count = info->count;
 916                 rsrc->receive = info->receive;
 917                 rsrc->xmit_complete = info->xmit_complete;
 918         } else {
 919                 vmem = &vsi->ieq_mem;
 920                 vsi->ieq_count = info->count;
 921                 vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va;
 922                 rsrc->receive = i40iw_ieq_receive;
 923                 rsrc->xmit_complete = i40iw_ieq_tx_compl;
 924         }
 925 
 926         rsrc->type = info->type;
 927         rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
 928         rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
 929         /* Initialize all ieq lists */
 930         INIT_LIST_HEAD(&rsrc->bufpool);
 931         INIT_LIST_HEAD(&rsrc->txpend);
 932 
 933         rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
 934         dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1);
 935         rsrc->qp_id = info->qp_id;
 936         rsrc->cq_id = info->cq_id;
 937         rsrc->sq_size = info->sq_size;
 938         rsrc->rq_size = info->rq_size;
 939         rsrc->cq_size = info->rq_size + info->sq_size;
 940         rsrc->buf_size = info->buf_size;
 941         rsrc->dev = dev;
 942         rsrc->vsi = vsi;
 943 
 944         ret = i40iw_puda_cq_create(rsrc);
 945         if (!ret) {
 946                 rsrc->completion = PUDA_CQ_CREATED;
 947                 ret = i40iw_puda_qp_create(rsrc);
 948         }
 949         if (ret) {
 950                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n",
 951                             __func__);
 952                 goto error;
 953         }
 954         rsrc->completion = PUDA_QP_CREATED;
 955 
 956         ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
 957         if (ret) {
 958                 i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n",
 959                             __func__);
 960                 goto error;
 961         }
 962 
 963         rsrc->rxq_invalid_cnt = info->rq_size;
 964         ret = i40iw_puda_replenish_rq(rsrc, true);
 965         if (ret)
 966                 goto error;
 967 
 968         if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
 969                 if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
 970                         rsrc->check_crc = true;
 971                         rsrc->completion = PUDA_HASH_CRC_COMPLETE;
 972                         ret = 0;
 973                 }
 974         }
 975 
 976         dev->ccq_ops->ccq_arm(&rsrc->cq);
 977         return ret;
 978  error:
 979         i40iw_puda_dele_resources(vsi, info->type, false);
 980 
 981         return ret;
 982 }
 983 
 984 /**
 985  * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
 986  * @qp: ilq's qp resource
 987  * @wqe_idx:  wqe index of completed rcvbuf
 988  */
 989 static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
 990 {
 991         u64 *wqe;
 992         u64 offset24;
 993 
 994         wqe = qp->qp_uk.rq_base[wqe_idx].elem;
 995         get_64bit_val(wqe, 24, &offset24);
 996         offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
 997         set_64bit_val(wqe, 24, offset24);
 998 }
 999 
1000 /**
 1001  * i40iw_ieq_get_fpdu_length - given length return fpdu length
 1002  * @length: length of fpdu
1003  */
1004 static u16 i40iw_ieq_get_fpdu_length(u16 length)
1005 {
1006         u16 fpdu_len;
1007 
1008         fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
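              /* round the framed length up to the next 4-byte boundary */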
1009         fpdu_len = (fpdu_len + 3) & 0xfffffffc;
1010         return fpdu_len;
1011 }
1012 
1013 /**
 1014  * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
 1015  * @buf: rcv buffer with partial
 1016  * @txbuf: tx buffer for sending back
 1017  * @buf_offset: rcv buffer offset to copy from
 1018  * @txbuf_offset: offset in tx buf to copy to
1019  * @length: length of data to copy
1020  */
1021 static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
1022                                     struct i40iw_puda_buf *txbuf,
1023                                     u16 buf_offset, u32 txbuf_offset,
1024                                     u32 length)
1025 {
1026         void *mem1 = (u8 *)buf->mem.va + buf_offset;
1027         void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
1028 
1029         memcpy(mem2, mem1, length);
1030 }
1031 
1032 /**
1033  * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
 1034  * @buf: receive buffer with partial
1035  * @txbuf: buffer to prepare
1036  */
1037 static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
1038                                    struct i40iw_puda_buf *txbuf)
1039 {
1040         txbuf->maclen = buf->maclen;
1041         txbuf->tcphlen = buf->tcphlen;
1042         txbuf->ipv4 = buf->ipv4;
1043         txbuf->hdrlen = buf->hdrlen;
1044         i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
1045 }
1046 
1047 /**
1048  * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
1049  * @buf: receive exception buffer
1050  * @fps: first partial sequence number
1051  */
1052 static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
1053 {
1054         u32 offset;
1055 
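              /* trim leading bytes that precede the first partial sequence number */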
1056         if (buf->seqnum < fps) {
1057                 offset = fps - buf->seqnum;
1058                 if (offset > buf->datalen)
1059                         return;
1060                 buf->data += offset;
1061                 buf->datalen -= (u16)offset;
1062                 buf->seqnum = fps;
1063         }
1064 }
1065 
1066 /**
1067  * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
1068  * @ieq: ieq resource
1069  * @rxlist: ieq's received buffer list
 1070  * @pbufl: temporary list for buffers for fpdu
1071  * @txbuf: tx buffer for fpdu
1072  * @fpdu_len: total length of fpdu
1073  */
1074 static void  i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
1075                                    struct list_head *rxlist,
1076                                    struct list_head *pbufl,
1077                                    struct i40iw_puda_buf *txbuf,
1078                                    u16 fpdu_len)
1079 {
1080         struct i40iw_puda_buf *buf;
1081         u32 nextseqnum;
1082         u16 txoffset, bufoffset;
1083 
1084         buf = i40iw_puda_get_listbuf(pbufl);
1085         if (!buf)
1086                 return;
1087         nextseqnum = buf->seqnum + fpdu_len;
1088         txbuf->totallen = buf->hdrlen + fpdu_len;
1089         txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
1090         i40iw_ieq_setup_tx_buf(buf, txbuf);
1091 
1092         txoffset = buf->hdrlen;
1093         bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1094 
1095         do {
1096                 if (buf->datalen >= fpdu_len) {
1097                         /* copied full fpdu */
1098                         i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
1099                         buf->datalen -= fpdu_len;
1100                         buf->data += fpdu_len;
1101                         buf->seqnum = nextseqnum;
1102                         break;
1103                 }
1104                 /* copy partial fpdu */
1105                 i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
1106                 txoffset += buf->datalen;
1107                 fpdu_len -= buf->datalen;
1108                 i40iw_puda_ret_bufpool(ieq, buf);
1109                 buf = i40iw_puda_get_listbuf(pbufl);
1110                 if (!buf)
1111                         return;
1112                 bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
1113         } while (1);
1114 
 1115         /* last buffer on the list */
1116         if (buf->datalen)
1117                 list_add(&buf->list, rxlist);
1118         else
1119                 i40iw_puda_ret_bufpool(ieq, buf);
1120 }
1121 
1122 /**
1123  * i40iw_ieq_create_pbufl - create buffer list for single fpdu
       * @pfpdu: partial management per user qp
 1124  * @rxlist: resource list for receive ieq buffers
 1125  * @pbufl: temp. list for buffers for fpdu
1126  * @buf: first receive buffer
1127  * @fpdu_len: total length of fpdu
1128  */
1129 static enum i40iw_status_code i40iw_ieq_create_pbufl(
1130                                                      struct i40iw_pfpdu *pfpdu,
1131                                                      struct list_head *rxlist,
1132                                                      struct list_head *pbufl,
1133                                                      struct i40iw_puda_buf *buf,
1134                                                      u16 fpdu_len)
1135 {
1136         enum i40iw_status_code status = 0;
1137         struct i40iw_puda_buf *nextbuf;
1138         u32     nextseqnum;
1139         u16 plen = fpdu_len - buf->datalen;
1140         bool done = false;
1141 
1142         nextseqnum = buf->seqnum + buf->datalen;
1143         do {
1144                 nextbuf = i40iw_puda_get_listbuf(rxlist);
1145                 if (!nextbuf) {
1146                         status = I40IW_ERR_list_empty;
1147                         break;
1148                 }
1149                 list_add_tail(&nextbuf->list, pbufl);
1150                 if (nextbuf->seqnum != nextseqnum) {
1151                         pfpdu->bad_seq_num++;
1152                         status = I40IW_ERR_SEQ_NUM;
1153                         break;
1154                 }
1155                 if (nextbuf->datalen >= plen) {
1156                         done = true;
1157                 } else {
1158                         plen -= nextbuf->datalen;
1159                         nextseqnum = nextbuf->seqnum + nextbuf->datalen;
1160                 }
1161 
1162         } while (!done);
1163 
1164         return status;
1165 }
1166 
1167 /**
1168  * i40iw_ieq_handle_partial - process partial fpdu buffer
1169  * @ieq: ieq resource
1170  * @pfpdu: partial management per user qp
1171  * @buf: receive buffer
1172  * @fpdu_len: fpdu len in the buffer
1173  */
1174 static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
1175                                                        struct i40iw_pfpdu *pfpdu,
1176                                                        struct i40iw_puda_buf *buf,
1177                                                        u16 fpdu_len)
1178 {
1179         enum i40iw_status_code status = 0;
1180         u8 *crcptr;
1181         u32 mpacrc;
1182         u32 seqnum = buf->seqnum;
1183         struct list_head pbufl; /* partial buffer list */
1184         struct i40iw_puda_buf *txbuf = NULL;
1185         struct list_head *rxlist = &pfpdu->rxlist;
1186 
1187         INIT_LIST_HEAD(&pbufl);
1188         list_add(&buf->list, &pbufl);
1189 
1190         status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
1191         if (status)
1192                 goto error;
1193 
1194         txbuf = i40iw_puda_get_bufpool(ieq);
1195         if (!txbuf) {
1196                 pfpdu->no_tx_bufs++;
1197                 status = I40IW_ERR_NO_TXBUFS;
1198                 goto error;
1199         }
1200 
1201         i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
1202         i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
1203         crcptr = txbuf->data + fpdu_len - 4;
1204         mpacrc = *(u32 *)crcptr;
1205         if (ieq->check_crc) {
1206                 status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
1207                                                 (fpdu_len - 4), mpacrc);
1208                 if (status) {
1209                         i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1210                                     "%s: error bad crc\n", __func__);
1211                         goto error;
1212                 }
1213         }
1214 
1215         i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
1216                         txbuf->mem.va, txbuf->totallen);
1217         i40iw_puda_send_buf(ieq, txbuf);
1218         pfpdu->rcv_nxt = seqnum + fpdu_len;
1219         return status;
1220  error:
1221         while (!list_empty(&pbufl)) {
1222                 buf = (struct i40iw_puda_buf *)(pbufl.prev);
1223                 list_del(&buf->list);
1224                 list_add(&buf->list, rxlist);
1225         }
1226         if (txbuf)
1227                 i40iw_puda_ret_bufpool(ieq, txbuf);
1228         return status;
1229 }
1230 
1231 /**
1232  * i40iw_ieq_process_buf - process buffer rcvd for ieq
1233  * @ieq: ieq resource
1234  * @pfpdu: partial management per user qp
1235  * @buf: receive buffer
1236  */
1237 static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
1238                                                     struct i40iw_pfpdu *pfpdu,
1239                                                     struct i40iw_puda_buf *buf)
1240 {
1241         u16 fpdu_len = 0;
1242         u16 datalen = buf->datalen;
1243         u8 *datap = buf->data;
1244         u8 *crcptr;
1245         u16 ioffset = 0;
1246         u32 mpacrc;
1247         u32 seqnum = buf->seqnum;
1248         u16 length = 0;
1249         u16 full = 0;
1250         bool partial = false;
1251         struct i40iw_puda_buf *txbuf;
1252         struct list_head *rxlist = &pfpdu->rxlist;
1253         enum i40iw_status_code ret = 0;
1254         enum i40iw_status_code status = 0;
1255 
1256         ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
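              /* walk the payload, validating each complete fpdu (length and crc) */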
1257         while (datalen) {
1258                 fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
1259                 if (fpdu_len > pfpdu->max_fpdu_data) {
1260                         i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1261                                     "%s: error bad fpdu_len\n", __func__);
1262                         status = I40IW_ERR_MPA_CRC;
1263                         list_add(&buf->list, rxlist);
1264                         return status;
1265                 }
1266 
1267                 if (datalen < fpdu_len) {
1268                         partial = true;
1269                         break;
1270                 }
1271                 crcptr = datap + fpdu_len - 4;
1272                 mpacrc = *(u32 *)crcptr;
1273                 if (ieq->check_crc)
1274                         ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
1275                                                      datap, fpdu_len - 4, mpacrc);
1276                 if (ret) {
1277                         status = I40IW_ERR_MPA_CRC;
1278                         list_add(&buf->list, rxlist);
1279                         return status;
1280                 }
1281                 full++;
1282                 pfpdu->fpdu_processed++;
1283                 datap += fpdu_len;
1284                 length += fpdu_len;
1285                 datalen -= fpdu_len;
1286         }
1287         if (full) {
1288                 /* copy full pdu's in the txbuf and send them out */
1289                 txbuf = i40iw_puda_get_bufpool(ieq);
1290                 if (!txbuf) {
1291                         pfpdu->no_tx_bufs++;
1292                         status = I40IW_ERR_NO_TXBUFS;
1293                         list_add(&buf->list, rxlist);
1294                         return status;
1295                 }
1296                 /* modify txbuf's buffer header */
1297                 i40iw_ieq_setup_tx_buf(buf, txbuf);
1298                 /* copy full fpdu's to new buffer */
1299                 i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
1300                                         length);
1301                 txbuf->totallen = buf->hdrlen + length;
1302 
1303                 i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
1304                 i40iw_puda_send_buf(ieq, txbuf);
1305 
1306                 if (!datalen) {
1307                         pfpdu->rcv_nxt = buf->seqnum + length;
1308                         i40iw_puda_ret_bufpool(ieq, buf);
1309                         return status;
1310                 }
1311                 buf->data = datap;
1312                 buf->seqnum = seqnum + length;
1313                 buf->datalen = datalen;
1314                 pfpdu->rcv_nxt = buf->seqnum;
1315         }
1316         if (partial)
1317                 status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
1318 
1319         return status;
1320 }
1321 
1322 /**
 1323  * i40iw_ieq_process_fpdus - process fpdu buffers on its list
 1324  * @qp: qp with partial fpdus to process
1325  * @ieq: ieq resource
1326  */
1327 static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
1328                                     struct i40iw_puda_rsrc *ieq)
1329 {
1330         struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1331         struct list_head *rxlist = &pfpdu->rxlist;
1332         struct i40iw_puda_buf *buf;
1333         enum i40iw_status_code status;
1334 
1335         do {
1336                 if (list_empty(rxlist))
1337                         break;
1338                 buf = i40iw_puda_get_listbuf(rxlist);
1339                 if (!buf) {
1340                         i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1341                                     "%s: error no buf\n", __func__);
1342                         break;
1343                 }
1344                 if (buf->seqnum != pfpdu->rcv_nxt) {
1345                         /* This could be out of order or missing packet */
1346                         pfpdu->out_of_order++;
1347                         list_add(&buf->list, rxlist);
1348                         break;
1349                 }
1350                 /* keep processing buffers from the head of the list */
1351                 status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
1352                 if (status == I40IW_ERR_MPA_CRC) {
1353                         pfpdu->mpa_crc_err = true;
1354                         while (!list_empty(rxlist)) {
1355                                 buf = i40iw_puda_get_listbuf(rxlist);
1356                                 i40iw_puda_ret_bufpool(ieq, buf);
1357                                 pfpdu->crc_err++;
1358                         }
1359                         /* create CQP for AE */
1360                         i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
1361                 }
1362         } while (!status);
1363 }
1364 
1365 /**
1366  * i40iw_ieq_handle_exception - handle qp's exception
1367  * @ieq: ieq resource
 1368  * @qp: qp receiving exception
1369  * @buf: receive buffer
1370  */
1371 static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
1372                                        struct i40iw_sc_qp *qp,
1373                                        struct i40iw_puda_buf *buf)
1374 {
1375         struct i40iw_puda_buf *tmpbuf = NULL;
1376         struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1377         u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
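              /* the receive window is kept at 32-bit word 23 of the host context */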
1378         u32 rcv_wnd = hw_host_ctx[23];
1379         /* first partial seq # in q2 */
1380         u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
1381         struct list_head *rxlist = &pfpdu->rxlist;
1382         struct list_head *plist;
1383 
1384         pfpdu->total_ieq_bufs++;
1385 
1386         if (pfpdu->mpa_crc_err) {
1387                 pfpdu->crc_err++;
1388                 goto error;
1389         }
1390         if (pfpdu->mode && (fps != pfpdu->fps)) {
1391                 /* clean up qp as it is new partial sequence */
1392                 i40iw_ieq_cleanup_qp(ieq, qp);
1393                 i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
1394                             "%s: restarting new partial\n", __func__);
1395                 pfpdu->mode = false;
1396         }
1397 
1398         if (!pfpdu->mode) {
1399                 i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
1400                 /* First_Partial_Sequence_Number check */
1401                 pfpdu->rcv_nxt = fps;
1402                 pfpdu->fps = fps;
1403                 pfpdu->mode = true;
1404                 pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) :
1405                                        (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6);
1406                 pfpdu->pmode_count++;
1407                 INIT_LIST_HEAD(rxlist);
1408                 i40iw_ieq_check_first_buf(buf, fps);
1409         }
1410 
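              /* drop the buffer if its sequence number falls outside the receive window */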
1411         if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
1412                 pfpdu->bad_seq_num++;
1413                 goto error;
1414         }
1415 
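              /* insert the buffer on rxlist in sequence-number order */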
1416         if (!list_empty(rxlist)) {
1417                 tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
1418                 while ((struct list_head *)tmpbuf != rxlist) {
1419                         if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
1420                                 break;
1421                         plist = &tmpbuf->list;
1422                         tmpbuf = (struct i40iw_puda_buf *)plist->next;
1423                 }
1424                 /* Insert buf before tmpbuf */
1425                 list_add_tail(&buf->list, &tmpbuf->list);
1426         } else {
1427                 list_add_tail(&buf->list, rxlist);
1428         }
1429         i40iw_ieq_process_fpdus(qp, ieq);
1430         return;
1431  error:
1432         i40iw_puda_ret_bufpool(ieq, buf);
1433 }
1434 
1435 /**
1436  * i40iw_ieq_receive - received exception buffer
 1437  * @vsi: pointer to the vsi structure
1438  * @buf: exception buffer received
1439  */
1440 static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi,
1441                               struct i40iw_puda_buf *buf)
1442 {
1443         struct i40iw_puda_rsrc *ieq = vsi->ieq;
1444         struct i40iw_sc_qp *qp = NULL;
1445         u32 wqe_idx = ieq->compl_rxwqe_idx;
1446 
1447         qp = i40iw_ieq_get_qp(vsi->dev, buf);
1448         if (!qp) {
1449                 ieq->stats_bad_qp_id++;
1450                 i40iw_puda_ret_bufpool(ieq, buf);
1451         } else {
1452                 i40iw_ieq_handle_exception(ieq, qp, buf);
1453         }
1454         /*
1455          * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
 1456          * to know at which wqe_idx to start replenishing the rq
1457          */
1458         if (!ieq->rxq_invalid_cnt)
1459                 ieq->rx_wqe_idx = wqe_idx;
1460         ieq->rxq_invalid_cnt++;
1461 }
1462 
1463 /**
1464  * i40iw_ieq_tx_compl - put back after sending completed exception buffer
1465  * @vsi: pointer to the vsi structure
1466  * @sqwrid: pointer to puda buffer
1467  */
1468 static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
1469 {
1470         struct i40iw_puda_rsrc *ieq = vsi->ieq;
1471         struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
1472 
1473         i40iw_puda_ret_bufpool(ieq, buf);
1474 }
1475 
1476 /**
1477  * i40iw_ieq_cleanup_qp - qp is being destroyed
1478  * @ieq: ieq resource
 1479  * @qp: qp whose pending fpdu buffers are returned
1480  */
1481 void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
1482 {
1483         struct i40iw_puda_buf *buf;
1484         struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
1485         struct list_head *rxlist = &pfpdu->rxlist;
1486 
1487         if (!pfpdu->mode)
1488                 return;
1489         while (!list_empty(rxlist)) {
1490                 buf = i40iw_puda_get_listbuf(rxlist);
1491                 i40iw_puda_ret_bufpool(ieq, buf);
1492         }
1493 }
