root/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c


DEFINITIONS

This source file includes the following definitions; a brief usage sketch follows the list.
  1. mlx5_fpga_conn_map_buf
  2. mlx5_fpga_conn_unmap_buf
  3. mlx5_fpga_conn_post_recv
  4. mlx5_fpga_conn_notify_hw
  5. mlx5_fpga_conn_post_send
  6. mlx5_fpga_conn_send
  7. mlx5_fpga_conn_post_recv_buf
  8. mlx5_fpga_conn_create_mkey
  9. mlx5_fpga_conn_rq_cqe
  10. mlx5_fpga_conn_sq_cqe
  11. mlx5_fpga_conn_handle_cqe
  12. mlx5_fpga_conn_arm_cq
  13. mlx5_fpga_conn_cq_event
  14. mlx5_fpga_conn_event
  15. mlx5_fpga_conn_cqes
  16. mlx5_fpga_conn_cq_tasklet
  17. mlx5_fpga_conn_cq_complete
  18. mlx5_fpga_conn_create_cq
  19. mlx5_fpga_conn_destroy_cq
  20. mlx5_fpga_conn_create_wq
  21. mlx5_fpga_conn_create_qp
  22. mlx5_fpga_conn_free_recv_bufs
  23. mlx5_fpga_conn_flush_send_bufs
  24. mlx5_fpga_conn_destroy_qp
  25. mlx5_fpga_conn_reset_qp
  26. mlx5_fpga_conn_init_qp
  27. mlx5_fpga_conn_rtr_qp
  28. mlx5_fpga_conn_rts_qp
  29. mlx5_fpga_conn_connect
  30. mlx5_fpga_conn_create
  31. mlx5_fpga_conn_destroy
  32. mlx5_fpga_conn_device_init
  33. mlx5_fpga_conn_device_cleanup
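
A minimal usage sketch of the connection API defined below, for orientation only. Function and struct field names are taken from this file; my_open_conn(), my_recv_cb() and the caller-supplied my_ctx/buf/qp_type are hypothetical, and the declarations from fpga/conn.h are assumed to be in scope.

      static void my_recv_cb(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
      {
              /* Consume buf->sg[0].size bytes at buf->sg[0].data. */
      }

      static int my_open_conn(struct mlx5_fpga_device *fdev, void *my_ctx,
                              enum mlx5_ifc_fpga_qp_type qp_type,
                              struct mlx5_fpga_dma_buf *buf)
      {
              struct mlx5_fpga_conn_attr attr = {
                      .tx_size = 32,
                      .rx_size = 32,
                      .recv_cb = my_recv_cb,
                      .cb_arg  = my_ctx,
              };
              struct mlx5_fpga_conn *conn;
              int err;

              conn = mlx5_fpga_conn_create(fdev, &attr, qp_type);
              if (IS_ERR(conn))
                      return PTR_ERR(conn);

              /* The caller fills buf->sg[] and may set buf->complete to be
               * notified when the send completes; dma_dir is set by
               * mlx5_fpga_conn_send().
               */
              err = mlx5_fpga_conn_send(conn, buf);

              mlx5_fpga_conn_destroy(conn);
              return err;
      }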

   1 /*
   2  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
   3  *
   4  * This software is available to you under a choice of one of two
   5  * licenses.  You may choose to be licensed under the terms of the GNU
   6  * General Public License (GPL) Version 2, available from the file
   7  * COPYING in the main directory of this source tree, or the
   8  * OpenIB.org BSD license below:
   9  *
  10  *     Redistribution and use in source and binary forms, with or
  11  *     without modification, are permitted provided that the following
  12  *     conditions are met:
  13  *
  14  *      - Redistributions of source code must retain the above
  15  *        copyright notice, this list of conditions and the following
  16  *        disclaimer.
  17  *
  18  *      - Redistributions in binary form must reproduce the above
  19  *        copyright notice, this list of conditions and the following
  20  *        disclaimer in the documentation and/or other materials
  21  *        provided with the distribution.
  22  *
  23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30  * SOFTWARE.
  31  *
  32  */
  33 
  34 #include <net/addrconf.h>
  35 #include <linux/etherdevice.h>
  36 #include <linux/mlx5/vport.h>
  37 
  38 #include "mlx5_core.h"
  39 #include "lib/mlx5.h"
  40 #include "fpga/conn.h"
  41 
  42 #define MLX5_FPGA_PKEY 0xFFFF
  43 #define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
  44 #define MLX5_FPGA_RECV_SIZE 2048
  45 #define MLX5_FPGA_PORT_NUM 1
  46 #define MLX5_FPGA_CQ_BUDGET 64
  47 
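      /* DMA-map the one or two populated scatter entries of @buf in
       * buf->dma_dir; if mapping the second entry fails, the first one is
       * unmapped again before returning the error.
       */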
  48 static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
  49                                   struct mlx5_fpga_dma_buf *buf)
  50 {
  51         struct device *dma_device;
  52         int err = 0;
  53 
  54         if (unlikely(!buf->sg[0].data))
  55                 goto out;
  56 
  57         dma_device = &conn->fdev->mdev->pdev->dev;
  58         buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
  59                                              buf->sg[0].size, buf->dma_dir);
  60         err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
  61         if (unlikely(err)) {
  62                 mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
  63                 err = -ENOMEM;
  64                 goto out;
  65         }
  66 
  67         if (!buf->sg[1].data)
  68                 goto out;
  69 
  70         buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
  71                                              buf->sg[1].size, buf->dma_dir);
  72         err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
  73         if (unlikely(err)) {
  74                 mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
  75                 dma_unmap_single(dma_device, buf->sg[0].dma_addr,
  76                                  buf->sg[0].size, buf->dma_dir);
  77                 err = -ENOMEM;
  78         }
  79 
  80 out:
  81         return err;
  82 }
  83 
  84 static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
  85                                      struct mlx5_fpga_dma_buf *buf)
  86 {
  87         struct device *dma_device;
  88 
  89         dma_device = &conn->fdev->mdev->pdev->dev;
  90         if (buf->sg[1].data)
  91                 dma_unmap_single(dma_device, buf->sg[1].dma_addr,
  92                                  buf->sg[1].size, buf->dma_dir);
  93 
  94         if (likely(buf->sg[0].data))
  95                 dma_unmap_single(dma_device, buf->sg[0].dma_addr,
  96                                  buf->sg[0].size, buf->dma_dir);
  97 }
  98 
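      /* Map @buf and post it as a single-segment receive WQE, then update the
       * RQ doorbell record. Returns -EBUSY when the RQ is full.
       */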
  99 static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
 100                                     struct mlx5_fpga_dma_buf *buf)
 101 {
 102         struct mlx5_wqe_data_seg *data;
 103         unsigned int ix;
 104         int err = 0;
 105 
 106         err = mlx5_fpga_conn_map_buf(conn, buf);
 107         if (unlikely(err))
 108                 goto out;
 109 
 110         if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
 111                 mlx5_fpga_conn_unmap_buf(conn, buf);
 112                 return -EBUSY;
 113         }
 114 
 115         ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
 116         data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
 117         data->byte_count = cpu_to_be32(buf->sg[0].size);
 118         data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
 119         data->addr = cpu_to_be64(buf->sg[0].dma_addr);
 120 
 121         conn->qp.rq.pc++;
 122         conn->qp.rq.bufs[ix] = buf;
 123 
 124         /* Make sure that descriptors are written before doorbell record. */
 125         dma_wmb();
 126         *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
 127 out:
 128         return err;
 129 }
 130 
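      /* Publish the new SQ producer counter in the doorbell record and ring
       * the doorbell through the UAR BlueFlame register.
       */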
 131 static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
 132 {
 133         /* ensure wqe is visible to device before updating doorbell record */
 134         dma_wmb();
 135         *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
 136         /* Make sure that doorbell record is visible before ringing */
 137         wmb();
 138         mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
 139 }
 140 
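      /* Build a SEND WQE covering the populated scatter entries of @buf and
       * ring the doorbell. Called with the SQ lock held.
       */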
 141 static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
 142                                      struct mlx5_fpga_dma_buf *buf)
 143 {
 144         struct mlx5_wqe_ctrl_seg *ctrl;
 145         struct mlx5_wqe_data_seg *data;
 146         unsigned int ix, sgi;
 147         int size = 1;
 148 
 149         ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);
 150 
 151         ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
 152         data = (void *)(ctrl + 1);
 153 
 154         for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
 155                 if (!buf->sg[sgi].data)
 156                         break;
 157                 data->byte_count = cpu_to_be32(buf->sg[sgi].size);
 158                 data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
 159                 data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
 160                 data++;
 161                 size++;
 162         }
 163 
 164         ctrl->imm = 0;
 165         ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 166         ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
 167                                              MLX5_OPCODE_SEND);
 168         ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
 169 
 170         conn->qp.sq.pc++;
 171         conn->qp.sq.bufs[ix] = buf;
 172         mlx5_fpga_conn_notify_hw(conn, ctrl);
 173 }
 174 
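      /* Queue @buf for transmission to the FPGA. If the SQ is currently full
       * the buffer is put on the backlog and posted later from the SQ
       * completion handler. Returns -ENOTCONN if the connection is inactive.
       */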
 175 int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
 176                         struct mlx5_fpga_dma_buf *buf)
 177 {
 178         unsigned long flags;
 179         int err;
 180 
 181         if (!conn->qp.active)
 182                 return -ENOTCONN;
 183 
 184         buf->dma_dir = DMA_TO_DEVICE;
 185         err = mlx5_fpga_conn_map_buf(conn, buf);
 186         if (err)
 187                 return err;
 188 
 189         spin_lock_irqsave(&conn->qp.sq.lock, flags);
 190 
 191         if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
 192                 list_add_tail(&buf->list, &conn->qp.sq.backlog);
 193                 goto out_unlock;
 194         }
 195 
 196         mlx5_fpga_conn_post_send(conn, buf);
 197 
 198 out_unlock:
 199         spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
 200         return err;
 201 }
 202 
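      /* Allocate one MLX5_FPGA_RECV_SIZE receive buffer and post it to the RQ. */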
 203 static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
 204 {
 205         struct mlx5_fpga_dma_buf *buf;
 206         int err;
 207 
  208         buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, GFP_KERNEL);
 209         if (!buf)
 210                 return -ENOMEM;
 211 
 212         buf->sg[0].data = (void *)(buf + 1);
 213         buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
 214         buf->dma_dir = DMA_FROM_DEVICE;
 215 
 216         err = mlx5_fpga_conn_post_recv(conn, buf);
 217         if (err)
 218                 kfree(buf);
 219 
 220         return err;
 221 }
 222 
 223 static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
 224                                       struct mlx5_core_mkey *mkey)
 225 {
 226         int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
 227         void *mkc;
 228         u32 *in;
 229         int err;
 230 
 231         in = kvzalloc(inlen, GFP_KERNEL);
 232         if (!in)
 233                 return -ENOMEM;
 234 
 235         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
 236         MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
 237         MLX5_SET(mkc, mkc, lw, 1);
 238         MLX5_SET(mkc, mkc, lr, 1);
 239 
 240         MLX5_SET(mkc, mkc, pd, pdn);
 241         MLX5_SET(mkc, mkc, length64, 1);
 242         MLX5_SET(mkc, mkc, qpn, 0xffffff);
 243 
 244         err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
 245 
 246         kvfree(in);
 247         return err;
 248 }
 249 
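      /* Receive completion: deliver the buffer to the connection's recv_cb and
       * then repost it to the RQ (or free it on error / inactive QP).
       */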
 250 static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
 251                                   struct mlx5_cqe64 *cqe, u8 status)
 252 {
 253         struct mlx5_fpga_dma_buf *buf;
 254         int ix, err;
 255 
 256         ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
 257         buf = conn->qp.rq.bufs[ix];
 258         conn->qp.rq.bufs[ix] = NULL;
 259         conn->qp.rq.cc++;
 260 
 261         if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
 262                 mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
 263                                buf, conn->fpga_qpn, status);
 264         else
 265                 mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
 266                               buf, conn->fpga_qpn, status);
 267 
 268         mlx5_fpga_conn_unmap_buf(conn, buf);
 269 
 270         if (unlikely(status || !conn->qp.active)) {
 271                 conn->qp.active = false;
 272                 kfree(buf);
 273                 return;
 274         }
 275 
 276         buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
 277         mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
 278                       buf->sg[0].size);
 279         conn->recv_cb(conn->cb_arg, buf);
 280 
 281         buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
 282         err = mlx5_fpga_conn_post_recv(conn, buf);
 283         if (unlikely(err)) {
 284                 mlx5_fpga_warn(conn->fdev,
 285                                "Failed to re-post recv buf: %d\n", err);
 286                 kfree(buf);
 287         }
 288 }
 289 
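      /* Send completion: release the SQ slot, post the next backlog entry if
       * any, and invoke the buffer's completion callback.
       */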
 290 static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
 291                                   struct mlx5_cqe64 *cqe, u8 status)
 292 {
 293         struct mlx5_fpga_dma_buf *buf, *nextbuf;
 294         unsigned long flags;
 295         int ix;
 296 
 297         spin_lock_irqsave(&conn->qp.sq.lock, flags);
 298 
 299         ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
 300         buf = conn->qp.sq.bufs[ix];
 301         conn->qp.sq.bufs[ix] = NULL;
 302         conn->qp.sq.cc++;
 303 
  304         /* Handle the backlog while still holding the spinlock to preserve message post order */
 305         if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
 306                 if (likely(conn->qp.active)) {
 307                         nextbuf = list_first_entry(&conn->qp.sq.backlog,
 308                                                    struct mlx5_fpga_dma_buf, list);
 309                         list_del(&nextbuf->list);
 310                         mlx5_fpga_conn_post_send(conn, nextbuf);
 311                 }
 312         }
 313 
 314         spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
 315 
 316         if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
 317                 mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
 318                                buf, conn->fpga_qpn, status);
 319         else
 320                 mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
 321                               buf, conn->fpga_qpn, status);
 322 
 323         mlx5_fpga_conn_unmap_buf(conn, buf);
 324 
 325         if (likely(buf->complete))
 326                 buf->complete(conn, conn->fdev, buf, status);
 327 
 328         if (unlikely(status))
 329                 conn->qp.active = false;
 330 }
 331 
 332 static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
 333                                       struct mlx5_cqe64 *cqe)
 334 {
 335         u8 opcode, status = 0;
 336 
 337         opcode = get_cqe_opcode(cqe);
 338 
 339         switch (opcode) {
 340         case MLX5_CQE_REQ_ERR:
 341                 status = ((struct mlx5_err_cqe *)cqe)->syndrome;
 342                 /* Fall through */
 343         case MLX5_CQE_REQ:
 344                 mlx5_fpga_conn_sq_cqe(conn, cqe, status);
 345                 break;
 346 
 347         case MLX5_CQE_RESP_ERR:
 348                 status = ((struct mlx5_err_cqe *)cqe)->syndrome;
 349                 /* Fall through */
 350         case MLX5_CQE_RESP_SEND:
 351                 mlx5_fpga_conn_rq_cqe(conn, cqe, status);
 352                 break;
 353         default:
 354                 mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
 355                                opcode);
 356         }
 357 }
 358 
 359 static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
 360 {
 361         mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
 362                     conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
 363 }
 364 
 365 static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
 366                                     enum mlx5_event event)
 367 {
 368         struct mlx5_fpga_conn *conn;
 369 
 370         conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
 371         mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
 372 }
 373 
 374 static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
 375 {
 376         struct mlx5_fpga_conn *conn;
 377 
 378         conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
 379         mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
 380 }
 381 
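      /* Poll up to @budget CQEs. If the budget is exhausted, defer further
       * processing to the tasklet; otherwise re-arm the CQ.
       */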
 382 static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
 383                                        unsigned int budget)
 384 {
 385         struct mlx5_cqe64 *cqe;
 386 
 387         while (budget) {
 388                 cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
 389                 if (!cqe)
 390                         break;
 391 
 392                 budget--;
 393                 mlx5_cqwq_pop(&conn->cq.wq);
 394                 mlx5_fpga_conn_handle_cqe(conn, cqe);
 395                 mlx5_cqwq_update_db_record(&conn->cq.wq);
 396         }
 397         if (!budget) {
 398                 tasklet_schedule(&conn->cq.tasklet);
 399                 return;
 400         }
 401 
 402         mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
 403         /* ensure cq space is freed before enabling more cqes */
 404         wmb();
 405         mlx5_fpga_conn_arm_cq(conn);
 406 }
 407 
 408 static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
 409 {
 410         struct mlx5_fpga_conn *conn = (void *)data;
 411 
 412         if (unlikely(!conn->qp.active))
 413                 return;
 414         mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
 415 }
 416 
 417 static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
 418                                        struct mlx5_eqe *eqe)
 419 {
 420         struct mlx5_fpga_conn *conn;
 421 
 422         conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
 423         if (unlikely(!conn->qp.active))
 424                 return;
 425         mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
 426 }
 427 
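      /* Create the single completion queue shared by the connection's SQ and RQ. */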
 428 static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
 429 {
 430         struct mlx5_fpga_device *fdev = conn->fdev;
 431         struct mlx5_core_dev *mdev = fdev->mdev;
 432         u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
 433         u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 434         struct mlx5_wq_param wqp;
 435         struct mlx5_cqe64 *cqe;
 436         int inlen, err, eqn;
 437         unsigned int irqn;
 438         void *cqc, *in;
 439         __be64 *pas;
 440         u32 i;
 441 
 442         cq_size = roundup_pow_of_two(cq_size);
 443         MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));
 444 
 445         wqp.buf_numa_node = mdev->priv.numa_node;
 446         wqp.db_numa_node  = mdev->priv.numa_node;
 447 
 448         err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
 449                                &conn->cq.wq_ctrl);
 450         if (err)
 451                 return err;
 452 
 453         for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
 454                 cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
 455                 cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
 456         }
 457 
 458         inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 459                 sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
 460         in = kvzalloc(inlen, GFP_KERNEL);
 461         if (!in) {
 462                 err = -ENOMEM;
 463                 goto err_cqwq;
 464         }
 465 
 466         err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
 467         if (err) {
 468                 kvfree(in);
 469                 goto err_cqwq;
 470         }
 471 
 472         cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 473         MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
 474         MLX5_SET(cqc, cqc, c_eqn, eqn);
 475         MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
 476         MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
 477                            MLX5_ADAPTER_PAGE_SHIFT);
 478         MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);
 479 
 480         pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
 481         mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);
 482 
 483         err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
 484         kvfree(in);
 485 
 486         if (err)
 487                 goto err_cqwq;
 488 
 489         conn->cq.mcq.cqe_sz     = 64;
 490         conn->cq.mcq.set_ci_db  = conn->cq.wq_ctrl.db.db;
 491         conn->cq.mcq.arm_db     = conn->cq.wq_ctrl.db.db + 1;
 492         *conn->cq.mcq.set_ci_db = 0;
 493         *conn->cq.mcq.arm_db    = 0;
 494         conn->cq.mcq.vector     = 0;
 495         conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
 496         conn->cq.mcq.event      = mlx5_fpga_conn_cq_event;
 497         conn->cq.mcq.irqn       = irqn;
 498         conn->cq.mcq.uar        = fdev->conn_res.uar;
 499         tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
 500                      (unsigned long)conn);
 501 
 502         mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
 503 
 504         goto out;
 505 
 506 err_cqwq:
 507         mlx5_wq_destroy(&conn->cq.wq_ctrl);
 508 out:
 509         return err;
 510 }
 511 
 512 static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
 513 {
 514         tasklet_disable(&conn->cq.tasklet);
 515         tasklet_kill(&conn->cq.tasklet);
 516         mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
 517         mlx5_wq_destroy(&conn->cq.wq_ctrl);
 518 }
 519 
 520 static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
 521 {
 522         struct mlx5_fpga_device *fdev = conn->fdev;
 523         struct mlx5_core_dev *mdev = fdev->mdev;
 524         struct mlx5_wq_param wqp;
 525 
 526         wqp.buf_numa_node = mdev->priv.numa_node;
 527         wqp.db_numa_node  = mdev->priv.numa_node;
 528 
 529         return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
 530                                  &conn->qp.wq_ctrl);
 531 }
 532 
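      /* Create the host-side RC QP together with its work queues and the
       * per-WQE buffer pointer arrays.
       */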
 533 static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
 534                                     unsigned int tx_size, unsigned int rx_size)
 535 {
 536         struct mlx5_fpga_device *fdev = conn->fdev;
 537         struct mlx5_core_dev *mdev = fdev->mdev;
 538         u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {0};
 539         void *in = NULL, *qpc;
 540         int err, inlen;
 541 
 542         conn->qp.rq.pc = 0;
 543         conn->qp.rq.cc = 0;
 544         conn->qp.rq.size = roundup_pow_of_two(rx_size);
 545         conn->qp.sq.pc = 0;
 546         conn->qp.sq.cc = 0;
 547         conn->qp.sq.size = roundup_pow_of_two(tx_size);
 548 
 549         MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
 550         MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
 551         MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
 552         err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
 553         if (err)
 554                 goto out;
 555 
 556         conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
 557                                     sizeof(conn->qp.rq.bufs[0]),
 558                                     GFP_KERNEL);
 559         if (!conn->qp.rq.bufs) {
 560                 err = -ENOMEM;
 561                 goto err_wq;
 562         }
 563 
 564         conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
 565                                     sizeof(conn->qp.sq.bufs[0]),
 566                                     GFP_KERNEL);
 567         if (!conn->qp.sq.bufs) {
 568                 err = -ENOMEM;
 569                 goto err_rq_bufs;
 570         }
 571 
 572         inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
 573                 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
 574                 conn->qp.wq_ctrl.buf.npages;
 575         in = kvzalloc(inlen, GFP_KERNEL);
 576         if (!in) {
 577                 err = -ENOMEM;
 578                 goto err_sq_bufs;
 579         }
 580 
 581         qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 582         MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
 583         MLX5_SET(qpc, qpc, log_page_size,
 584                  conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 585         MLX5_SET(qpc, qpc, fre, 1);
 586         MLX5_SET(qpc, qpc, rlky, 1);
 587         MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 588         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 589         MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
 590         MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
 591         MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
 592         MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
 593         MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
 594         MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
 595         MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
 596         MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
 597         if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 598                 MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
 599 
 600         mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
 601                                   (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));
 602 
 603         err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
 604         if (err)
 605                 goto err_sq_bufs;
 606 
 607         conn->qp.mqp.event = mlx5_fpga_conn_event;
 608         mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
 609 
 610         goto out;
 611 
 612 err_sq_bufs:
 613         kvfree(conn->qp.sq.bufs);
 614 err_rq_bufs:
 615         kvfree(conn->qp.rq.bufs);
 616 err_wq:
 617         mlx5_wq_destroy(&conn->qp.wq_ctrl);
 618 out:
 619         kvfree(in);
 620         return err;
 621 }
 622 
 623 static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
 624 {
 625         int ix;
 626 
 627         for (ix = 0; ix < conn->qp.rq.size; ix++) {
 628                 if (!conn->qp.rq.bufs[ix])
 629                         continue;
 630                 mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
 631                 kfree(conn->qp.rq.bufs[ix]);
 632                 conn->qp.rq.bufs[ix] = NULL;
 633         }
 634 }
 635 
 636 static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
 637 {
 638         struct mlx5_fpga_dma_buf *buf, *temp;
 639         int ix;
 640 
 641         for (ix = 0; ix < conn->qp.sq.size; ix++) {
 642                 buf = conn->qp.sq.bufs[ix];
 643                 if (!buf)
 644                         continue;
 645                 conn->qp.sq.bufs[ix] = NULL;
 646                 mlx5_fpga_conn_unmap_buf(conn, buf);
 647                 if (!buf->complete)
 648                         continue;
 649                 buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
 650         }
 651         list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
 652                 mlx5_fpga_conn_unmap_buf(conn, buf);
 653                 if (!buf->complete)
 654                         continue;
 655                 buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
 656         }
 657 }
 658 
 659 static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
 660 {
 661         mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
 662         mlx5_fpga_conn_free_recv_bufs(conn);
 663         mlx5_fpga_conn_flush_send_bufs(conn);
 664         kvfree(conn->qp.sq.bufs);
 665         kvfree(conn->qp.rq.bufs);
 666         mlx5_wq_destroy(&conn->qp.wq_ctrl);
 667 }
 668 
 669 static inline int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
 670 {
 671         struct mlx5_core_dev *mdev = conn->fdev->mdev;
 672 
 673         mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
 674 
 675         return mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, NULL,
 676                                    &conn->qp.mqp);
 677 }
 678 
 679 static inline int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
 680 {
 681         struct mlx5_fpga_device *fdev = conn->fdev;
 682         struct mlx5_core_dev *mdev = fdev->mdev;
 683         u32 *qpc = NULL;
 684         int err;
 685 
 686         mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
 687 
 688         qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
 689         if (!qpc) {
 690                 err = -ENOMEM;
 691                 goto out;
 692         }
 693 
 694         MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 695         MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 696         MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
 697         MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
 698         MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
 699         MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
 700         MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
 701         MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
 702 
 703         err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, qpc,
 704                                   &conn->qp.mqp);
 705         if (err) {
 706                 mlx5_fpga_warn(fdev, "qp_modify RST2INIT failed: %d\n", err);
 707                 goto out;
 708         }
 709 
 710 out:
 711         kfree(qpc);
 712         return err;
 713 }
 714 
 715 static inline int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
 716 {
 717         struct mlx5_fpga_device *fdev = conn->fdev;
 718         struct mlx5_core_dev *mdev = fdev->mdev;
 719         u32 *qpc = NULL;
 720         int err;
 721 
 722         mlx5_fpga_dbg(conn->fdev, "QP RTR\n");
 723 
 724         qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
 725         if (!qpc) {
 726                 err = -ENOMEM;
 727                 goto out;
 728         }
 729 
 730         MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
 731         MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
 732         MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
 733         MLX5_SET(qpc, qpc, next_rcv_psn,
 734                  MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
 735         MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
 736         MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
 737         ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
 738                         MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
 739         MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
 740                  MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
 741         MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
 742                  conn->qp.sgid_index);
 743         MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
 744         memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
 745                MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
 746                MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));
 747 
 748         err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, qpc,
 749                                   &conn->qp.mqp);
 750         if (err) {
  751                 mlx5_fpga_warn(fdev, "qp_modify INIT2RTR failed: %d\n", err);
 752                 goto out;
 753         }
 754 
 755 out:
 756         kfree(qpc);
 757         return err;
 758 }
 759 
 760 static inline int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
 761 {
 762         struct mlx5_fpga_device *fdev = conn->fdev;
 763         struct mlx5_core_dev *mdev = fdev->mdev;
 764         u32 *qpc = NULL;
 765         u32 opt_mask;
 766         int err;
 767 
 768         mlx5_fpga_dbg(conn->fdev, "QP RTS\n");
 769 
 770         qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
 771         if (!qpc) {
 772                 err = -ENOMEM;
 773                 goto out;
 774         }
 775 
 776         MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
 777         MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
 778         MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
 779         MLX5_SET(qpc, qpc, next_send_psn,
 780                  MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
 781         MLX5_SET(qpc, qpc, retry_count, 7);
 782         MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */
 783 
 784         opt_mask = MLX5_QP_OPTPAR_RNR_TIMEOUT;
 785         err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, opt_mask, qpc,
 786                                   &conn->qp.mqp);
 787         if (err) {
  788                 mlx5_fpga_warn(fdev, "qp_modify RTR2RTS failed: %d\n", err);
 789                 goto out;
 790         }
 791 
 792 out:
 793         kfree(qpc);
 794         return err;
 795 }
 796 
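      /* Activate the FPGA QP and move the host QP through
       * RESET -> INIT -> RTR -> RTS, posting receive buffers along the way.
       */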
 797 static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
 798 {
 799         struct mlx5_fpga_device *fdev = conn->fdev;
 800         int err;
 801 
 802         MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
 803         err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
 804                                   MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
 805         if (err) {
 806                 mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
 807                 goto out;
 808         }
 809 
 810         err = mlx5_fpga_conn_reset_qp(conn);
 811         if (err) {
 812                 mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
 813                 goto err_fpga_qp;
 814         }
 815 
 816         err = mlx5_fpga_conn_init_qp(conn);
 817         if (err) {
 818                 mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
 819                 goto err_fpga_qp;
 820         }
 821         conn->qp.active = true;
 822 
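              /* Fill the RQ with receive buffers; the loop stops on the first
               * error, typically -EBUSY once the RQ is full.
               */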
 823         while (!mlx5_fpga_conn_post_recv_buf(conn))
 824                 ;
 825 
 826         err = mlx5_fpga_conn_rtr_qp(conn);
 827         if (err) {
 828                 mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
 829                 goto err_recv_bufs;
 830         }
 831 
 832         err = mlx5_fpga_conn_rts_qp(conn);
 833         if (err) {
 834                 mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
 835                 goto err_recv_bufs;
 836         }
 837         goto out;
 838 
 839 err_recv_bufs:
 840         mlx5_fpga_conn_free_recv_bufs(conn);
 841 err_fpga_qp:
 842         MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
 843         if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
 844                                 MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
 845                 mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
 846 out:
 847         return err;
 848 }
 849 
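      /* Create a connection to the FPGA: reserve a RoCE GID, create the CQ,
       * the host QP and the FPGA QP, then connect the two QPs to each other.
       */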
 850 struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
 851                                              struct mlx5_fpga_conn_attr *attr,
 852                                              enum mlx5_ifc_fpga_qp_type qp_type)
 853 {
 854         struct mlx5_fpga_conn *ret, *conn;
 855         u8 *remote_mac, *remote_ip;
 856         int err;
 857 
 858         if (!attr->recv_cb)
 859                 return ERR_PTR(-EINVAL);
 860 
 861         conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 862         if (!conn)
 863                 return ERR_PTR(-ENOMEM);
 864 
 865         conn->fdev = fdev;
 866         INIT_LIST_HEAD(&conn->qp.sq.backlog);
 867 
 868         spin_lock_init(&conn->qp.sq.lock);
 869 
 870         conn->recv_cb = attr->recv_cb;
 871         conn->cb_arg = attr->cb_arg;
 872 
 873         remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
 874         err = mlx5_query_mac_address(fdev->mdev, remote_mac);
 875         if (err) {
 876                 mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
 877                 ret = ERR_PTR(err);
 878                 goto err;
 879         }
 880 
 881         /* Build Modified EUI-64 IPv6 address from the MAC address */
 882         remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
 883         remote_ip[0] = 0xfe;
 884         remote_ip[1] = 0x80;
 885         addrconf_addr_eui48(&remote_ip[8], remote_mac);
 886 
 887         err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
 888         if (err) {
 889                 mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
 890                 ret = ERR_PTR(err);
 891                 goto err;
 892         }
 893 
 894         err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
 895                                      MLX5_ROCE_VERSION_2,
 896                                      MLX5_ROCE_L3_TYPE_IPV6,
 897                                      remote_ip, remote_mac, true, 0,
 898                                      MLX5_FPGA_PORT_NUM);
 899         if (err) {
 900                 mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
 901                 ret = ERR_PTR(err);
 902                 goto err_rsvd_gid;
 903         }
 904         mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);
 905 
 906         /* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
 907          * created during processing of the cqe
 908          */
 909         err = mlx5_fpga_conn_create_cq(conn,
 910                                        (attr->tx_size + attr->rx_size) * 2);
 911         if (err) {
 912                 mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
 913                 ret = ERR_PTR(err);
 914                 goto err_gid;
 915         }
 916 
 917         mlx5_fpga_conn_arm_cq(conn);
 918 
 919         err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
 920         if (err) {
 921                 mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
 922                 ret = ERR_PTR(err);
 923                 goto err_cq;
 924         }
 925 
 926         MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
 927         MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
 928         MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
 929         MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
 930         MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
 931         MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
 932         MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
 933         MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
 934         MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
 935         MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
 936         MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);
 937 
 938         err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
 939                                   &conn->fpga_qpn);
 940         if (err) {
 941                 mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
 942                 ret = ERR_PTR(err);
 943                 goto err_qp;
 944         }
 945 
 946         err = mlx5_fpga_conn_connect(conn);
 947         if (err) {
 948                 ret = ERR_PTR(err);
 949                 goto err_conn;
 950         }
 951 
 952         mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
 953         ret = conn;
 954         goto out;
 955 
 956 err_conn:
 957         mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
 958 err_qp:
 959         mlx5_fpga_conn_destroy_qp(conn);
 960 err_cq:
 961         mlx5_fpga_conn_destroy_cq(conn);
 962 err_gid:
 963         mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
 964                                NULL, false, 0, MLX5_FPGA_PORT_NUM);
 965 err_rsvd_gid:
 966         mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
 967 err:
 968         kfree(conn);
 969 out:
 970         return ret;
 971 }
 972 
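      /* Tear down a connection created by mlx5_fpga_conn_create(). */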
 973 void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
 974 {
 975         struct mlx5_fpga_device *fdev = conn->fdev;
 976         struct mlx5_core_dev *mdev = fdev->mdev;
 977         int err = 0;
 978 
 979         conn->qp.active = false;
 980         tasklet_disable(&conn->cq.tasklet);
 981         synchronize_irq(conn->cq.mcq.irqn);
 982 
 983         mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
 984         err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, NULL,
 985                                   &conn->qp.mqp);
 986         if (err)
 987                 mlx5_fpga_warn(fdev, "qp_modify 2ERR failed: %d\n", err);
 988         mlx5_fpga_conn_destroy_qp(conn);
 989         mlx5_fpga_conn_destroy_cq(conn);
 990 
 991         mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
 992                                NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
 993         mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
 994         kfree(conn);
 995 }
 996 
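      /* Enable RoCE on the vport and allocate the per-device UAR, PD and MKey
       * shared by all FPGA connections.
       */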
 997 int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
 998 {
 999         int err;
1000 
1001         err = mlx5_nic_vport_enable_roce(fdev->mdev);
1002         if (err) {
1003                 mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
1004                 goto out;
1005         }
1006 
1007         fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
1008         if (IS_ERR(fdev->conn_res.uar)) {
1009                 err = PTR_ERR(fdev->conn_res.uar);
1010                 mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
1011                 goto err_roce;
1012         }
1013         mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
1014                       fdev->conn_res.uar->index);
1015 
1016         err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
1017         if (err) {
1018                 mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
1019                 goto err_uar;
1020         }
1021         mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);
1022 
1023         err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
1024                                          &fdev->conn_res.mkey);
1025         if (err) {
1026                 mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
1027                 goto err_dealloc_pd;
1028         }
1029         mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
1030 
1031         return 0;
1032 
1033 err_dealloc_pd:
1034         mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
1035 err_uar:
1036         mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
1037 err_roce:
1038         mlx5_nic_vport_disable_roce(fdev->mdev);
1039 out:
1040         return err;
1041 }
1042 
1043 void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
1044 {
1045         mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
1046         mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
1047         mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
1048         mlx5_nic_vport_disable_roce(fdev->mdev);
1049 }
