msrq 669 drivers/infiniband/hw/mlx4/cq.c struct mlx4_srq *msrq = NULL;
msrq 732 drivers/infiniband/hw/mlx4/cq.c msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
msrq 749 drivers/infiniband/hw/mlx4/cq.c } else if (msrq) {
msrq 750 drivers/infiniband/hw/mlx4/cq.c srq = to_mibsrq(msrq);
msrq 356 drivers/infiniband/hw/mlx4/mlx4_ib.h struct mlx4_srq msrq;
msrq 706 drivers/infiniband/hw/mlx4/mlx4_ib.h static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
msrq 708 drivers/infiniband/hw/mlx4/mlx4_ib.h return container_of(msrq, struct mlx4_ib_srq, msrq);
msrq 2456 drivers/infiniband/hw/mlx4/qp.c to_msrq(ibsrq)->msrq.srqn);
msrq 44 drivers/infiniband/hw/mlx4/srq.c return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
msrq 96 drivers/infiniband/hw/mlx4/srq.c srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
msrq 97 drivers/infiniband/hw/mlx4/srq.c srq->msrq.max_gs = init_attr->attr.max_sge;
msrq 101 drivers/infiniband/hw/mlx4/srq.c srq->msrq.max_gs *
msrq 103 drivers/infiniband/hw/mlx4/srq.c srq->msrq.wqe_shift = ilog2(desc_size);
msrq 105 drivers/infiniband/hw/mlx4/srq.c buf_size = srq->msrq.max * desc_size;
msrq 143 drivers/infiniband/hw/mlx4/srq.c srq->tail = srq->msrq.max - 1;
msrq 146 drivers/infiniband/hw/mlx4/srq.c for (i = 0; i < srq->msrq.max; ++i) {
msrq 149 drivers/infiniband/hw/mlx4/srq.c cpu_to_be16((i + 1) & (srq->msrq.max - 1));
msrq 166 drivers/infiniband/hw/mlx4/srq.c srq->wrid = kvmalloc_array(srq->msrq.max,
msrq 180 drivers/infiniband/hw/mlx4/srq.c &srq->mtt, srq->db.dma, &srq->msrq);
msrq 184 drivers/infiniband/hw/mlx4/srq.c srq->msrq.event = mlx4_ib_srq_event;
msrq 185 drivers/infiniband/hw/mlx4/srq.c srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
msrq 188 drivers/infiniband/hw/mlx4/srq.c if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
msrq 193 drivers/infiniband/hw/mlx4/srq.c init_attr->attr.max_wr = srq->msrq.max - 1;
msrq 230 drivers/infiniband/hw/mlx4/srq.c if (attr->srq_limit >= srq->msrq.max)
msrq 234 drivers/infiniband/hw/mlx4/srq.c ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
msrq 251 drivers/infiniband/hw/mlx4/srq.c ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
msrq 256 drivers/infiniband/hw/mlx4/srq.c srq_attr->max_wr = srq->msrq.max - 1;
msrq 257 drivers/infiniband/hw/mlx4/srq.c srq_attr->max_sge = srq->msrq.max_gs;
msrq 265 drivers/infiniband/hw/mlx4/srq.c struct mlx4_ib_srq *msrq = to_msrq(srq);
msrq 267 drivers/infiniband/hw/mlx4/srq.c mlx4_srq_free(dev->dev, &msrq->msrq);
msrq 268 drivers/infiniband/hw/mlx4/srq.c mlx4_mtt_cleanup(dev->dev, &msrq->mtt);
msrq 276 drivers/infiniband/hw/mlx4/srq.c &msrq->db);
msrq 278 drivers/infiniband/hw/mlx4/srq.c kvfree(msrq->wrid);
msrq 279 drivers/infiniband/hw/mlx4/srq.c mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
msrq 280 drivers/infiniband/hw/mlx4/srq.c &msrq->buf);
msrq 281 drivers/infiniband/hw/mlx4/srq.c mlx4_db_free(dev->dev, &msrq->db);
msrq 283 drivers/infiniband/hw/mlx4/srq.c ib_umem_release(msrq->umem);
msrq 321 drivers/infiniband/hw/mlx4/srq.c if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
msrq 345 drivers/infiniband/hw/mlx4/srq.c if (i < srq->msrq.max_gs) {
msrq 178 drivers/infiniband/hw/mlx5/cq.c struct mlx5_core_srq *msrq = NULL;
msrq 181 drivers/infiniband/hw/mlx5/cq.c msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
msrq 182 drivers/infiniband/hw/mlx5/cq.c srq = to_mibsrq(msrq);
msrq 190 drivers/infiniband/hw/mlx5/cq.c if (msrq)
msrq 191 drivers/infiniband/hw/mlx5/cq.c mlx5_core_res_put(&msrq->common);
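The to_mibsrq() helpers listed above (mlx4_ib.h:706-708 and, further down, mlx5_ib.h:1073-1075) recover the driver's wrapper structure from a pointer to the embedded core SRQ via container_of(). Below is a minimal user-space sketch of that idiom; the struct names core_srq and ib_srq_wrapper are hypothetical stand-ins for mlx4_srq/mlx4_ib_srq, and only the container_of() pattern itself is taken from the listing.

/*
 * Sketch of the container_of() idiom behind to_mibsrq().
 * Stand-in types, not the driver's definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_srq { int srqn; };          /* stands in for mlx4_srq    */
struct ib_srq_wrapper {                 /* stands in for mlx4_ib_srq */
	int placeholder;
	struct core_srq msrq;           /* embedded member, as in the driver */
};

static struct ib_srq_wrapper *to_mibsrq(struct core_srq *msrq)
{
	return container_of(msrq, struct ib_srq_wrapper, msrq);
}

int main(void)
{
	struct ib_srq_wrapper w = { .msrq = { .srqn = 7 } };
	struct core_srq *core = &w.msrq;

	/* Recover the wrapper from a pointer to its embedded member. */
	printf("srqn=%d same_object=%d\n",
	       to_mibsrq(core)->msrq.srqn, to_mibsrq(core) == &w);
	return 0;
}

This is the same trick the completion path uses: the hardware event or CQE only identifies the core SRQ object, and the driver maps it back to its mlx4_ib_srq/mlx5_ib_srq wrapper without any extra lookup table.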
msrq 584 drivers/infiniband/hw/mlx5/devx.c struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
msrq 602 drivers/infiniband/hw/mlx5/devx.c to_msrq(uobj->object)->msrq.srqn) ==
msrq 535 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_core_srq msrq;
msrq 1073 drivers/infiniband/hw/mlx5/mlx5_ib.h static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
msrq 1075 drivers/infiniband/hw/mlx5/mlx5_ib.h return container_of(msrq, struct mlx5_ib_srq, msrq);
msrq 1090 drivers/infiniband/hw/mlx5/odp.c int wqe_size = 1 << srq->msrq.wqe_shift;
msrq 1159 drivers/infiniband/hw/mlx5/odp.c struct mlx5_core_srq *msrq =
msrq 1162 drivers/infiniband/hw/mlx5/odp.c return to_mibsrq(msrq);
msrq 272 drivers/infiniband/hw/mlx5/qp.c srq->msrq.max,
msrq 273 drivers/infiniband/hw/mlx5/qp.c srq->msrq.wqe_shift,
msrq 2225 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
msrq 2231 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
msrq 2236 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
msrq 2239 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
msrq 2573 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
msrq 151 drivers/infiniband/hw/mlx5/srq.c mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
msrq 155 drivers/infiniband/hw/mlx5/srq.c srq->tail = srq->msrq.max - 1;
msrq 158 drivers/infiniband/hw/mlx5/srq.c for (i = 0; i < srq->msrq.max; i++) {
msrq 161 drivers/infiniband/hw/mlx5/srq.c cpu_to_be16((i + 1) & (srq->msrq.max - 1));
msrq 172 drivers/infiniband/hw/mlx5/srq.c srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
msrq 239 drivers/infiniband/hw/mlx5/srq.c srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
msrq 240 drivers/infiniband/hw/mlx5/srq.c srq->msrq.max_gs = init_attr->attr.max_sge;
msrq 243 drivers/infiniband/hw/mlx5/srq.c srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
msrq 244 drivers/infiniband/hw/mlx5/srq.c if (desc_size == 0 || srq->msrq.max_gs > desc_size)
msrq 252 drivers/infiniband/hw/mlx5/srq.c srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
msrq 254 drivers/infiniband/hw/mlx5/srq.c srq->msrq.wqe_shift = ilog2(desc_size);
msrq 255 drivers/infiniband/hw/mlx5/srq.c buf_size = srq->msrq.max * desc_size;
msrq 272 drivers/infiniband/hw/mlx5/srq.c in.log_size = ilog2(srq->msrq.max);
msrq 273 drivers/infiniband/hw/mlx5/srq.c in.wqe_shift = srq->msrq.wqe_shift - 4;
msrq 301 drivers/infiniband/hw/mlx5/srq.c err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
msrq 308 drivers/infiniband/hw/mlx5/srq.c mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
msrq 310 drivers/infiniband/hw/mlx5/srq.c srq->msrq.event = mlx5_ib_srq_event;
msrq 311 drivers/infiniband/hw/mlx5/srq.c srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
msrq 314 drivers/infiniband/hw/mlx5/srq.c if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
msrq 320 drivers/infiniband/hw/mlx5/srq.c init_attr->attr.max_wr = srq->msrq.max - 1;
msrq 325 drivers/infiniband/hw/mlx5/srq.c mlx5_cmd_destroy_srq(dev, &srq->msrq);
msrq 348 drivers/infiniband/hw/mlx5/srq.c if (attr->srq_limit >= srq->msrq.max)
msrq 352 drivers/infiniband/hw/mlx5/srq.c ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
msrq 373 drivers/infiniband/hw/mlx5/srq.c ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
msrq 378 drivers/infiniband/hw/mlx5/srq.c srq_attr->max_wr = srq->msrq.max - 1;
msrq 379 drivers/infiniband/hw/mlx5/srq.c srq_attr->max_sge = srq->msrq.max_gs;
msrq 389 drivers/infiniband/hw/mlx5/srq.c struct mlx5_ib_srq *msrq = to_msrq(srq);
msrq 391 drivers/infiniband/hw/mlx5/srq.c mlx5_cmd_destroy_srq(dev, &msrq->msrq);
msrq 399 drivers/infiniband/hw/mlx5/srq.c &msrq->db);
msrq 400 drivers/infiniband/hw/mlx5/srq.c ib_umem_release(msrq->umem);
msrq 402 drivers/infiniband/hw/mlx5/srq.c destroy_srq_kernel(dev, msrq);
msrq 442 drivers/infiniband/hw/mlx5/srq.c if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
msrq 466 drivers/infiniband/hw/mlx5/srq.c if (i < srq->msrq.max_avail_gather) {
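The create paths shown above (mlx4/srq.c:96-105 and mlx5/srq.c:239-255, 320) size the SRQ by rounding max_wr + 1 up to a power of two, deriving wqe_shift from the descriptor size, and reporting max - 1 back to the caller. The following is a rough user-space sketch of that arithmetic only, with stand-in reimplementations of roundup_pow_of_two() and ilog2() and an assumed descriptor size; it is not the driver code.

/*
 * Sketch of the SRQ sizing arithmetic, under the assumptions above.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int l = 0;
	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	unsigned int max_wr = 100;      /* requested by the caller        */
	unsigned int desc_size = 64;    /* assumed descriptor size, bytes */

	unsigned int max = roundup_pow_of_two(max_wr + 1);  /* 128  */
	unsigned int wqe_shift = ilog2_u32(desc_size);      /* 6    */
	unsigned int buf_size = max * desc_size;            /* 8192 */

	/* One slot stays unused, so the reported capacity is max - 1. */
	printf("max=%u wqe_shift=%u buf_size=%u reported max_wr=%u\n",
	       max, wqe_shift, buf_size, max - 1);
	return 0;
}

For a request of max_wr = 100 this yields a queue of 128 entries and a reported max_wr of 127, which matches the max - 1 that both drivers write back into init_attr->attr.max_wr.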
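The initialisation loops at mlx4/srq.c:143-149 and mlx5/srq.c:155-161 chain the receive descriptors into a circular free list: each entry's next_wqe_index is (i + 1) & (max - 1), and tail starts at max - 1. A simplified sketch follows; SRQ_MAX and the struct are illustrative, and the cpu_to_be16() conversion used by the drivers is omitted here.

/*
 * Sketch of the circular WQE free-list initialisation, simplified.
 */
#include <stdint.h>
#include <stdio.h>

#define SRQ_MAX 8   /* must be a power of two, as the sizing above guarantees */

struct srq_next_seg { uint16_t next_wqe_index; };

int main(void)
{
	struct srq_next_seg wqe[SRQ_MAX];
	unsigned int i;

	/* Link every descriptor to the next one, wrapping at SRQ_MAX. */
	for (i = 0; i < SRQ_MAX; i++)
		wqe[i].next_wqe_index = (uint16_t)((i + 1) & (SRQ_MAX - 1));

	for (i = 0; i < SRQ_MAX; i++)
		printf("wqe %u -> %u\n", i, wqe[i].next_wqe_index);

	/* tail starts at SRQ_MAX - 1, i.e. the last free descriptor. */
	printf("initial tail = %u\n", SRQ_MAX - 1);
	return 0;
}

Because max is a power of two, the (i + 1) & (max - 1) mask makes the last descriptor point back to index 0, so post-receive can keep handing out free slots by following next_wqe_index without bounds checks.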