Lines matching refs:srq (cross-reference listing of the SRQ verbs provider, drivers/infiniband/hw/mlx5/srq.c)

46 static void *get_wqe(struct mlx5_ib_srq *srq, int n)  in get_wqe()  argument
48 return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); in get_wqe()
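
get_wqe() maps a WQE index to its address: strides are a power of two, so entry n lives at buf + (n << wqe_shift). A minimal self-contained sketch; wqe_at, srq_buf and wqe_shift are hypothetical stand-ins for mlx5_buf_offset(), srq->buf and srq->msrq.wqe_shift.

    #include <stddef.h>

    /* Stand-in for get_wqe(): entry n starts (n << wqe_shift) bytes
     * into the WQE buffer. */
    static void *wqe_at(void *srq_buf, int n, int wqe_shift)
    {
        return (char *)srq_buf + ((size_t)n << wqe_shift);
    }
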
51 static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) in mlx5_ib_srq_event() argument
54 struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq; in mlx5_ib_srq_event()
58 event.element.srq = ibsrq; in mlx5_ib_srq_event()
68 type, srq->srqn); in mlx5_ib_srq_event()
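
mlx5_ib_srq_event() translates mlx5 core SRQ events into an ib_event and invokes the consumer's event handler; unrecognized types are only logged with the srqn. From userspace such events surface through the async-event queue. A minimal sketch, assuming ctx is an open ibv_context:

    #include <infiniband/verbs.h>
    #include <stdio.h>

    /* Block for one async event and report the SRQ-related ones. */
    static void drain_one_srq_event(struct ibv_context *ctx)
    {
        struct ibv_async_event ev;

        if (ibv_get_async_event(ctx, &ev))
            return;
        if (ev.event_type == IBV_EVENT_SRQ_LIMIT_REACHED)
            printf("SRQ limit reached on %p\n", (void *)ev.element.srq);
        else if (ev.event_type == IBV_EVENT_SRQ_ERR)
            printf("SRQ error on %p\n", (void *)ev.element.srq);
        ibv_ack_async_event(&ev);
    }
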
76 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, in create_srq_user() argument
103 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); in create_srq_user()
105 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size, in create_srq_user()
107 if (IS_ERR(srq->umem)) { in create_srq_user()
109 err = PTR_ERR(srq->umem); in create_srq_user()
113 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, in create_srq_user()
129 mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0); in create_srq_user()
132 ucmd.db_addr, &srq->db); in create_srq_user()
147 ib_umem_release(srq->umem); in create_srq_user()
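
create_srq_user() covers the userspace-created case: it reads the ucmd (buffer address, doorbell address, flags), pins the user buffer with ib_umem_get(), populates the PAS array, and maps the user doorbell; ib_umem_release() is the unwind path. The userspace call that reaches it is ibv_create_srq(). A minimal sketch, assuming pd is an allocated protection domain (the max_wr/max_sge values are illustrative):

    #include <infiniband/verbs.h>

    /* Create an SRQ; the driver rounds max_wr up to a power of two. */
    static struct ibv_srq *make_srq(struct ibv_pd *pd)
    {
        struct ibv_srq_init_attr init = {
            .attr = {
                .max_wr  = 1024,
                .max_sge = 1,
            },
        };

        return ibv_create_srq(pd, &init); /* NULL on failure */
    }
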
152 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, in create_srq_kernel() argument
162 err = mlx5_db_alloc(dev->mdev, &srq->db); in create_srq_kernel()
168 if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { in create_srq_kernel()
173 page_shift = srq->buf.page_shift; in create_srq_kernel()
175 srq->head = 0; in create_srq_kernel()
176 srq->tail = srq->msrq.max - 1; in create_srq_kernel()
177 srq->wqe_ctr = 0; in create_srq_kernel()
179 for (i = 0; i < srq->msrq.max; i++) { in create_srq_kernel()
180 next = get_wqe(srq, i); in create_srq_kernel()
182 cpu_to_be16((i + 1) & (srq->msrq.max - 1)); in create_srq_kernel()
185 npages = DIV_ROUND_UP(srq->buf.npages, 1 << (page_shift - PAGE_SHIFT)); in create_srq_kernel()
187 buf_size, page_shift, srq->buf.npages, npages); in create_srq_kernel()
194 mlx5_fill_page_array(&srq->buf, (*in)->pas); in create_srq_kernel()
196 srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL); in create_srq_kernel()
197 if (!srq->wrid) { in create_srq_kernel()
199 (unsigned long)(srq->msrq.max * sizeof(u64))); in create_srq_kernel()
203 srq->wq_sig = !!srq_signature; in create_srq_kernel()
213 mlx5_buf_free(dev->mdev, &srq->buf); in create_srq_kernel()
216 mlx5_db_free(dev->mdev, &srq->db); in create_srq_kernel()
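
The initialization loop in create_srq_kernel() links every WQE's next_wqe_index to (i + 1) & (max - 1), turning the ring into a circular singly linked free list with head = 0 and tail = max - 1. A self-contained sketch of that setup; next_seg is a simplified stand-in for struct mlx5_wqe_srq_next_seg (the real field is big-endian, and max must be a power of two):

    #include <stdint.h>

    struct next_seg {
        uint16_t next_wqe_index; /* big-endian in the real WQE */
    };

    /* Chain entry i to entry (i + 1) mod max to form the free list. */
    static void init_free_list(struct next_seg *wqes, int max,
                               int *head, int *tail)
    {
        int i;

        for (i = 0; i < max; i++)
            wqes[i].next_wqe_index = (uint16_t)((i + 1) & (max - 1));
        *head = 0;
        *tail = max - 1;
    }
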
220 static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq) in destroy_srq_user() argument
222 mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db); in destroy_srq_user()
223 ib_umem_release(srq->umem); in destroy_srq_user()
227 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) in destroy_srq_kernel() argument
229 kfree(srq->wrid); in destroy_srq_kernel()
230 mlx5_buf_free(dev->mdev, &srq->buf); in destroy_srq_kernel()
231 mlx5_db_free(dev->mdev, &srq->db); in destroy_srq_kernel()
240 struct mlx5_ib_srq *srq; in mlx5_ib_create_srq() local
258 srq = kmalloc(sizeof(*srq), GFP_KERNEL); in mlx5_ib_create_srq()
259 if (!srq) in mlx5_ib_create_srq()
262 mutex_init(&srq->mutex); in mlx5_ib_create_srq()
263 spin_lock_init(&srq->lock); in mlx5_ib_create_srq()
264 srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1); in mlx5_ib_create_srq()
265 srq->msrq.max_gs = init_attr->attr.max_sge; in mlx5_ib_create_srq()
268 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); in mlx5_ib_create_srq()
271 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / in mlx5_ib_create_srq()
273 srq->msrq.wqe_shift = ilog2(desc_size); in mlx5_ib_create_srq()
274 buf_size = srq->msrq.max * desc_size; in mlx5_ib_create_srq()
276 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, in mlx5_ib_create_srq()
277 srq->msrq.max_avail_gather); in mlx5_ib_create_srq()
280 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); in mlx5_ib_create_srq()
282 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); in mlx5_ib_create_srq()
291 in->ctx.state_log_sz = ilog2(srq->msrq.max); in mlx5_ib_create_srq()
292 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; in mlx5_ib_create_srq()
305 in->ctx.db_record = cpu_to_be64(srq->db.dma); in mlx5_ib_create_srq()
306 err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen); in mlx5_ib_create_srq()
313 mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn); in mlx5_ib_create_srq()
315 srq->msrq.event = mlx5_ib_srq_event; in mlx5_ib_create_srq()
316 srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn; in mlx5_ib_create_srq()
319 if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) { in mlx5_ib_create_srq()
325 init_attr->attr.max_wr = srq->msrq.max - 1; in mlx5_ib_create_srq()
327 return &srq->ibsrq; in mlx5_ib_create_srq()
330 mlx5_core_destroy_srq(dev->mdev, &srq->msrq); in mlx5_ib_create_srq()
334 destroy_srq_user(pd, srq); in mlx5_ib_create_srq()
336 destroy_srq_kernel(dev, srq); in mlx5_ib_create_srq()
339 kfree(srq); in mlx5_ib_create_srq()
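
mlx5_ib_create_srq() reserves one extra WQE (max_wr + 1, so the head == tail full test works), rounds the count up to a power of two, and derives the WQE stride and wqe_shift from the next-segment header plus max_sge scatter entries. A self-contained sketch of that arithmetic; the segment sizes are illustrative stand-ins, not the real struct sizes, and the real code also clamps the stride to a minimum:

    #include <stdio.h>

    #define NEXT_SEG_SIZE 16U /* stand-in for sizeof(struct mlx5_wqe_srq_next_seg) */
    #define DATA_SEG_SIZE 16U /* stand-in for sizeof(struct mlx5_wqe_data_seg) */

    static unsigned int roundup_pow2(unsigned int v)
    {
        unsigned int r = 1;

        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned int max_wr = 1000, max_sge = 2;
        unsigned int max = roundup_pow2(max_wr + 1);            /* 1024 */
        unsigned int desc_size =
            roundup_pow2(NEXT_SEG_SIZE + max_sge * DATA_SEG_SIZE);
        unsigned int wqe_shift = __builtin_ctz(desc_size);      /* ilog2 */
        unsigned int max_avail_gather =
            (desc_size - NEXT_SEG_SIZE) / DATA_SEG_SIZE;

        printf("max=%u desc_size=%u wqe_shift=%u gather=%u buf=%u\n",
               max, desc_size, wqe_shift, max_avail_gather,
               max * desc_size);
        return 0;
    }
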
348 struct mlx5_ib_srq *srq = to_msrq(ibsrq); in mlx5_ib_modify_srq() local
356 if (attr->srq_limit >= srq->msrq.max) in mlx5_ib_modify_srq()
359 mutex_lock(&srq->mutex); in mlx5_ib_modify_srq()
360 ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1); in mlx5_ib_modify_srq()
361 mutex_unlock(&srq->mutex); in mlx5_ib_modify_srq()
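
mlx5_ib_modify_srq() only supports arming the limit: it rejects srq_limit values at or above msrq.max and forwards the request to mlx5_core_arm_srq() under the SRQ mutex. A minimal userspace sketch, assuming srq came from ibv_create_srq():

    #include <infiniband/verbs.h>
    #include <stdint.h>

    /* Arm the SRQ limit so IBV_EVENT_SRQ_LIMIT_REACHED fires once
     * fewer than 'limit' receive WQEs remain posted. */
    static int arm_srq(struct ibv_srq *srq, uint32_t limit)
    {
        struct ibv_srq_attr attr = {
            .srq_limit = limit, /* must be below the SRQ's capacity */
        };

        return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
    }
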
373 struct mlx5_ib_srq *srq = to_msrq(ibsrq); in mlx5_ib_query_srq() local
381 ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out); in mlx5_ib_query_srq()
386 srq_attr->max_wr = srq->msrq.max - 1; in mlx5_ib_query_srq()
387 srq_attr->max_sge = srq->msrq.max_gs; in mlx5_ib_query_srq()
394 int mlx5_ib_destroy_srq(struct ib_srq *srq) in mlx5_ib_destroy_srq() argument
396 struct mlx5_ib_dev *dev = to_mdev(srq->device); in mlx5_ib_destroy_srq()
397 struct mlx5_ib_srq *msrq = to_msrq(srq); in mlx5_ib_destroy_srq()
401 if (srq->uobject) { in mlx5_ib_destroy_srq()
402 mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db); in mlx5_ib_destroy_srq()
408 kfree(srq); in mlx5_ib_destroy_srq()
412 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index) in mlx5_ib_free_srq_wqe() argument
417 spin_lock(&srq->lock); in mlx5_ib_free_srq_wqe()
419 next = get_wqe(srq, srq->tail); in mlx5_ib_free_srq_wqe()
421 srq->tail = wqe_index; in mlx5_ib_free_srq_wqe()
423 spin_unlock(&srq->lock); in mlx5_ib_free_srq_wqe()
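
mlx5_ib_free_srq_wqe() returns a completed WQE to the free list: under srq->lock it points the current tail's next_wqe_index at the freed index and makes that index the new tail. A self-contained sketch, reusing the next_seg stand-in from the earlier sketch (the real code stores the index big-endian):

    #include <stdint.h>

    struct next_seg {
        uint16_t next_wqe_index;
    };

    /* Append the freed WQE at the tail of the circular free list. */
    static void free_wqe(struct next_seg *wqes, int *tail, int wqe_index)
    {
        wqes[*tail].next_wqe_index = (uint16_t)wqe_index;
        *tail = wqe_index;
    }
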
429 struct mlx5_ib_srq *srq = to_msrq(ibsrq); in mlx5_ib_post_srq_recv() local
437 spin_lock_irqsave(&srq->lock, flags); in mlx5_ib_post_srq_recv()
440 if (unlikely(wr->num_sge > srq->msrq.max_gs)) { in mlx5_ib_post_srq_recv()
446 if (unlikely(srq->head == srq->tail)) { in mlx5_ib_post_srq_recv()
452 srq->wrid[srq->head] = wr->wr_id; in mlx5_ib_post_srq_recv()
454 next = get_wqe(srq, srq->head); in mlx5_ib_post_srq_recv()
455 srq->head = be16_to_cpu(next->next_wqe_index); in mlx5_ib_post_srq_recv()
464 if (i < srq->msrq.max_avail_gather) { in mlx5_ib_post_srq_recv()
472 srq->wqe_ctr += nreq; in mlx5_ib_post_srq_recv()
479 *srq->db.db = cpu_to_be32(srq->wqe_ctr); in mlx5_ib_post_srq_recv()
482 spin_unlock_irqrestore(&srq->lock, flags); in mlx5_ib_post_srq_recv()
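
mlx5_ib_post_srq_recv() pops a free WQE at srq->head under the lock (head == tail signals a full SRQ), records the wr_id, fills the scatter list (terminating it with an unused entry when num_sge is below max_avail_gather), then advances wqe_ctr and writes it to the doorbell record. The userspace entry point is ibv_post_srq_recv(). A minimal sketch, assuming srq and a registered MR mr covering buf:

    #include <infiniband/verbs.h>
    #include <stdint.h>

    /* Post a single one-segment receive WQE to the SRQ. */
    static int post_one_recv(struct ibv_srq *srq, struct ibv_mr *mr,
                             void *buf, uint32_t len, uint64_t wr_id)
    {
        struct ibv_sge sge = {
            .addr   = (uintptr_t)buf,
            .length = len,
            .lkey   = mr->lkey,
        };
        struct ibv_recv_wr wr = {
            .wr_id   = wr_id,
            .sg_list = &sge,
            .num_sge = 1,
        };
        struct ibv_recv_wr *bad_wr;

        return ibv_post_srq_recv(srq, &wr, &bad_wr);
    }
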