Lines matching refs:srq (mthca SRQ driver, drivers/infiniband/hw/mthca/mthca_srq.c)
72 static void *get_wqe(struct mthca_srq *srq, int n) in get_wqe() argument
74 if (srq->is_direct) in get_wqe()
75 return srq->queue.direct.buf + (n << srq->wqe_shift); in get_wqe()
77 return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + in get_wqe()
78 ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); in get_wqe()
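A standalone sketch of the index math in get_wqe() above, assuming nothing beyond what the two match lines show: the WQE's byte offset is n << wqe_shift, and in the indirect (paged) case that offset splits into a page-list index and an intra-page offset. The page size and example values here are illustrative; the driver uses the kernel's PAGE_SHIFT/PAGE_SIZE.

    #include <stdio.h>

    #define MODEL_PAGE_SHIFT 12              /* assume 4 KiB pages */
    #define MODEL_PAGE_SIZE  (1u << MODEL_PAGE_SHIFT)

    int main(void)
    {
            unsigned wqe_shift = 6;          /* e.g. a 64-byte WQE stride */
            unsigned n = 70;                 /* WQE index */
            unsigned off = n << wqe_shift;   /* byte offset into the queue */

            printf("direct: buf + 0x%x\n", off);
            printf("paged:  page %u, offset 0x%x\n",
                   off >> MODEL_PAGE_SHIFT, off & (MODEL_PAGE_SIZE - 1));
            return 0;
    }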
97 struct mthca_srq *srq, in mthca_tavor_init_srq_context() argument
102 context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); in mthca_tavor_init_srq_context()
104 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_tavor_init_srq_context()
115 struct mthca_srq *srq, in mthca_arbel_init_srq_context() argument
126 max = srq->max; in mthca_arbel_init_srq_context()
128 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); in mthca_arbel_init_srq_context()
129 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); in mthca_arbel_init_srq_context()
130 context->db_index = cpu_to_be32(srq->db_index); in mthca_arbel_init_srq_context()
131 context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); in mthca_arbel_init_srq_context()
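Both context initializers encode sizes as logarithms in 16-byte units: Tavor's wqe_base_ds holds 1 << (wqe_shift - 4) (the stride divided by 16), Arbel's logstride_usrpage places wqe_shift - 4 in the top bits, and state_logsize_srqn packs log2 of the queue size above what appears to be a 24-bit SRQ number. A minimal model of that last packing; the field widths are inferred from the shifts above, so treat it as illustrative rather than the documented layout.

    #include <stdint.h>

    /* Pack log2(queue size) above a 24-bit SRQ number, as the << 24 suggests. */
    static uint32_t pack_logsize_srqn(unsigned logsize, uint32_t srqn)
    {
            return (uint32_t)logsize << 24 | (srqn & 0xffffff);
    }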
140 static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) in mthca_free_srq_buf() argument
142 mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, in mthca_free_srq_buf()
143 srq->is_direct, &srq->mr); in mthca_free_srq_buf()
144 kfree(srq->wrid); in mthca_free_srq_buf()
148 struct mthca_srq *srq) in mthca_alloc_srq_buf() argument
158 srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); in mthca_alloc_srq_buf()
159 if (!srq->wrid) in mthca_alloc_srq_buf()
162 err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, in mthca_alloc_srq_buf()
164 &srq->queue, &srq->is_direct, pd, 1, &srq->mr); in mthca_alloc_srq_buf()
166 kfree(srq->wrid); in mthca_alloc_srq_buf()
175 for (i = 0; i < srq->max; ++i) { in mthca_alloc_srq_buf()
178 next = wqe = get_wqe(srq, i); in mthca_alloc_srq_buf()
180 if (i < srq->max - 1) { in mthca_alloc_srq_buf()
182 next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1); in mthca_alloc_srq_buf()
189 (void *) scatter < wqe + (1 << srq->wqe_shift); in mthca_alloc_srq_buf()
194 srq->last = get_wqe(srq, srq->max - 1); in mthca_alloc_srq_buf()
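mthca_alloc_srq_buf() threads every WQE onto an initial free list: entry i links to i + 1 both in software (*wqe_to_link()) and in the hardware next-address word (nda_op, the next WQE's byte offset with a low control bit set), the last entry is marked with -1, and the scatter entries are poisoned with an invalid lkey so the HCA stops at unused ones. A standalone model of just the software list setup, with invented names:

    #define NWQES 8

    /* Initial free list: entry i links to i + 1, the last entry to -1. */
    static int next_free[NWQES];

    static void init_free_list(void)
    {
            for (int i = 0; i < NWQES; ++i)
                    next_free[i] = (i < NWQES - 1) ? i + 1 : -1;
    }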
200 struct ib_srq_attr *attr, struct mthca_srq *srq) in mthca_alloc_srq() argument
211 srq->max = attr->max_wr; in mthca_alloc_srq()
212 srq->max_gs = attr->max_sge; in mthca_alloc_srq()
213 srq->counter = 0; in mthca_alloc_srq()
216 srq->max = roundup_pow_of_two(srq->max + 1); in mthca_alloc_srq()
218 srq->max = srq->max + 1; in mthca_alloc_srq()
222 srq->max_gs * sizeof (struct mthca_data_seg))); in mthca_alloc_srq()
227 srq->wqe_shift = ilog2(ds); in mthca_alloc_srq()
229 srq->srqn = mthca_alloc(&dev->srq_table.alloc); in mthca_alloc_srq()
230 if (srq->srqn == -1) in mthca_alloc_srq()
234 err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); in mthca_alloc_srq()
239 srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, in mthca_alloc_srq()
240 srq->srqn, &srq->db); in mthca_alloc_srq()
241 if (srq->db_index < 0) { in mthca_alloc_srq()
254 err = mthca_alloc_srq_buf(dev, pd, srq); in mthca_alloc_srq()
258 spin_lock_init(&srq->lock); in mthca_alloc_srq()
259 srq->refcount = 1; in mthca_alloc_srq()
260 init_waitqueue_head(&srq->wait); in mthca_alloc_srq()
261 mutex_init(&srq->mutex); in mthca_alloc_srq()
264 mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); in mthca_alloc_srq()
266 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); in mthca_alloc_srq()
268 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn); in mthca_alloc_srq()
276 if (mthca_array_set(&dev->srq_table.srq, in mthca_alloc_srq()
277 srq->srqn & (dev->limits.num_srqs - 1), in mthca_alloc_srq()
278 srq)) { in mthca_alloc_srq()
286 srq->first_free = 0; in mthca_alloc_srq()
287 srq->last_free = srq->max - 1; in mthca_alloc_srq()
289 attr->max_wr = srq->max - 1; in mthca_alloc_srq()
290 attr->max_sge = srq->max_gs; in mthca_alloc_srq()
295 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); in mthca_alloc_srq()
301 mthca_free_srq_buf(dev, srq); in mthca_alloc_srq()
308 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); in mthca_alloc_srq()
311 mthca_table_put(dev, dev->srq_table.table, srq->srqn); in mthca_alloc_srq()
314 mthca_free(&dev->srq_table.alloc, srq->srqn); in mthca_alloc_srq()
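The stride chosen at line 227 is a power of two: ds is rounded up from the next-segment header plus max_gs data segments (line 222), so wqe_shift = ilog2(ds) lets every index-to-offset conversion in this file be a shift. A userspace equivalent of that rounding; the 64-byte floor is an assumption for illustration.

    #include <stddef.h>

    /* Round desc_bytes up to a power of two and return its log2, so that
     * byte offset = index << shift. The 64-byte minimum is assumed here. */
    static unsigned wqe_shift_for(size_t desc_bytes)
    {
            unsigned shift = 6;

            while (((size_t)1 << shift) < desc_bytes)
                    ++shift;
            return shift;
    }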
319 static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) in get_srq_refcount() argument
324 c = srq->refcount; in get_srq_refcount()
330 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) in mthca_free_srq() argument
341 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); in mthca_free_srq()
346 mthca_array_clear(&dev->srq_table.srq, in mthca_free_srq()
347 srq->srqn & (dev->limits.num_srqs - 1)); in mthca_free_srq()
348 --srq->refcount; in mthca_free_srq()
351 wait_event(srq->wait, !get_srq_refcount(dev, srq)); in mthca_free_srq()
353 if (!srq->ibsrq.uobject) { in mthca_free_srq()
354 mthca_free_srq_buf(dev, srq); in mthca_free_srq()
356 mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); in mthca_free_srq()
359 mthca_table_put(dev, dev->srq_table.table, srq->srqn); in mthca_free_srq()
360 mthca_free(&dev->srq_table.alloc, srq->srqn); in mthca_free_srq()
368 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_modify_srq() local
376 u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; in mthca_modify_srq()
380 mutex_lock(&srq->mutex); in mthca_modify_srq()
381 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit); in mthca_modify_srq()
382 mutex_unlock(&srq->mutex); in mthca_modify_srq()
391 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_query_srq() local
401 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox); in mthca_query_srq()
413 srq_attr->max_wr = srq->max - 1; in mthca_query_srq()
414 srq_attr->max_sge = srq->max_gs; in mthca_query_srq()
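Note the off-by-one surfaced at lines 289 and 413: the capacity reported back to the consumer is srq->max - 1, consistent with the queue keeping one WQE in reserve (hence roundup_pow_of_two(srq->max + 1) at line 216 and the mem-free special case at line 376).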
425 struct mthca_srq *srq; in mthca_srq_event() local
429 srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); in mthca_srq_event()
430 if (srq) in mthca_srq_event()
431 ++srq->refcount; in mthca_srq_event()
434 if (!srq) { in mthca_srq_event()
439 if (!srq->ibsrq.event_handler) in mthca_srq_event()
444 event.element.srq = &srq->ibsrq; in mthca_srq_event()
445 srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); in mthca_srq_event()
449 if (!--srq->refcount) in mthca_srq_event()
450 wake_up(&srq->wait); in mthca_srq_event()
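The teardown protocol is split between mthca_free_srq() and mthca_srq_event(): the event path takes a reference under the table lock before calling the handler and wakes the wait queue when it drops the last one, while the free path drops the initial reference (taken at line 259) and sleeps until the count reaches zero. A userspace stand-in for that pattern, using a mutex and condition variable in place of the spinlock and wait queue; illustrative, not kernel code.

    #include <pthread.h>

    struct obj {
            pthread_mutex_t lock;    /* init with pthread_mutex_init() */
            pthread_cond_t  wait;    /* init with pthread_cond_init() */
            int             refcount; /* starts at 1, as in mthca_alloc_srq() */
    };

    /* Event side: hold a reference while delivering, wake on last drop. */
    void obj_event(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            ++o->refcount;
            pthread_mutex_unlock(&o->lock);

            /* ... deliver the event ... */

            pthread_mutex_lock(&o->lock);
            if (!--o->refcount)
                    pthread_cond_signal(&o->wait);
            pthread_mutex_unlock(&o->lock);
    }

    /* Teardown side: drop the initial reference, wait out in-flight users. */
    void obj_free(struct obj *o)
    {
            pthread_mutex_lock(&o->lock);
            --o->refcount;
            while (o->refcount)
                    pthread_cond_wait(&o->wait, &o->lock);
            pthread_mutex_unlock(&o->lock);
    }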
457 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) in mthca_free_srq_wqe() argument
462 ind = wqe_addr >> srq->wqe_shift; in mthca_free_srq_wqe()
464 spin_lock(&srq->lock); in mthca_free_srq_wqe()
466 last_free = get_wqe(srq, srq->last_free); in mthca_free_srq_wqe()
468 last_free->nda_op = htonl((ind << srq->wqe_shift) | 1); in mthca_free_srq_wqe()
469 *wqe_to_link(get_wqe(srq, ind)) = -1; in mthca_free_srq_wqe()
470 srq->last_free = ind; in mthca_free_srq_wqe()
472 spin_unlock(&srq->lock); in mthca_free_srq_wqe()
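mthca_free_srq_wqe() returns a completed WQE to the tail of the free list under the SRQ lock: the old tail's software link and hardware next-address word are pointed at the freed index, and the freed entry becomes the new end marker. A minimal model of the list manipulation, software side only, with invented names:

    /* Append index ind at the tail of the free list; -1 marks the end. */
    static void free_wqe(int *next_free, int *last_free, int ind)
    {
            next_free[*last_free] = ind;  /* old tail links to the freed WQE */
            next_free[ind] = -1;          /* freed WQE is the new list end */
            *last_free = ind;
    }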
479 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_tavor_post_srq_recv() local
490 spin_lock_irqsave(&srq->lock, flags); in mthca_tavor_post_srq_recv()
492 first_ind = srq->first_free; in mthca_tavor_post_srq_recv()
495 ind = srq->first_free; in mthca_tavor_post_srq_recv()
496 wqe = get_wqe(srq, ind); in mthca_tavor_post_srq_recv()
500 mthca_err(dev, "SRQ %06x full\n", srq->srqn); in mthca_tavor_post_srq_recv()
506 prev_wqe = srq->last; in mthca_tavor_post_srq_recv()
507 srq->last = wqe; in mthca_tavor_post_srq_recv()
514 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_tavor_post_srq_recv()
517 srq->last = prev_wqe; in mthca_tavor_post_srq_recv()
526 if (i < srq->max_gs) in mthca_tavor_post_srq_recv()
532 srq->wrid[ind] = wr->wr_id; in mthca_tavor_post_srq_recv()
533 srq->first_free = next_ind; in mthca_tavor_post_srq_recv()
545 mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8, in mthca_tavor_post_srq_recv()
549 first_ind = srq->first_free; in mthca_tavor_post_srq_recv()
560 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
571 spin_unlock_irqrestore(&srq->lock, flags); in mthca_tavor_post_srq_recv()
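On Tavor, posted receives become visible through a doorbell write that encodes the first new WQE's byte offset and, in the final write, the request count in the low byte; the loop flushes a doorbell per batch (lines 545-549) rather than per WQE. A hedged model of the batching logic; the flush threshold and the ring() callback standing in for the MMIO write are assumptions.

    #include <stdint.h>

    #define DB_BATCH 256                    /* assumed flush threshold */

    /* Ring once per DB_BATCH WQEs, then flush the remainder with the
     * count in the low byte, mirroring first_ind << wqe_shift above. */
    static void post_model(unsigned nwr, unsigned wqe_shift, uint32_t srqn,
                           void (*ring)(uint32_t hi, uint32_t lo))
    {
            unsigned nreq = 0, ind = 0, first_ind = 0;

            for (unsigned i = 0; i < nwr; ++i, ++ind) {
                    /* ... build the WQE at index ind ... */
                    if (++nreq == DB_BATCH) {
                            nreq = 0;
                            ring(first_ind << wqe_shift, srqn << 8);
                            first_ind = ind + 1;
                    }
            }
            if (nreq)
                    ring(first_ind << wqe_shift, (srqn << 8) | nreq);
    }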
579 struct mthca_srq *srq = to_msrq(ibsrq); in mthca_arbel_post_srq_recv() local
588 spin_lock_irqsave(&srq->lock, flags); in mthca_arbel_post_srq_recv()
591 ind = srq->first_free; in mthca_arbel_post_srq_recv()
592 wqe = get_wqe(srq, ind); in mthca_arbel_post_srq_recv()
596 mthca_err(dev, "SRQ %06x full\n", srq->srqn); in mthca_arbel_post_srq_recv()
607 if (unlikely(wr->num_sge > srq->max_gs)) { in mthca_arbel_post_srq_recv()
618 if (i < srq->max_gs) in mthca_arbel_post_srq_recv()
621 srq->wrid[ind] = wr->wr_id; in mthca_arbel_post_srq_recv()
622 srq->first_free = next_ind; in mthca_arbel_post_srq_recv()
626 srq->counter += nreq; in mthca_arbel_post_srq_recv()
633 *srq->db = cpu_to_be32(srq->counter); in mthca_arbel_post_srq_recv()
636 spin_unlock_irqrestore(&srq->lock, flags); in mthca_arbel_post_srq_recv()
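Mem-free (Arbel) hardware instead polls a doorbell record in host memory: the driver bumps a running counter by the number of posted WQEs and stores it big-endian, with a write barrier in between so the HCA never observes the counter ahead of the descriptors it covers. A userspace-flavored model of that ordering; the release fence stands in for wmb() and the byte swap for cpu_to_be32() on a little-endian host.

    #include <stdint.h>

    /* Publish the producer counter only after the WQEs it covers. */
    static void publish_counter(volatile uint32_t *db_rec, uint32_t *counter,
                                unsigned nreq)
    {
            *counter += nreq;
            __atomic_thread_fence(__ATOMIC_RELEASE);  /* stand-in for wmb() */
            *db_rec = __builtin_bswap32(*counter);    /* record is big-endian */
    }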
681 err = mthca_array_init(&dev->srq_table.srq, in mthca_init_srq_table()
694 mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); in mthca_cleanup_srq_table()
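The & (dev->limits.num_srqs - 1) masks seen at lines 277, 347, and 429 rely on the table size being a power of two, so the mask extracts the array slot from the SRQ number. In standalone form:

    #include <stdint.h>

    /* Assumes num_srqs is a power of two, as the masks in the listing imply. */
    static unsigned srqn_to_slot(uint32_t srqn, uint32_t num_srqs)
    {
            return srqn & (num_srqs - 1);
    }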