Home
last modified time | relevance | path

Searched refs: r_rq (Results 1 – 13 of 13) sorted by relevance

/linux-4.4.14/drivers/staging/rdma/ipath/
Dipath_qp.c359 if (qp->r_rq.wq) { in ipath_reset_qp()
360 qp->r_rq.wq->head = 0; in ipath_reset_qp()
361 qp->r_rq.wq->tail = 0; in ipath_reset_qp()
409 if (qp->r_rq.wq) { in ipath_error_qp()
414 spin_lock(&qp->r_rq.lock); in ipath_error_qp()
417 wq = qp->r_rq.wq; in ipath_error_qp()
419 if (head >= qp->r_rq.size) in ipath_error_qp()
422 if (tail >= qp->r_rq.size) in ipath_error_qp()
425 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in ipath_error_qp()
426 if (++tail >= qp->r_rq.size) in ipath_error_qp()
[all …]
Dipath_verbs.c491 struct ipath_rwq *wq = qp->r_rq.wq; in ipath_post_receive()
507 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { in ipath_post_receive()
513 spin_lock_irqsave(&qp->r_rq.lock, flags); in ipath_post_receive()
515 if (next >= qp->r_rq.size) in ipath_post_receive()
518 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in ipath_post_receive()
524 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); in ipath_post_receive()
532 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in ipath_post_receive()
Dipath_ud.c113 rq = &qp->r_rq; in ipath_ud_loopback()
Dipath_ruc.c183 rq = &qp->r_rq; in ipath_get_rwqe()
Dipath_verbs.h441 struct ipath_rq r_rq; /* receive work queue */ member
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_qp.c411 if (qp->r_rq.wq) { in qib_reset_qp()
412 qp->r_rq.wq->head = 0; in qib_reset_qp()
413 qp->r_rq.wq->tail = 0; in qib_reset_qp()
529 if (qp->r_rq.wq) { in qib_error_qp()
534 spin_lock(&qp->r_rq.lock); in qib_error_qp()
537 wq = qp->r_rq.wq; in qib_error_qp()
539 if (head >= qp->r_rq.size) in qib_error_qp()
542 if (tail >= qp->r_rq.size) in qib_error_qp()
545 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in qib_error_qp()
546 if (++tail >= qp->r_rq.size) in qib_error_qp()
[all …]
Dqib_verbs.c514 struct qib_rwq *wq = qp->r_rq.wq; in qib_post_receive()
530 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { in qib_post_receive()
536 spin_lock_irqsave(&qp->r_rq.lock, flags); in qib_post_receive()
538 if (next >= qp->r_rq.size) in qib_post_receive()
541 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in qib_post_receive()
547 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); in qib_post_receive()
555 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in qib_post_receive()
Dqib_ruc.c157 rq = &qp->r_rq; in qib_get_rwqe()
Dqib_verbs.h489 struct qib_rq r_rq; /* receive work queue */ member
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dqp.c396 if (qp->r_rq.wq) { in reset_qp()
397 qp->r_rq.wq->head = 0; in reset_qp()
398 qp->r_rq.wq->tail = 0; in reset_qp()
513 if (qp->r_rq.wq) { in hfi1_error_qp()
518 spin_lock(&qp->r_rq.lock); in hfi1_error_qp()
521 wq = qp->r_rq.wq; in hfi1_error_qp()
523 if (head >= qp->r_rq.size) in hfi1_error_qp()
526 if (tail >= qp->r_rq.size) in hfi1_error_qp()
529 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; in hfi1_error_qp()
530 if (++tail >= qp->r_rq.size) in hfi1_error_qp()
[all …]
Dverbs.c532 struct hfi1_rwq *wq = qp->r_rq.wq; in post_receive()
548 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { in post_receive()
554 spin_lock_irqsave(&qp->r_rq.lock, flags); in post_receive()
556 if (next >= qp->r_rq.size) in post_receive()
559 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in post_receive()
565 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); in post_receive()
573 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in post_receive()
Dverbs.h499 struct hfi1_rq r_rq; /* receive work queue */ member
Druc.c175 rq = &qp->r_rq; in hfi1_get_rwqe()