Searched refs: r_rq (results 1 – 13 of 13), sorted by relevance
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_qp.c
    359  if (qp->r_rq.wq) {                                in ipath_reset_qp()
    360  qp->r_rq.wq->head = 0;                            in ipath_reset_qp()
    361  qp->r_rq.wq->tail = 0;                            in ipath_reset_qp()
    409  if (qp->r_rq.wq) {                                in ipath_error_qp()
    414  spin_lock(&qp->r_rq.lock);                        in ipath_error_qp()
    417  wq = qp->r_rq.wq;                                 in ipath_error_qp()
    419  if (head >= qp->r_rq.size)                        in ipath_error_qp()
    422  if (tail >= qp->r_rq.size)                        in ipath_error_qp()
    425  wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;  in ipath_error_qp()
    426  if (++tail >= qp->r_rq.size)                      in ipath_error_qp()
    [all …]

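Note: the ipath_error_qp() matches trace a single pattern: when the QP drops into the error state, every receive request still queued between tail and head is drained and completed with a flush status, with the indices bounds-checked first because the ring can be mapped into userspace and the driver does not trust them. A minimal standalone sketch of that drain loop; the type and function names here are stand-ins, and the real code holds qp->r_rq.lock and fills struct ib_wc completions with IB_WC_WR_FLUSH_ERR:

    #include <stdio.h>

    /* Stand-in for one receive work-queue entry (cf. get_rwqe_ptr()). */
    struct rwqe { unsigned long long wr_id; };

    /*
     * Drain every entry between tail and head, wrapping at size, the
     * way ipath_error_qp() flushes the receive queue.  The clamps
     * mirror the checks at lines 419/422 above.
     */
    static void flush_rq(struct rwqe *wq, unsigned size,
                         unsigned head, unsigned *tail)
    {
        if (head >= size)
            head = 0;
        if (*tail >= size)
            *tail = 0;
        while (*tail != head) {
            /* Real code reports a struct ib_wc with IB_WC_WR_FLUSH_ERR. */
            printf("flushing wr_id %llu\n", wq[*tail].wr_id);
            if (++*tail >= size)
                *tail = 0;
        }
    }

    int main(void)
    {
        struct rwqe wq[4] = { {1}, {2}, {3}, {4} };
        unsigned tail = 2;

        flush_rq(wq, 4, 1, &tail);  /* drains slots 2, 3, 0 */
        return 0;
    }
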
ipath_verbs.c
    491  struct ipath_rwq *wq = qp->r_rq.wq;               in ipath_post_receive()
    507  if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {  in ipath_post_receive()
    513  spin_lock_irqsave(&qp->r_rq.lock, flags);         in ipath_post_receive()
    515  if (next >= qp->r_rq.size)                        in ipath_post_receive()
    518  spin_unlock_irqrestore(&qp->r_rq.lock, flags);    in ipath_post_receive()
    524  wqe = get_rwqe_ptr(&qp->r_rq, wq->head);          in ipath_post_receive()
    532  spin_unlock_irqrestore(&qp->r_rq.lock, flags);    in ipath_post_receive()

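Note: the ipath_post_receive() matches show the producer half of the ring: compute next = head + 1 with wraparound, refuse the post if next collides with tail (queue full), copy the request into the slot at head, then publish the new head, all under r_rq.lock with interrupts disabled. A self-contained sketch of just that insert step, with hypothetical stand-in types; the max_sge check at line 507 is omitted since the sketch carries no SGE list:

    #include <errno.h>
    #include <stdio.h>

    struct rwqe { unsigned long long wr_id; };

    struct rq {
        struct rwqe *wq;   /* ring storage */
        unsigned size;     /* number of slots */
        unsigned head;     /* producer index */
        unsigned tail;     /* consumer index */
    };

    /* One receive post, mirroring the head-advance logic visible in
     * ipath_post_receive(); the driver wraps this window in
     * spin_lock_irqsave(&qp->r_rq.lock, flags). */
    static int post_recv(struct rq *rq, unsigned long long wr_id)
    {
        unsigned next = rq->head + 1;

        if (next >= rq->size)
            next = 0;
        if (next == rq->tail)   /* full: one slot stays empty by design */
            return -ENOMEM;
        rq->wq[rq->head].wr_id = wr_id;
        rq->head = next;
        return 0;
    }

    int main(void)
    {
        struct rwqe slots[4] = { {0} };
        struct rq rq = { slots, 4, 0, 0 };

        while (post_recv(&rq, 42) == 0)
            ;
        printf("ring full after %u posts\n", rq.head);  /* 3 of 4 slots */
        return 0;
    }
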
ipath_ud.c
    113  rq = &qp->r_rq;                                   in ipath_ud_loopback()

ipath_ruc.c
    183  rq = &qp->r_rq;                                   in ipath_get_rwqe()

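Note: ipath_ud_loopback() and ipath_get_rwqe() are the consumers of the same ring; both take rq = &qp->r_rq and pop the entry at tail when an incoming packet needs a receive buffer. A matching dequeue sketch under the same stand-in types; the real ipath_get_rwqe() also handles shared receive queues and copies the SGE list out before advancing tail:

    #include <stdio.h>

    struct rwqe { unsigned long long wr_id; };

    struct rq {
        struct rwqe *wq;
        unsigned size;
        unsigned head;
        unsigned tail;
    };

    /* Pop the entry at tail, as ipath_get_rwqe() does for an incoming
     * packet; returns 0 when no receive is posted (the RNR case). */
    static int get_rwqe(struct rq *rq, struct rwqe *out)
    {
        if (rq->tail == rq->head)
            return 0;
        *out = rq->wq[rq->tail];
        if (++rq->tail >= rq->size)
            rq->tail = 0;
        return 1;
    }

    int main(void)
    {
        struct rwqe slots[4] = { {7}, {8}, {9}, {0} };
        struct rq rq = { slots, 4, 3, 0 };  /* three posted entries */
        struct rwqe e;

        while (get_rwqe(&rq, &e))
            printf("consumed wr_id %llu\n", e.wr_id);
        return 0;
    }
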
ipath_verbs.h
    441  struct ipath_rq r_rq;  /* receive work queue */   (member)

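Note: this verbs.h match is the declaration the rest of the listing hangs off: every ipath QP embeds its receive queue as qp->r_rq. From the fields the matches dereference (wq, wq->head, wq->tail, lock, size, max_sge), the layout is roughly the following; this is a hedged reconstruction for orientation, not the verbatim 4.4 header:

    /* Ring bookkeeping; lives in memory that can be mmap()ed to
     * userspace, which is why head/tail are re-validated in
     * ipath_error_qp() before use. */
    struct ipath_rwq {
        unsigned int head;          /* producer writes new entries here */
        unsigned int tail;          /* consumer removes entries here */
        /* variable-size array of struct ipath_rwqe follows;
         * get_rwqe_ptr(rq, n) computes the n-th entry's address */
    };

    struct ipath_rq {
        struct ipath_rwq *wq;       /* NULL when the QP uses an SRQ */
        unsigned int size;          /* number of slots in the ring */
        unsigned char max_sge;      /* per-entry SGE limit (line 507) */
        /* plus a spinlock serializing post_receive and error flush */
    };
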
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_qp.c
    411  if (qp->r_rq.wq) {                                in qib_reset_qp()
    412  qp->r_rq.wq->head = 0;                            in qib_reset_qp()
    413  qp->r_rq.wq->tail = 0;                            in qib_reset_qp()
    529  if (qp->r_rq.wq) {                                in qib_error_qp()
    534  spin_lock(&qp->r_rq.lock);                        in qib_error_qp()
    537  wq = qp->r_rq.wq;                                 in qib_error_qp()
    539  if (head >= qp->r_rq.size)                        in qib_error_qp()
    542  if (tail >= qp->r_rq.size)                        in qib_error_qp()
    545  wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;  in qib_error_qp()
    546  if (++tail >= qp->r_rq.size)                      in qib_error_qp()
    [all …]

qib_verbs.c
    514  struct qib_rwq *wq = qp->r_rq.wq;                 in qib_post_receive()
    530  if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {  in qib_post_receive()
    536  spin_lock_irqsave(&qp->r_rq.lock, flags);         in qib_post_receive()
    538  if (next >= qp->r_rq.size)                        in qib_post_receive()
    541  spin_unlock_irqrestore(&qp->r_rq.lock, flags);    in qib_post_receive()
    547  wqe = get_rwqe_ptr(&qp->r_rq, wq->head);          in qib_post_receive()
    555  spin_unlock_irqrestore(&qp->r_rq.lock, flags);    in qib_post_receive()

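Note: the qib matches are line-for-line the same flow as ipath's (qib is the ipath driver's successor for QLogic/Intel IB HCAs), so the sketches above apply unchanged. This kernel path is the provider side of the standard verbs receive post; for reference, a userspace consumer would drive the equivalent operation through libibverbs like this (qp, mr, buf and len are assumed to be set up elsewhere; error handling elided):

    #include <stdint.h>
    #include <infiniband/verbs.h>

    /* Post a single receive buffer on a QP via libibverbs.  num_sge
     * must stay within the QP's advertised max_recv_sge, the same
     * limit qib_post_receive() checks as r_rq.max_sge at line 530. */
    static int post_one_recv(struct ibv_qp *qp, struct ibv_mr *mr,
                             void *buf, uint32_t len, uint64_t wr_id)
    {
        struct ibv_sge sge = {
            .addr   = (uintptr_t)buf,
            .length = len,
            .lkey   = mr->lkey,
        };
        struct ibv_recv_wr wr = {
            .wr_id   = wr_id,
            .sg_list = &sge,
            .num_sge = 1,
        };
        struct ibv_recv_wr *bad_wr;

        return ibv_post_recv(qp, &wr, &bad_wr);
    }
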
qib_ruc.c
    157  rq = &qp->r_rq;                                   in qib_get_rwqe()

qib_verbs.h
    489  struct qib_rq r_rq;  /* receive work queue */     (member)

/linux-4.4.14/drivers/staging/rdma/hfi1/
qp.c
    396  if (qp->r_rq.wq) {                                in reset_qp()
    397  qp->r_rq.wq->head = 0;                            in reset_qp()
    398  qp->r_rq.wq->tail = 0;                            in reset_qp()
    513  if (qp->r_rq.wq) {                                in hfi1_error_qp()
    518  spin_lock(&qp->r_rq.lock);                        in hfi1_error_qp()
    521  wq = qp->r_rq.wq;                                 in hfi1_error_qp()
    523  if (head >= qp->r_rq.size)                        in hfi1_error_qp()
    526  if (tail >= qp->r_rq.size)                        in hfi1_error_qp()
    529  wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;  in hfi1_error_qp()
    530  if (++tail >= qp->r_rq.size)                      in hfi1_error_qp()
    [all …]

verbs.c
    532  struct hfi1_rwq *wq = qp->r_rq.wq;                in post_receive()
    548  if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {  in post_receive()
    554  spin_lock_irqsave(&qp->r_rq.lock, flags);         in post_receive()
    556  if (next >= qp->r_rq.size)                        in post_receive()
    559  spin_unlock_irqrestore(&qp->r_rq.lock, flags);    in post_receive()
    565  wqe = get_rwqe_ptr(&qp->r_rq, wq->head);          in post_receive()
    573  spin_unlock_irqrestore(&qp->r_rq.lock, flags);    in post_receive()

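Note: hfi1 (Intel Omni-Path, still in staging in 4.4) carries the same code forward once more; post_receive() here is qib_post_receive() with the types renamed. One detail the flattened matches hide: after filling the rwqe and before publishing wq->head, these drivers need the entry write to be visible first, for which the kernel idiom is a write barrier (smp_wmb()). A standalone C11 rendering of that publish/consume ordering, with acquire/release atomics as the portable equivalent and all names as stand-ins:

    #include <stdatomic.h>
    #include <stdio.h>

    struct rwqe { unsigned long long wr_id; };

    static struct rwqe slots[4];
    static _Atomic unsigned head;  /* published by the producer */
    static _Atomic unsigned tail;  /* advanced by the consumer */

    /* Producer: write the entry, then release-store head so the
     * entry is globally visible before the new index is. */
    static void publish(unsigned long long wr_id)
    {
        unsigned h = atomic_load_explicit(&head, memory_order_relaxed);

        slots[h].wr_id = wr_id;
        atomic_store_explicit(&head, (h + 1) % 4, memory_order_release);
    }

    /* Consumer: acquire-load head; if it moved, the entry write at
     * that slot is guaranteed visible too. */
    static int consume(struct rwqe *out)
    {
        unsigned t = atomic_load_explicit(&tail, memory_order_relaxed);

        if (t == atomic_load_explicit(&head, memory_order_acquire))
            return 0;  /* empty */
        *out = slots[t];
        atomic_store_explicit(&tail, (t + 1) % 4, memory_order_relaxed);
        return 1;
    }

    int main(void)
    {
        struct rwqe e;

        publish(99);
        if (consume(&e))
            printf("got wr_id %llu\n", e.wr_id);
        return 0;
    }
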
verbs.h
    499  struct hfi1_rq r_rq;  /* receive work queue */    (member)

ruc.c
    175  rq = &qp->r_rq;                                   in hfi1_get_rwqe()