Lines matching references to qp (cross-reference listing: each entry gives the source line number, the matching line, and the enclosing function; the functions are from the ipath InfiniBand driver, apparently drivers/infiniband/hw/ipath/ipath_verbs.c)

336 static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)  in ipath_post_one_send()  argument
345 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; in ipath_post_one_send()
347 spin_lock_irqsave(&qp->s_lock, flags); in ipath_post_one_send()
349 if (qp->ibqp.qp_type != IB_QPT_SMI && in ipath_post_one_send()
356 if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) in ipath_post_one_send()
360 if (wr->num_sge > qp->s_max_sge) in ipath_post_one_send()
368 if (qp->ibqp.qp_type == IB_QPT_UC) { in ipath_post_one_send()
371 } else if (qp->ibqp.qp_type == IB_QPT_UD) { in ipath_post_one_send()
377 if (qp->ibqp.pd != wr->wr.ud.ah->pd) in ipath_post_one_send()
386 else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) in ipath_post_one_send()
389 next = qp->s_head + 1; in ipath_post_one_send()
390 if (next >= qp->s_size) in ipath_post_one_send()
392 if (next == qp->s_last) { in ipath_post_one_send()
397 wqe = get_swqe_ptr(qp, qp->s_head); in ipath_post_one_send()
409 ok = ipath_lkey_ok(qp, &wqe->sg_list[j], in ipath_post_one_send()
418 if (qp->ibqp.qp_type == IB_QPT_UC || in ipath_post_one_send()
419 qp->ibqp.qp_type == IB_QPT_RC) { in ipath_post_one_send()
422 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) in ipath_post_one_send()
424 wqe->ssn = qp->s_ssn++; in ipath_post_one_send()
425 qp->s_head = next; in ipath_post_one_send()
433 spin_unlock_irqrestore(&qp->s_lock, flags); in ipath_post_one_send()
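
The ipath_post_one_send() matches above outline a common pattern: take qp->s_lock with interrupts disabled, validate the QP state and the work request, reserve the next slot of a circular send work queue (wrapping s_head and refusing to catch up with s_last), fill in the WQE, assign it a send sequence number, and only then publish the new head. Below is a minimal, self-contained userspace sketch of that ring discipline; a pthread mutex stands in for qp->s_lock, and the names (send_ring, post_one) are illustrative, not the driver's.

    /* Simplified model of the circular send-queue post in ipath_post_one_send(). */
    #include <pthread.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct wqe { unsigned ssn; int payload; };

    struct send_ring {
        pthread_mutex_t lock;        /* models qp->s_lock */
        struct wqe slots[RING_SIZE];
        unsigned head;               /* next free slot, models qp->s_head */
        unsigned last;               /* oldest unfinished slot, models qp->s_last */
        unsigned ssn;                /* models qp->s_ssn */
    };

    static int post_one(struct send_ring *r, int payload)
    {
        int ret = 0;

        pthread_mutex_lock(&r->lock);
        unsigned next = r->head + 1;
        if (next >= RING_SIZE)       /* wrap the index when it reaches the ring size */
            next = 0;
        if (next == r->last) {       /* ring full: same check as "next == qp->s_last" */
            ret = -1;
            goto out;
        }
        r->slots[r->head].payload = payload;
        r->slots[r->head].ssn = r->ssn++;   /* per-WQE send sequence number */
        r->head = next;              /* publish the new head only after the WQE is filled */
    out:
        pthread_mutex_unlock(&r->lock);
        return ret;
    }

    int main(void)
    {
        struct send_ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
        printf("post: %d\n", post_one(&r, 42));
        return 0;
    }
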
448 struct ipath_qp *qp = to_iqp(ibqp); in ipath_post_send() local
452 err = ipath_post_one_send(qp, wr); in ipath_post_send()
460 ipath_do_send((unsigned long) qp); in ipath_post_send()
477 struct ipath_qp *qp = to_iqp(ibqp); in ipath_post_receive() local
478 struct ipath_rwq *wq = qp->r_rq.wq; in ipath_post_receive()
483 if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) { in ipath_post_receive()
494 if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { in ipath_post_receive()
500 spin_lock_irqsave(&qp->r_rq.lock, flags); in ipath_post_receive()
502 if (next >= qp->r_rq.size) in ipath_post_receive()
505 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in ipath_post_receive()
511 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); in ipath_post_receive()
519 spin_unlock_irqrestore(&qp->r_rq.lock, flags); in ipath_post_receive()
542 void *data, u32 tlen, struct ipath_qp *qp) in ipath_qp_rcv() argument
545 if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { in ipath_qp_rcv()
550 switch (qp->ibqp.qp_type) { in ipath_qp_rcv()
557 ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp); in ipath_qp_rcv()
561 ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp); in ipath_qp_rcv()
565 ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp); in ipath_qp_rcv()
588 struct ipath_qp *qp; in ipath_ib_rcv() local
644 ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp); in ipath_ib_rcv()
652 qp = ipath_lookup_qpn(&dev->qp_table, qp_num); in ipath_ib_rcv()
653 if (qp) { in ipath_ib_rcv()
656 tlen, qp); in ipath_ib_rcv()
661 if (atomic_dec_and_test(&qp->refcount)) in ipath_ib_rcv()
662 wake_up(&qp->wait); in ipath_ib_rcv()
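
The ipath_ib_rcv() and ipath_qp_rcv() matches show the receive dispatch: the destination QP is looked up by QP number, the packet is handled according to qp->ibqp.qp_type (UD, RC, or UC), and the reference taken by the lookup is dropped afterwards, with the last holder waking anyone sleeping on qp->wait. A hedged userspace sketch of that lookup / hold / dispatch / release shape follows; find_qp, deliver, and the table layout are invented stand-ins.

    /* Simplified model of the lookup-hold-dispatch-release pattern in ipath_ib_rcv(). */
    #include <stdatomic.h>
    #include <stdio.h>

    enum qp_type { QPT_RC, QPT_UC, QPT_UD };

    struct qp {
        unsigned qpn;
        enum qp_type type;
        atomic_int refcount;   /* models qp->refcount; wake_up(&qp->wait) is modeled by a printf */
    };

    static struct qp table[4]; /* stand-in for dev->qp_table */

    /* Look the QP up by number and take a reference before returning it. */
    static struct qp *find_qp(unsigned qpn)
    {
        for (unsigned i = 0; i < 4; i++) {
            if (table[i].qpn == qpn) {
                atomic_fetch_add(&table[i].refcount, 1);
                return &table[i];
            }
        }
        return NULL;
    }

    static void deliver(struct qp *qp, const void *data, unsigned tlen)
    {
        (void)data;
        switch (qp->type) {           /* mirrors the switch on qp->ibqp.qp_type */
        case QPT_UD: printf("UD rcv, %u bytes\n", tlen); break;
        case QPT_RC: printf("RC rcv, %u bytes\n", tlen); break;
        case QPT_UC: printf("UC rcv, %u bytes\n", tlen); break;
        }
    }

    static void rcv_packet(unsigned qpn, const void *data, unsigned tlen)
    {
        struct qp *qp = find_qp(qpn);
        if (!qp)
            return;                   /* unknown QPN: packet is dropped */
        deliver(qp, data, tlen);
        /* Drop the reference; the last holder would wake waiters on qp->wait. */
        if (atomic_fetch_sub(&qp->refcount, 1) == 1)
            printf("QP %u now unreferenced\n", qpn);
    }

    int main(void)
    {
        table[1].qpn = 7;
        table[1].type = QPT_UD;
        rcv_packet(7, "x", 1);
        return 0;
    }
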
682 struct ipath_qp *qp; in ipath_ib_timer() local
695 qp = list_entry(last->next, struct ipath_qp, timerwait); in ipath_ib_timer()
696 list_del_init(&qp->timerwait); in ipath_ib_timer()
697 qp->timer_next = resend; in ipath_ib_timer()
698 resend = qp; in ipath_ib_timer()
699 atomic_inc(&qp->refcount); in ipath_ib_timer()
703 qp = list_entry(last->next, struct ipath_qp, timerwait); in ipath_ib_timer()
704 if (--qp->s_rnr_timeout == 0) { in ipath_ib_timer()
706 list_del_init(&qp->timerwait); in ipath_ib_timer()
707 qp->timer_next = rnr; in ipath_ib_timer()
708 rnr = qp; in ipath_ib_timer()
709 atomic_inc(&qp->refcount); in ipath_ib_timer()
712 qp = list_entry(last->next, struct ipath_qp, in ipath_ib_timer()
714 } while (qp->s_rnr_timeout == 0); in ipath_ib_timer()
750 qp = resend; in ipath_ib_timer()
751 resend = qp->timer_next; in ipath_ib_timer()
753 spin_lock_irqsave(&qp->s_lock, flags); in ipath_ib_timer()
754 if (qp->s_last != qp->s_tail && in ipath_ib_timer()
755 ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { in ipath_ib_timer()
757 ipath_restart_rc(qp, qp->s_last_psn + 1); in ipath_ib_timer()
759 spin_unlock_irqrestore(&qp->s_lock, flags); in ipath_ib_timer()
762 if (atomic_dec_and_test(&qp->refcount)) in ipath_ib_timer()
763 wake_up(&qp->wait); in ipath_ib_timer()
766 qp = rnr; in ipath_ib_timer()
767 rnr = qp->timer_next; in ipath_ib_timer()
769 spin_lock_irqsave(&qp->s_lock, flags); in ipath_ib_timer()
770 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) in ipath_ib_timer()
771 ipath_schedule_send(qp); in ipath_ib_timer()
772 spin_unlock_irqrestore(&qp->s_lock, flags); in ipath_ib_timer()
775 if (atomic_dec_and_test(&qp->refcount)) in ipath_ib_timer()
776 wake_up(&qp->wait); in ipath_ib_timer()
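
The ipath_ib_timer() matches show how expired QPs are handled: while the device lock is held, entries are unlinked from the shared timerwait list and chained onto local singly-linked lists via timer_next (one chain for retransmits, one for QPs whose s_rnr_timeout reached zero), with a reference taken on each QP; the actual restart or reschedule work then runs after the lock is dropped, followed by the usual dec-and-wake on the refcount. The sketch below models that unlink-then-process-outside-the-lock shape with a single chain; the list representation and names are simplified stand-ins, not the driver's data structures.

    /* Simplified model of the ipath_ib_timer() pattern: under a lock, move expired
     * QPs onto a private chain and take a reference; process the chain unlocked. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct qp {
        struct qp *timerwait;    /* next entry on the shared timer list (simplified) */
        struct qp *timer_next;   /* link used for the private "resend" chain */
        atomic_int refcount;
        int rnr_timeout;
    };

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct qp *timer_list;    /* stand-in for the device's pending list */

    static void timer_tick(void)
    {
        struct qp *resend = NULL;

        pthread_mutex_lock(&dev_lock);
        while (timer_list) {                     /* unlink every expired QP */
            struct qp *qp = timer_list;
            timer_list = qp->timerwait;
            qp->timerwait = NULL;
            qp->timer_next = resend;             /* push onto the private chain */
            resend = qp;
            atomic_fetch_add(&qp->refcount, 1);  /* hold the QP across the unlocked work */
        }
        pthread_mutex_unlock(&dev_lock);

        while (resend) {                         /* heavy work runs with dev_lock dropped */
            struct qp *qp = resend;
            resend = qp->timer_next;
            printf("restart send, rnr_timeout=%d\n", qp->rnr_timeout);
            if (atomic_fetch_sub(&qp->refcount, 1) == 1)
                printf("QP unreferenced, wake waiters\n");
        }
    }

    int main(void)
    {
        static struct qp a = { .rnr_timeout = 3 };
        timer_list = &a;
        timer_tick();
        return 0;
    }
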
1031 struct ipath_qp *qp = tx->qp; in sdma_complete() local
1032 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); in sdma_complete()
1037 if (atomic_dec_and_test(&qp->s_dma_busy)) { in sdma_complete()
1038 spin_lock_irqsave(&qp->s_lock, flags); in sdma_complete()
1040 ipath_send_complete(qp, tx->wqe, ibs); in sdma_complete()
1041 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && in sdma_complete()
1042 qp->s_last != qp->s_head) || in sdma_complete()
1043 (qp->s_flags & IPATH_S_WAIT_DMA)) in sdma_complete()
1044 ipath_schedule_send(qp); in sdma_complete()
1045 spin_unlock_irqrestore(&qp->s_lock, flags); in sdma_complete()
1046 wake_up(&qp->wait_dma); in sdma_complete()
1048 spin_lock_irqsave(&qp->s_lock, flags); in sdma_complete()
1049 ipath_send_complete(qp, tx->wqe, ibs); in sdma_complete()
1050 spin_unlock_irqrestore(&qp->s_lock, flags); in sdma_complete()
1057 if (atomic_dec_and_test(&qp->refcount)) in sdma_complete()
1058 wake_up(&qp->wait); in sdma_complete()
1061 static void decrement_dma_busy(struct ipath_qp *qp) in decrement_dma_busy() argument
1065 if (atomic_dec_and_test(&qp->s_dma_busy)) { in decrement_dma_busy()
1066 spin_lock_irqsave(&qp->s_lock, flags); in decrement_dma_busy()
1067 if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && in decrement_dma_busy()
1068 qp->s_last != qp->s_head) || in decrement_dma_busy()
1069 (qp->s_flags & IPATH_S_WAIT_DMA)) in decrement_dma_busy()
1070 ipath_schedule_send(qp); in decrement_dma_busy()
1071 spin_unlock_irqrestore(&qp->s_lock, flags); in decrement_dma_busy()
1072 wake_up(&qp->wait_dma); in decrement_dma_busy()
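
sdma_complete() and decrement_dma_busy() share one idea: only the path that drops qp->s_dma_busy to zero (atomic_dec_and_test) re-takes qp->s_lock, checks whether the QP still has queued work to flush or was explicitly waiting for DMA to drain (IPATH_S_WAIT_DMA), reschedules the send engine if so, and finally wakes sleepers on qp->wait_dma. A simplified model of that "last DMA done" handling is below; it drops the state-table check and uses invented names (dma_done, schedule_send, S_WAIT_DMA).

    /* Simplified model of the last-DMA-completion handling shared by
     * sdma_complete() and decrement_dma_busy(). */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define S_WAIT_DMA 0x1          /* stand-in for IPATH_S_WAIT_DMA */

    struct qp {
        pthread_mutex_t s_lock;
        atomic_int s_dma_busy;      /* outstanding DMA descriptors for this QP */
        unsigned s_last, s_head;    /* send queue indices */
        unsigned s_flags;
    };

    static void schedule_send(struct qp *qp)
    {
        (void)qp;
        printf("send engine rescheduled\n");   /* models ipath_schedule_send(qp) */
    }

    static void dma_done(struct qp *qp)
    {
        /* Only act when this was the last in-flight DMA (atomic_dec_and_test). */
        if (atomic_fetch_sub(&qp->s_dma_busy, 1) != 1)
            return;

        pthread_mutex_lock(&qp->s_lock);
        bool more_to_flush = qp->s_last != qp->s_head;
        bool waiting = qp->s_flags & S_WAIT_DMA;
        if (more_to_flush || waiting)
            schedule_send(qp);
        pthread_mutex_unlock(&qp->s_lock);

        printf("wake wait_dma sleepers\n");    /* models wake_up(&qp->wait_dma) */
    }

    int main(void)
    {
        struct qp qp = { .s_lock = PTHREAD_MUTEX_INITIALIZER, .s_head = 1 };
        atomic_init(&qp.s_dma_busy, 1);
        dma_done(&qp);
        return 0;
    }
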
1097 static int ipath_verbs_send_dma(struct ipath_qp *qp, in ipath_verbs_send_dma() argument
1102 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); in ipath_verbs_send_dma()
1110 tx = qp->s_tx; in ipath_verbs_send_dma()
1112 qp->s_tx = NULL; in ipath_verbs_send_dma()
1114 atomic_inc(&qp->s_dma_busy); in ipath_verbs_send_dma()
1117 qp->s_tx = tx; in ipath_verbs_send_dma()
1118 decrement_dma_busy(qp); in ipath_verbs_send_dma()
1134 control = qp->s_pkt_delay; in ipath_verbs_send_dma()
1135 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); in ipath_verbs_send_dma()
1137 tx->qp = qp; in ipath_verbs_send_dma()
1138 atomic_inc(&qp->refcount); in ipath_verbs_send_dma()
1139 tx->wqe = qp->s_wqe; in ipath_verbs_send_dma()
1170 atomic_inc(&qp->s_dma_busy); in ipath_verbs_send_dma()
1176 qp->s_tx = tx; in ipath_verbs_send_dma()
1177 decrement_dma_busy(qp); in ipath_verbs_send_dma()
1198 atomic_inc(&qp->s_dma_busy); in ipath_verbs_send_dma()
1208 qp->s_tx = tx; in ipath_verbs_send_dma()
1209 decrement_dma_busy(qp); in ipath_verbs_send_dma()
1215 if (atomic_dec_and_test(&qp->refcount)) in ipath_verbs_send_dma()
1216 wake_up(&qp->wait); in ipath_verbs_send_dma()
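
Within ipath_verbs_send_dma(), qp->s_dma_busy is bumped before each attempt to hand a descriptor to the SDMA engine; if queuing fails, the descriptor is parked in qp->s_tx for a later retry and the count is unwound through decrement_dma_busy(). A small model of that optimistic-increment / rollback-on-error flow follows; queue_descriptor() is a made-up stand-in for the SDMA queuing call, not the driver's API.

    /* Model of the optimistic s_dma_busy handling in ipath_verbs_send_dma(). */
    #include <stdatomic.h>
    #include <stdio.h>

    struct tx_desc { int id; };

    struct qp {
        atomic_int s_dma_busy;
        struct tx_desc *s_tx;       /* descriptor saved for retry after a queuing failure */
    };

    static int queue_descriptor(struct tx_desc *tx)
    {
        return tx->id == 0 ? 0 : -1;   /* pretend only descriptor 0 queues successfully */
    }

    static int send_dma(struct qp *qp, struct tx_desc *tx)
    {
        atomic_fetch_add(&qp->s_dma_busy, 1);      /* optimistic: assume the queue accepts it */
        if (queue_descriptor(tx) < 0) {
            qp->s_tx = tx;                         /* park for retry, as qp->s_tx = tx does */
            atomic_fetch_sub(&qp->s_dma_busy, 1);  /* rollback, roughly what decrement_dma_busy() does */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        static struct qp qp;
        struct tx_desc a = { 0 }, b = { 1 };
        printf("%d %d busy=%d\n", send_dma(&qp, &a), send_dma(&qp, &b),
               atomic_load(&qp.s_dma_busy));
        return 0;
    }
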
1222 static int ipath_verbs_send_pio(struct ipath_qp *qp, in ipath_verbs_send_pio() argument
1227 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; in ipath_verbs_send_pio()
1246 control = qp->s_pkt_delay; in ipath_verbs_send_pio()
1247 qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult); in ipath_verbs_send_pio()
1304 if (qp->s_wqe) { in ipath_verbs_send_pio()
1305 spin_lock_irqsave(&qp->s_lock, flags); in ipath_verbs_send_pio()
1306 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); in ipath_verbs_send_pio()
1307 spin_unlock_irqrestore(&qp->s_lock, flags); in ipath_verbs_send_pio()
1322 int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr, in ipath_verbs_send() argument
1325 struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd; in ipath_verbs_send()
1341 if (qp->ibqp.qp_type == IB_QPT_SMI || in ipath_verbs_send()
1343 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, in ipath_verbs_send()
1346 ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, in ipath_verbs_send()
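
ipath_verbs_send() only chooses the transmit path: SMI packets go out via programmed I/O, and the matches show everything else falling through to the SDMA path; the rest of the condition at line 1341 (its second operand) does not reference qp and is therefore not part of this listing. A minimal dispatch sketch under that limited view, with invented names (verbs_send, send_pio, send_dma):

    /* Dispatch sketch for ipath_verbs_send(): SMI takes PIO, otherwise DMA.
     * Only the SMI half of the original condition is visible in this listing. */
    #include <stdio.h>

    enum qp_type { QPT_SMI, QPT_GSI, QPT_RC, QPT_UC, QPT_UD };

    struct qp { enum qp_type type; };

    static int send_pio(struct qp *qp) { (void)qp; puts("PIO send"); return 0; }
    static int send_dma(struct qp *qp) { (void)qp; puts("DMA send"); return 0; }

    static int verbs_send(struct qp *qp)
    {
        if (qp->type == QPT_SMI)     /* visible half of the condition at line 1341 */
            return send_pio(qp);
        return send_dma(qp);
    }

    int main(void)
    {
        struct qp smi = { QPT_SMI }, rc = { QPT_RC };
        verbs_send(&smi);
        verbs_send(&rc);
        return 0;
    }
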
1461 struct ipath_qp *qp; in ipath_ib_piobufavail() local
1472 qp = list_entry(list->next, struct ipath_qp, piowait); in ipath_ib_piobufavail()
1473 list_del_init(&qp->piowait); in ipath_ib_piobufavail()
1474 qp->pio_next = qplist; in ipath_ib_piobufavail()
1475 qplist = qp; in ipath_ib_piobufavail()
1476 atomic_inc(&qp->refcount); in ipath_ib_piobufavail()
1481 qp = qplist; in ipath_ib_piobufavail()
1482 qplist = qp->pio_next; in ipath_ib_piobufavail()
1484 spin_lock_irqsave(&qp->s_lock, flags); in ipath_ib_piobufavail()
1485 if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) in ipath_ib_piobufavail()
1486 ipath_schedule_send(qp); in ipath_ib_piobufavail()
1487 spin_unlock_irqrestore(&qp->s_lock, flags); in ipath_ib_piobufavail()
1490 if (atomic_dec_and_test(&qp->refcount)) in ipath_ib_piobufavail()
1491 wake_up(&qp->wait); in ipath_ib_piobufavail()