Lines matching refs:wr (references to the ib_send_wr / ib_recv_wr work-request pointer wr in the cxgb4 / Chelsio T4 iWARP queue-pair code)
365 struct ib_send_wr *wr, int max, u32 *plenp) in build_immd() argument
373 for (i = 0; i < wr->num_sge; i++) { in build_immd()
374 if ((plen + wr->sg_list[i].length) > max) in build_immd()
376 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
377 plen += wr->sg_list[i].length; in build_immd()
378 rem = wr->sg_list[i].length; in build_immd()
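
The build_immd() fragments above are the inline-data path: when the consumer sets IB_SEND_INLINE, each SGE's payload is copied byte-for-byte into the WQE instead of being described by a scatter/gather entry. A sketch of how those fragments plausibly fit together follows; the destination wrap at the end of the SQ ring, the 16-byte padding, and the fw_ri_immd header fields are assumptions about code the listing does not show.

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
                      struct ib_send_wr *wr, int max, u32 *plenp)
{
        u8 *dstp, *srcp;
        u32 plen = 0;
        int i, rem, len;

        dstp = (u8 *)immdp->data;
        for (i = 0; i < wr->num_sge; i++) {
                /* refuse to inline more payload than the WQE can carry */
                if ((plen + wr->sg_list[i].length) > max)
                        return -EMSGSIZE;
                srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
                plen += wr->sg_list[i].length;
                rem = wr->sg_list[i].length;
                while (rem) {
                        /* assumption: copies wrap at the end of the SQ ring */
                        if (dstp == (u8 *)&sq->queue[sq->size])
                                dstp = (u8 *)sq->queue;
                        len = min_t(int, rem,
                                    (u8 *)&sq->queue[sq->size] - dstp);
                        memcpy(dstp, srcp, len);
                        dstp += len;
                        srcp += len;
                        rem -= len;
                }
        }
        /* assumption: pad the immediate data out to a 16-byte multiple */
        len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
        if (len)
                memset(dstp, 0, len);
        immdp->op = FW_RI_DATA_IMMD;
        immdp->r1 = 0;
        immdp->r2 = 0;
        immdp->immdlen = cpu_to_be32(plen);
        *plenp = plen;
        return 0;
}
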
435 struct ib_send_wr *wr, u8 *len16) in build_rdma_send() argument
441 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
443 switch (wr->opcode) { in build_rdma_send()
445 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
454 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
460 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
470 if (wr->num_sge) { in build_rdma_send()
471 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_send()
472 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
482 wr->sg_list, wr->num_sge, &plen); in build_rdma_send()
486 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_send()
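
build_rdma_send() checks the SGE count against T4_MAX_SEND_SGE, selects the solicited and/or with-invalidate firmware send variant, records the rkey to invalidate for IB_WR_SEND_WITH_INV, and then encodes the payload either inline (build_immd) or as firmware SGEs. Consumer-side, the fields it reads are filled in roughly like this; qp, mr, dma_addr, ctx, len, and remote_rkey are placeholder names, not from the listing. With IB_SEND_INLINE set, the sge addr would instead be treated as a kernel virtual address and copied by build_immd().

        struct ib_sge sge = {
                .addr   = dma_addr,             /* placeholder: DMA-mapped buffer */
                .length = len,
                .lkey   = mr->lkey,
        };
        struct ib_send_wr swr = {
                .wr_id      = (u64)(unsigned long)ctx,
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND_WITH_INV,      /* send + remote invalidate */
                .send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED,
        };
        struct ib_send_wr *bad_wr;

        /* rkey the responder should invalidate; copied into send.stag_inv by the driver */
        swr.ex.invalidate_rkey = remote_rkey;

        if (ib_post_send(qp, &swr, &bad_wr))
                pr_err("post_send failed, first bad wr %p\n", bad_wr);
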
502 struct ib_send_wr *wr, u8 *len16) in build_rdma_write() argument
508 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_write()
511 wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); in build_rdma_write()
512 wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); in build_rdma_write()
513 if (wr->num_sge) { in build_rdma_write()
514 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_write()
515 ret = build_immd(sq, wqe->write.u.immd_src, wr, in build_rdma_write()
525 wr->sg_list, wr->num_sge, &plen); in build_rdma_write()
529 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_write()
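
build_rdma_write() has the same shape as build_rdma_send() minus the opcode switch: it stores the sink stag and the 64-bit sink address from the WR, then takes the inline or SGL path. A condensed sketch; the build_isgl() bounds arguments, the u.isgl_src member, the T4_MAX_WRITE_INLINE limit, and the zero-SGE fallback are assumptions not shown in the listing.

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
                            struct ib_send_wr *wr, u8 *len16)
{
        u32 plen;
        int size;
        int ret;

        if (wr->num_sge > T4_MAX_SEND_SGE)
                return -EINVAL;
        wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
        if (wr->num_sge) {
                if (wr->send_flags & IB_SEND_INLINE) {
                        ret = build_immd(sq, wqe->write.u.immd_src, wr,
                                         T4_MAX_WRITE_INLINE, &plen);
                        if (ret)
                                return ret;
                        size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        ret = build_isgl((__be64 *)sq->queue,
                                         (__be64 *)&sq->queue[sq->size],
                                         wqe->write.u.isgl_src,
                                         wr->sg_list, wr->num_sge, &plen);
                        if (ret)
                                return ret;
                        size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
                /* assumption: a zero-length write still carries an empty immd header */
                wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
                wqe->write.u.immd_src[0].r1 = 0;
                wqe->write.u.immd_src[0].r2 = 0;
                wqe->write.u.immd_src[0].immdlen = 0;
                size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
                plen = 0;
        }
        *len16 = DIV_ROUND_UP(size, 16);
        wqe->write.plen = cpu_to_be32(plen);
        return 0;
}
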
544 static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) in build_rdma_read() argument
546 if (wr->num_sge > 1) in build_rdma_read()
548 if (wr->num_sge) { in build_rdma_read()
549 wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey); in build_rdma_read()
550 wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr in build_rdma_read()
552 wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr); in build_rdma_read()
553 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); in build_rdma_read()
554 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); in build_rdma_read()
555 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr in build_rdma_read()
557 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); in build_rdma_read()
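
build_rdma_read() accepts at most one SGE and splits both 64-bit addresses (remote source, local sink) into hi/lo 32-bit fields of the firmware read WQE. A sketch built from the fragments above; the zero-SGE branch and the len16 computation are assumptions.

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
        if (wr->num_sge > 1)
                return -EINVAL;
        if (wr->num_sge) {
                /* remote source: stag plus 64-bit address split into hi/lo */
                wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
                wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr >> 32));
                wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
                /* local sink: described by the single SGE */
                wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
                wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
                wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr >> 32));
                wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
        } else {
                /* assumption: a zero-byte read zeroes the stags, length, and addresses */
                wqe->read.stag_src = 0;
                wqe->read.to_src_hi = 0;
                wqe->read.to_src_lo = 0;
                wqe->read.stag_sink = 0;
                wqe->read.plen = 0;
                wqe->read.to_sink_hi = 0;
                wqe->read.to_sink_lo = 0;
        }
        *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
        return 0;
}
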
574 struct ib_recv_wr *wr, u8 *len16) in build_rdma_recv() argument
580 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); in build_rdma_recv()
584 wr->num_sge * sizeof(struct fw_ri_sge), 16); in build_rdma_recv()
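
On the receive side there is no inline option: build_rdma_recv() translates the ib_recv_wr SGE list into a firmware ISGL inside the RQ WQE and reports the WQE length in 16-byte units. A sketch, assuming only the error handling around build_isgl():

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
                           struct ib_recv_wr *wr, u8 *len16)
{
        int ret;

        ret = build_isgl((__be64 *)qhp->wq.rq.queue,
                         (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
                         &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
        if (ret)
                return ret;
        *len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
                              wr->num_sge * sizeof(struct fw_ri_sge), 16);
        return 0;
}
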
589 struct ib_send_wr *wr, u8 *len16, u8 t5dev) in build_fastreg() argument
595 int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); in build_fastreg()
598 if (wr->wr.fast_reg.page_list_len > in build_fastreg()
603 wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12; in build_fastreg()
605 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags); in build_fastreg()
607 wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length); in build_fastreg()
608 wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey); in build_fastreg()
609 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); in build_fastreg()
610 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & in build_fastreg()
615 to_c4iw_fr_page_list(wr->wr.fast_reg.page_list); in build_fastreg()
618 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { in build_fastreg()
619 wr->wr.fast_reg.page_list->page_list[i] = (__force u64) in build_fastreg()
621 wr->wr.fast_reg.page_list->page_list[i]); in build_fastreg()
640 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { in build_fastreg()
642 (u64)wr->wr.fast_reg.page_list->page_list[i]); in build_fastreg()
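
build_fastreg() consumes the old-style fast-register WR (the pre-4.4 ib_send_wr layout used throughout this listing): the consumer allocates an ib_fast_reg_page_list, fills wr.fast_reg, and posts IB_WR_FAST_REG_MR; the driver then packs the page list either into the WQE itself or, on T5, as a DSGL referencing the c4iw_fr_page_list buffer, rewriting each page-list entry in place in the loop shown above. A consumer-side sketch of building such a WR; pd, qp, npages, page_dma_addr, and iova are placeholder names, and error checking of the allocations is omitted.

        struct ib_mr *frmr;
        struct ib_fast_reg_page_list *pl;
        struct ib_send_wr fr_wr;
        struct ib_send_wr *bad_wr;
        int i;

        frmr = ib_alloc_fast_reg_mr(pd, npages);
        pl = ib_alloc_fast_reg_page_list(pd->device, npages);
        for (i = 0; i < npages; i++)
                pl->page_list[i] = page_dma_addr[i];    /* DMA addresses of the pages */

        memset(&fr_wr, 0, sizeof(fr_wr));
        fr_wr.opcode = IB_WR_FAST_REG_MR;
        fr_wr.send_flags = IB_SEND_SIGNALED;
        fr_wr.wr.fast_reg.iova_start = iova;
        fr_wr.wr.fast_reg.page_list = pl;
        fr_wr.wr.fast_reg.page_list_len = npages;
        fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
        fr_wr.wr.fast_reg.length = npages << PAGE_SHIFT;
        fr_wr.wr.fast_reg.rkey = frmr->rkey;
        fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
                                         IB_ACCESS_REMOTE_WRITE;

        if (ib_post_send(qp, &fr_wr, &bad_wr))
                pr_err("fast-reg post failed\n");
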
660 static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, in build_inv_stag() argument
663 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_inv_stag()
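
build_inv_stag() is the IB_WR_LOCAL_INV leg: the only WR field it needs is ex.invalidate_rkey, which becomes the stag to invalidate. A sketch, assuming the reserved-field zeroing and the len16 value:

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
                          u8 *len16)
{
        wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
        wqe->inv.r2 = 0;                /* assumption: reserved field */
        *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
        return 0;
}
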
724 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, in c4iw_post_send() argument
749 while (wr) { in c4iw_post_send()
752 *bad_wr = wr; in c4iw_post_send()
759 if (wr->send_flags & IB_SEND_SOLICITED) in c4iw_post_send()
761 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
764 switch (wr->opcode) { in c4iw_post_send()
767 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
770 if (wr->opcode == IB_WR_SEND) in c4iw_post_send()
774 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
779 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
785 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) in c4iw_post_send()
789 err = build_rdma_read(wqe, wr, &len16); in c4iw_post_send()
792 swsqe->read_len = wr->sg_list[0].length; in c4iw_post_send()
799 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16, in c4iw_post_send()
805 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
809 err = build_inv_stag(wqe, wr, &len16); in c4iw_post_send()
813 wr->opcode); in c4iw_post_send()
817 *bad_wr = wr; in c4iw_post_send()
822 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || in c4iw_post_send()
825 swsqe->wr_id = wr->wr_id; in c4iw_post_send()
835 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
837 wr = wr->next; in c4iw_post_send()
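
c4iw_post_send() walks the chained WRs; for each one it maps the verbs opcode onto one of the build_* helpers above, turns IB_SEND_FENCE/SOLICITED/SIGNALED into firmware flags, records the software SQ entry (wr_id, signaled, read_len), and advances to wr->next. On any failure it sets *bad_wr to the offending WR so the caller knows where the chain stopped. A consumer-side RDMA READ example matching what the opcode switch accepts (note that build_rdma_read() allows only one SGE); qp, mr, ctx, dma_addr, len, remote_addr, and rkey are placeholders:

        struct ib_sge sge = {
                .addr   = dma_addr,             /* local sink buffer */
                .length = len,
                .lkey   = mr->lkey,
        };
        struct ib_send_wr rwr = {
                .wr_id      = (u64)(unsigned long)ctx,
                .sg_list    = &sge,
                .num_sge    = 1,                /* build_rdma_read() rejects more than one */
                .opcode     = IB_WR_RDMA_READ,
                .send_flags = IB_SEND_SIGNALED,
        };
        struct ib_send_wr *bad_wr;
        int err;

        rwr.wr.rdma.remote_addr = remote_addr;  /* pre-4.4 layout, as in the listing */
        rwr.wr.rdma.rkey = rkey;

        err = ib_post_send(qp, &rwr, &bad_wr);
        if (err)
                pr_err("ib_post_send failed: %d (first bad wr %p)\n", err, bad_wr);
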
853 int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, in c4iw_post_receive() argument
875 while (wr) { in c4iw_post_receive()
876 if (wr->num_sge > T4_MAX_RECV_SGE) { in c4iw_post_receive()
878 *bad_wr = wr; in c4iw_post_receive()
885 err = build_rdma_recv(qhp, wqe, wr, &len16); in c4iw_post_receive()
889 *bad_wr = wr; in c4iw_post_receive()
893 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
910 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
913 wr = wr->next; in c4iw_post_receive()
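
c4iw_post_receive() mirrors the send path for the RQ: per WR it rejects num_sge > T4_MAX_RECV_SGE, builds the ISGL via build_rdma_recv(), stashes wr_id in the software RQ ring, and reports the failing WR through *bad_wr. Consumer-side, posting a receive looks like this (placeholder names as before):

        struct ib_sge sge = {
                .addr   = dma_addr,             /* DMA-mapped receive buffer */
                .length = buf_len,
                .lkey   = mr->lkey,
        };
        struct ib_recv_wr rwr = {
                .wr_id   = (u64)(unsigned long)ctx,
                .sg_list = &sge,
                .num_sge = 1,
        };
        struct ib_recv_wr *bad_wr;

        if (ib_post_recv(qp, &rwr, &bad_wr))
                pr_err("ib_post_recv failed, first bad wr %p\n", bad_wr);
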