/linux-4.4.14/drivers/infiniband/hw/cxgb3/

iwch_qp.c

  build_rdma_send()  [wqe: argument]
    42   static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
    51   wqe->send.rdmaop = T3_SEND_WITH_SE;
    53   wqe->send.rdmaop = T3_SEND;
    54   wqe->send.rem_stag = 0;
    58   wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
    60   wqe->send.rdmaop = T3_SEND_WITH_INV;
    61   wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
    68   wqe->send.reserved[0] = 0;
    69   wqe->send.reserved[1] = 0;
    70   wqe->send.reserved[2] = 0;
    77   wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
    78   wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
    79   wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
    81   wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
    83   wqe->send.plen = cpu_to_be32(plen);

  build_rdma_write()  [wqe: argument]
    87   static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
    94   wqe->write.rdmaop = T3_RDMA_WRITE;
    95   wqe->write.reserved[0] = 0;
    96   wqe->write.reserved[1] = 0;
    97   wqe->write.reserved[2] = 0;
    98   wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
    99   wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
    103  wqe->write.sgl[0].stag = wr->ex.imm_data;
    104  wqe->write.sgl[0].len = cpu_to_be32(0);
    105  wqe->write.num_sgle = cpu_to_be32(0);
    114  wqe->write.sgl[i].stag =
    116  wqe->write.sgl[i].len =
    118  wqe->write.sgl[i].to =
    121  wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
    124  wqe->write.plen = cpu_to_be32(plen);

  build_rdma_read()  [wqe: argument]
    128  static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
    133  wqe->read.rdmaop = T3_READ_REQ;
    135  wqe->read.local_inv = 1;
    137  wqe->read.local_inv = 0;
    138  wqe->read.reserved[0] = 0;
    139  wqe->read.reserved[1] = 0;
    140  wqe->read.rem_stag = cpu_to_be32(rdma_wr(wr)->rkey);
    141  wqe->read.rem_to = cpu_to_be64(rdma_wr(wr)->remote_addr);
    142  wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
    143  wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
    144  wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);

  build_memreg()  [wqe: argument]
    149  static int build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr,
    159  wqe->fastreg.stag = cpu_to_be32(wr->key);
    160  wqe->fastreg.len = cpu_to_be32(mhp->ibmr.length);
    161  wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
    162  wqe->fastreg.va_base_lo_fbo =
    164  wqe->fastreg.page_type_perms = cpu_to_be32(
    169  p = &wqe->fastreg.pbl_addrs[0];
    175  wqe = (union t3_wr *)(wq->queue +
    177  build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
    182  p = &wqe->pbl_frag.pbl_addrs[0];

  build_inv_stag()  [wqe: argument]
    192  static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
    195  wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
    196  wqe->local_inv.reserved = 0;

  build_rdma_recv()  [wqe: argument]
    248  static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
    259  wqe->recv.pagesz[0] = page_size[0];
    260  wqe->recv.pagesz[1] = page_size[1];
    261  wqe->recv.pagesz[2] = page_size[2];
    262  wqe->recv.pagesz[3] = page_size[3];
    263  wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
    265  wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
    266  wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
    269  wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
    273  wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
    276  wqe->recv.sgl[i].stag = 0;
    277  wqe->recv.sgl[i].len = 0;
    278  wqe->recv.sgl[i].to = 0;
    279  wqe->recv.pbl_addr[i] = 0;

  build_zero_stag_recv()  [wqe: argument]
    288  static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
    311  wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
    324  wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
    331  wqe->recv.sgl[i].stag = 0;
    332  wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
    333  wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
    334  wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
    338  wqe->recv.pagesz[i] = 0;
    339  wqe->recv.sgl[i].stag = 0;
    340  wqe->recv.sgl[i].len = 0;
    341  wqe->recv.sgl[i].to = 0;
    342  wqe->recv.pbl_addr[i] = 0;

  iwch_post_send()  [wqe: local]
    360  union t3_wr *wqe;
    386  wqe = (union t3_wr *) (qhp->wq.queue + idx);
    400  err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
    405  err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
    411  err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
    414  sqp->read_len = wqe->read.local_len;
    420  err = build_memreg(wqe, reg_wr(wr), &t3_wr_flit_cnt,
    427  err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
    436  wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
    443  build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,

  iwch_post_receive()  [wqe: local]
    472  union t3_wr *wqe;
    496  wqe = (union t3_wr *) (qhp->wq.queue + idx);
    499  err = build_rdma_recv(qhp, wqe, wr);
    501  err = build_zero_stag_recv(qhp, wqe, wr);
    508  build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
    512  "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
    513  idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);

  iwch_bind_mw()  [wqe: local]
    536  union t3_wr *wqe;
    565  wqe = (union t3_wr *) (qhp->wq.queue + idx);
    574  wqe->bind.reserved = 0;
    575  wqe->bind.type = TPT_VATO;
    578  wqe->bind.perms = iwch_ib_to_tpt_bind_access(
    580  wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
    581  wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
    582  wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
    583  wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
    589  wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
    596  wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
    597  wqe->bind.mr_pagesz = page_size;
    598  build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,

  iwch_post_zb_read()  [wqe: local]
    745  union t3_wr *wqe;
    755  wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
    756  memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
    757  wqe->read.rdmaop = T3_READ_REQ;
    758  wqe->read.reserved[0] = 0;
    759  wqe->read.reserved[1] = 0;
    760  wqe->read.rem_stag = cpu_to_be32(1);
    761  wqe->read.rem_to = cpu_to_be64(1);
    762  wqe->read.local_stag = cpu_to_be32(1);
    763  wqe->read.local_len = cpu_to_be32(0);
    764  wqe->read.local_to = cpu_to_be64(1);
    765  wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
    766  wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|

  iwch_post_terminate()  [wqe: local]
    777  union t3_wr *wqe;
    787  wqe = (union t3_wr *)skb_put(skb, 40);
    788  memset(wqe, 0, 40);
    789  wqe->send.rdmaop = T3_TERMINATE;
    792  wqe->send.plen = htonl(4);
    795  term = (struct terminate_message *)wqe->send.sgl;
    797  wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
    799  wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));

  iwch_rqes_posted()  [wqe: local]
    885  union t3_wr *wqe = qhp->wq.queue;
    888  while (count < USHRT_MAX && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
    890  wqe++;

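The iwch_qp.c matches show the recurring cxgb3 pattern: each verb-specific builder copies the caller's ib_send_wr into a hardware WQE, converting every multi-byte field to big endian for the adapter. A minimal userspace sketch of the SGL copy in build_rdma_send(); the struct layouts are illustrative stand-ins (the real ones live in cxio_wr.h), with htonl()/htobe64() standing in for the kernel's cpu_to_be32()/cpu_to_be64():

    /*
     * Sketch of the build_rdma_send() SGL copy, under the assumptions
     * named above; not the driver's actual structures.
     */
    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl */
    #include <endian.h>             /* htobe64 */

    struct sge      { uint32_t lkey, length; uint64_t addr; };  /* like ib_sge */
    struct hw_sge   { uint32_t stag, len;    uint64_t to;   };  /* like t3_sge */
    struct send_wqe { uint32_t num_sgle, plen; struct hw_sge sgl[4]; };

    /* Copy up to 4 SGEs into the WQE, byte-swapping each field and
     * accumulating the total payload length, as the driver does. */
    static int build_send(struct send_wqe *wqe, const struct sge *sg, int num_sge)
    {
        uint32_t plen = 0;

        if (num_sge > 4)
            return -1;              /* the driver returns -EINVAL here */
        for (int i = 0; i < num_sge; i++) {
            wqe->sgl[i].stag = htonl(sg[i].lkey);
            wqe->sgl[i].len  = htonl(sg[i].length);
            wqe->sgl[i].to   = htobe64(sg[i].addr);
            plen += sg[i].length;
        }
        wqe->num_sgle = htonl(num_sge);
        wqe->plen     = htonl(plen);    /* total payload, big endian */
        return 0;
    }
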
cxio_hal.c

  cxio_hal_clear_qp_ctx()  [wqe: local]
    140  struct t3_modify_qp_wr *wqe;
    141  struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
    146  wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
    147  memset(wqe, 0, sizeof(*wqe));
    148  build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
    151  wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
    153  wqe->sge_cmd = cpu_to_be64(sge_cmd);

  cxio_hal_init_ctrl_qp()  [wqe: local]
    519  struct t3_modify_qp_wr *wqe;
    522  skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
    565  wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
    566  memset(wqe, 0, sizeof(*wqe));
    567  build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
    569  wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
    571  wqe->sge_cmd = cpu_to_be64(sge_cmd);
    572  wqe->ctx1 = cpu_to_be64(ctx1);
    573  wqe->ctx0 = cpu_to_be64(ctx0);

  cxio_hal_ctrl_qp_write_mem()  [wqe: local]
    604  __be64 *wqe;
    629  wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
    652  wqe += (sizeof(struct t3_bypass_wr) >> 3);
    656  *wqe = cpu_to_be64(utx_cmd);
    657  wqe++;
    663  memcpy(wqe, copy_data, copy_len);
    665  memset(wqe, 0, copy_len);
    667  memset(((u8 *) wqe) + copy_len, 0,
    671  wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
    675  ((union t3_wrid *)(wqe+1))->id0.low = rdev_p->ctrl_qp.wptr;
    681  build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,

  cxio_rdma_init()  [wqe: local]
    836  struct t3_rdma_init_wr *wqe;
    837  struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
    841  wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
    842  wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
    843  wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
    844  V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
    845  wqe->wrid.id1 = 0;
    846  wqe->qpid = cpu_to_be32(attr->qpid);
    847  wqe->pdid = cpu_to_be32(attr->pdid);
    848  wqe->scqid = cpu_to_be32(attr->scqid);
    849  wqe->rcqid = cpu_to_be32(attr->rcqid);
    850  wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
    851  wqe->rq_size = cpu_to_be32(attr->rq_size);
    852  wqe->mpaattrs = attr->mpaattrs;
    853  wqe->qpcaps = attr->qpcaps;
    854  wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
    855  wqe->rqe_count = cpu_to_be16(attr->rqe_count);
    856  wqe->flags_rtr_type = cpu_to_be16(attr->flags |
    859  wqe->ord = cpu_to_be32(attr->ord);
    860  wqe->ird = cpu_to_be32(attr->ird);
    861  wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
    862  wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
    863  wqe->irs = cpu_to_be32(attr->irs);

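cxio_hal.c posts control-path work requests through sk_buffs rather than a user-mapped queue: allocate an skb sized for one WR, append and zero it, fill it in big-endian order, then hand it to the lower-level driver. A kernel-context sketch of that flow; struct my_fw_wr is a hypothetical stand-in for t3_modify_qp_wr, and only the alloc/append/zero/fill sequence is meant to mirror the driver:

    #include <linux/skbuff.h>
    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Hypothetical stand-in for t3_modify_qp_wr. */
    struct my_fw_wr {
        __be32 flags;
        __be64 sge_cmd;
    };

    static struct sk_buff *build_mod_qp_wr(u64 sge_cmd)
    {
        struct my_fw_wr *wqe;
        struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

        if (!skb)
            return NULL;                    /* driver path returns -ENOMEM */
        wqe = (struct my_fw_wr *)skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof(*wqe));       /* firmware expects zeroed fields */
        wqe->flags   = cpu_to_be32(1);      /* e.g. MODQP_WRITE_EC in the driver */
        wqe->sge_cmd = cpu_to_be64(sge_cmd);
        return skb;                         /* caller queues this to the device */
    }
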
cxio_dbg.c

  cxio_dump_wqe()  [wqe: argument]
    111  void cxio_dump_wqe(union t3_wr *wqe)
    113  __be64 *data = (__be64 *)wqe;

cxio_wr.h

  fw_riwrh_opcode()  [wqe: argument]
    416  static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
    418  return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));

  build_fw_riwrh()  [wqe: argument]
    427  static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
    431  wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
    435  wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
    439  ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);

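build_fw_riwrh() and fw_riwrh_opcode() pack and unpack header fields with the V_FW_RIWR_x()/G_FW_RIWR_x() macro convention: V_x() shifts a value into its bit position, G_x() shifts it back and masks. A self-contained sketch of the convention with made-up shift and mask values; the real S_/M_/V_/G_FW_RIWR_* definitions live in cxio_wr.h:

    #include <stdint.h>
    #include <assert.h>

    #define S_FW_RIWR_OP    24                                      /* hypothetical */
    #define M_FW_RIWR_OP    0xffu                                   /* hypothetical */
    #define V_FW_RIWR_OP(x) ((uint32_t)(x) << S_FW_RIWR_OP)         /* pack   */
    #define G_FW_RIWR_OP(x) (((x) >> S_FW_RIWR_OP) & M_FW_RIWR_OP)  /* unpack */

    int main(void)
    {
        /* Pack an opcode next to unrelated flag bits, then recover it. */
        uint32_t op_seop_flags = V_FW_RIWR_OP(0x2a) | 0x1234;

        assert(G_FW_RIWR_OP(op_seop_flags) == 0x2a);
        return 0;
    }
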
cxio_hal.h

    205  void cxio_dump_wqe(union t3_wr *wqe);

/linux-4.4.14/drivers/infiniband/hw/cxgb4/

qp.c

  build_rdma_send()  [wqe: argument]
    458  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
    470  wqe->send.sendop_pkd = cpu_to_be32(
    473  wqe->send.sendop_pkd = cpu_to_be32(
    475  wqe->send.stag_inv = 0;
    479  wqe->send.sendop_pkd = cpu_to_be32(
    482  wqe->send.sendop_pkd = cpu_to_be32(
    484  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
    490  wqe->send.r3 = 0;
    491  wqe->send.r4 = 0;
    496  ret = build_immd(sq, wqe->send.u.immd_src, wr,
    500  size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
    505  wqe->send.u.isgl_src,
    509  size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
    513  wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
    514  wqe->send.u.immd_src[0].r1 = 0;
    515  wqe->send.u.immd_src[0].r2 = 0;
    516  wqe->send.u.immd_src[0].immdlen = 0;
    517  size = sizeof wqe->send + sizeof(struct fw_ri_immd);
    521  wqe->send.plen = cpu_to_be32(plen);

  build_rdma_write()  [wqe: argument]
    525  static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
    534  wqe->write.r2 = 0;
    535  wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
    536  wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
    539  ret = build_immd(sq, wqe->write.u.immd_src, wr,
    543  size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
    548  wqe->write.u.isgl_src,
    552  size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
    556  wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
    557  wqe->write.u.immd_src[0].r1 = 0;
    558  wqe->write.u.immd_src[0].r2 = 0;
    559  wqe->write.u.immd_src[0].immdlen = 0;
    560  size = sizeof wqe->write + sizeof(struct fw_ri_immd);
    564  wqe->write.plen = cpu_to_be32(plen);

  build_rdma_read()  [wqe: argument]
    568  static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
    573  wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
    574  wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
    576  wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
    577  wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
    578  wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
    579  wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
    581  wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
    583  wqe->read.stag_src = cpu_to_be32(2);
    584  wqe->read.to_src_hi = 0;
    585  wqe->read.to_src_lo = 0;
    586  wqe->read.stag_sink = cpu_to_be32(2);
    587  wqe->read.plen = 0;
    588  wqe->read.to_sink_hi = 0;
    589  wqe->read.to_sink_lo = 0;
    591  wqe->read.r2 = 0;
    592  wqe->read.r5 = 0;
    593  *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);

  build_rdma_recv()  [wqe: argument]
    597  static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
    604  &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
    607  *len16 = DIV_ROUND_UP(sizeof wqe->recv +

  build_memreg()  [wqe: argument]
    612  static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
    625  wqe->fr.qpbinde_to_dcacpu = 0;
    626  wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
    627  wqe->fr.addr_type = FW_RI_VA_BASED_TO;
    628  wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
    629  wqe->fr.len_hi = 0;
    630  wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
    631  wqe->fr.stag = cpu_to_be32(wr->key);
    632  wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
    633  wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
    642  sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
    649  *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
    651  imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
    671  *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)

  build_inv_stag()  [wqe: argument]
    677  static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
    680  wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
    681  wqe->inv.r2 = 0;
    682  *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);

  c4iw_post_send()  [wqe: local]
    747  union t4_wr *wqe = NULL;
    770  wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
    789  err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
    794  err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
    804  err = build_rdma_read(wqe, wr, &len16);
    814  err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
    824  err = build_inv_stag(wqe, wr, &len16);
    847  init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
    858  t4_ring_sq_db(&qhp->wq, idx, wqe);

  c4iw_post_receive()  [wqe: local]
    872  union t4_recv_wr *wqe = NULL;
    895  wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
    899  err = build_rdma_recv(qhp, wqe, wr, &len16);
    916  wqe->recv.opcode = FW_RI_RECV_WR;
    917  wqe->recv.r1 = 0;
    918  wqe->recv.wrid = qhp->wq.rq.pidx;
    919  wqe->recv.r2[0] = 0;
    920  wqe->recv.r2[1] = 0;
    921  wqe->recv.r2[2] = 0;
    922  wqe->recv.len16 = len16;
    931  t4_ring_rq_db(&qhp->wq, idx, wqe);

  post_terminate()  [wqe: local]
    1084  struct fw_ri_wr *wqe;
    1091  skb = alloc_skb(sizeof *wqe, gfp);
    1096  wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
    1097  memset(wqe, 0, sizeof *wqe);
    1098  wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
    1099  wqe->flowid_len16 = cpu_to_be32(
    1101  FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
    1103  wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
    1104  wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
    1105  term = (struct terminate_message *)wqe->u.terminate.termmsg;

  rdma_fini()  [wqe: local]
    1205  struct fw_ri_wr *wqe;
    1212  skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
    1217  wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
    1218  memset(wqe, 0, sizeof *wqe);
    1219  wqe->op_compl = cpu_to_be32(
    1222  wqe->flowid_len16 = cpu_to_be32(
    1224  FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
    1225  wqe->cookie = (uintptr_t)&ep->com.wr_wait;
    1227  wqe->u.fini.type = FW_RI_TYPE_FINI;

  rdma_init()  [wqe: local]
    1266  struct fw_ri_wr *wqe;
    1273  skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
    1286  wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
    1287  memset(wqe, 0, sizeof *wqe);
    1288  wqe->op_compl = cpu_to_be32(
    1291  wqe->flowid_len16 = cpu_to_be32(
    1293  FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
    1295  wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
    1297  wqe->u.init.type = FW_RI_TYPE_INIT;
    1298  wqe->u.init.mpareqbit_p2ptype =
    1301  wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
    1303  wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
    1305  wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
    1307  wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
    1309  wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
    1313  wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
    1315  wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
    1316  wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
    1317  wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
    1318  wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
    1319  wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
    1320  wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
    1321  wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
    1322  wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
    1323  wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
    1324  wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
    1325  wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
    1326  wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
    1327  wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
    1330  build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

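In the cxgb4 builders, a small payload is inlined into the WQE as a fw_ri_immd immediate, a larger one is referenced through a fw_ri_isgl scatter list, and either way the WQE size is reported to the hardware in 16-byte units ("len16"). A userspace sketch of that decision; the MAX_IMMD_LEN cutoff and the isgl size stand-in are illustrative, not the driver's constants, and only the DIV_ROUND_UP(size, 16) bookkeeping matches the listing above:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_IMMD_LEN 96u                /* hypothetical inline cutoff, bytes */

    /* Returns the WQE footprint in 16-byte units, mirroring
     * *len16 = DIV_ROUND_UP(size, 16) in qp.c. */
    static uint8_t wqe_len16(size_t hdr_len, size_t payload_len, int *use_immd)
    {
        size_t size;

        *use_immd = payload_len <= MAX_IMMD_LEN;
        if (*use_immd)
            size = hdr_len + payload_len;           /* data rides in the WQE */
        else
            size = hdr_len + 2 * sizeof(uint64_t);  /* stand-in for one isgl */
        return (uint8_t)((size + 15) / 16);         /* DIV_ROUND_UP(size, 16) */
    }

    int main(void)
    {
        int immd;

        printf("len16=%u immd=%d\n", wqe_len16(32, 64, &immd), immd);
        return 0;
    }
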
t4.h

  init_wr_hdr()  [wqe: argument]
    109  static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
    112  wqe->send.opcode = (u8)opcode;
    113  wqe->send.flags = flags;
    114  wqe->send.wrid = wrid;
    115  wqe->send.r1[0] = 0;
    116  wqe->send.r1[1] = 0;
    117  wqe->send.r1[2] = 0;
    118  wqe->send.len16 = len16;

  t4_ring_sq_db()  [wqe: argument]
    458  static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
    464  if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
    469  (u64 *)wqe);

  t4_ring_rq_db()  [wqe: argument]
    484-485  t4_ring_rq_db(struct t4_wq *wq, u16 inc, union t4_recv_wr *wqe)
    491  if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
    496  (void *)wqe);

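init_wr_hdr() shows the fixed prologue every t4 send WQE carries: opcode, flags, work-request id and len16, with the reserved bytes zeroed explicitly for the firmware. A userspace sketch with an illustrative field layout (the real one is union t4_wr in t4.h):

    #include <stdint.h>
    #include <string.h>

    struct wr_hdr {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t wrid;
        uint8_t  r1[3];     /* reserved: the firmware expects zeroes */
        uint8_t  len16;     /* WQE size in 16-byte units */
    };

    /* Same field-by-field fill as init_wr_hdr(), on the stand-in layout. */
    static void init_hdr(struct wr_hdr *h, uint16_t wrid,
                         uint8_t opcode, uint8_t flags, uint8_t len16)
    {
        h->opcode = opcode;
        h->flags  = flags;
        h->wrid   = wrid;
        memset(h->r1, 0, sizeof(h->r1));
        h->len16  = len16;
    }
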
/linux-4.4.14/drivers/infiniband/hw/qib/

qib_ruc.c

  qib_init_sge()  [wqe: argument]
    82   static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
    95   for (i = j = 0; i < wqe->num_sge; i++) {
    96   if (wqe->sg_list[i].length == 0)
    100  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
    102  qp->r_len += wqe->sg_list[i].length;
    118  wc.wr_id = wqe->wr_id;

  qib_get_rwqe()  [wqe: local]
    145  struct qib_rwqe *wqe;
    177  wqe = get_rwqe_ptr(rq, tail);
    186  if (!wr_id_only && !qib_init_sge(qp, wqe)) {
    190  qp->r_wr_id = wqe->wr_id;

  qib_ruc_loopback()  [wqe: local]
    360  struct qib_swqe *wqe;
    388  wqe = get_swqe_ptr(sqp, sqp->s_last);
    428  sqp->s_sge.sge = wqe->sg_list[0];
    429  sqp->s_sge.sg_list = wqe->sg_list + 1;
    430  sqp->s_sge.num_sge = wqe->wr.num_sge;
    431  sqp->s_len = wqe->length;
    432  switch (wqe->wr.opcode) {
    435  wc.ex.imm_data = wqe->wr.ex.imm_data;
    449  wc.ex.imm_data = wqe->wr.ex.imm_data;
    459  if (wqe->length == 0)
    461  if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
    462  wqe->rdma_wr.remote_addr,
    463  wqe->rdma_wr.rkey,
    468  qp->r_sge.total_len = wqe->length;
    474  if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
    475  wqe->rdma_wr.remote_addr,
    476  wqe->rdma_wr.rkey,
    482  qp->r_sge.sge = wqe->sg_list[0];
    483  qp->r_sge.sg_list = wqe->sg_list + 1;
    484  qp->r_sge.num_sge = wqe->wr.num_sge;
    485  qp->r_sge.total_len = wqe->length;
    493  wqe->atomic_wr.remote_addr,
    494  wqe->atomic_wr.rkey,
    499  sdata = wqe->atomic_wr.compare_add;
    501  (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
    504  sdata, wqe->atomic_wr.swap);
    551  if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
    557  wc.byte_len = wqe->length;
    565  wqe->wr.send_flags & IB_SEND_SOLICITED);
    572  qib_send_complete(sqp, wqe, send_status);
    619  qib_send_complete(sqp, wqe, send_status);

  qib_send_complete()  [wqe: argument]
    771  void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
    780  for (i = 0; i < wqe->wr.num_sge; i++) {
    781  struct qib_sge *sge = &wqe->sg_list[i];
    788  atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);
    792  (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
    797  wc.wr_id = wqe->wr.wr_id;
    799  wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
    802  wc.byte_len = wqe->length;

qib_rc.c

  restart_sge()  [wqe: argument]
    43   static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
    48   len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
    49   ss->sge = wqe->sg_list[0];
    50   ss->sg_list = wqe->sg_list + 1;
    51   ss->num_sge = wqe->wr.num_sge;
    52   ss->total_len = wqe->length;
    54   return wqe->length - len;

  qib_make_rc_req()  [wqe: local]
    236  struct qib_swqe *wqe;
    273  wqe = get_swqe_ptr(qp, qp->s_last);
    274  qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
    297  wqe = get_swqe_ptr(qp, qp->s_cur);
    318  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
    323  wqe->psn = qp->s_next_psn;
    331  len = wqe->length;
    334  switch (wqe->wr.opcode) {
    339  qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
    343  wqe->lpsn = wqe->psn;
    345  wqe->lpsn += (len - 1) / pmtu;
    350  if (wqe->wr.opcode == IB_WR_SEND)
    355  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    358  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    372  qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
    378  cpu_to_be64(wqe->rdma_wr.remote_addr);
    380  cpu_to_be32(wqe->rdma_wr.rkey);
    383  wqe->lpsn = wqe->psn;
    385  wqe->lpsn += (len - 1) / pmtu;
    390  if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
    396  wqe->rdma_wr.wr.ex.imm_data;
    398  if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
    426  wqe->lpsn = qp->s_next_psn++;
    430  cpu_to_be64(wqe->rdma_wr.remote_addr);
    432  cpu_to_be32(wqe->rdma_wr.rkey);
    458  wqe->lpsn = wqe->psn;
    460  if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
    463  wqe->atomic_wr.swap);
    465  wqe->atomic_wr.compare_add);
    469  wqe->atomic_wr.compare_add);
    473  wqe->atomic_wr.remote_addr >> 32);
    475  wqe->atomic_wr.remote_addr);
    477  wqe->atomic_wr.rkey);
    489  qp->s_sge.sge = wqe->sg_list[0];
    490  qp->s_sge.sg_list = wqe->sg_list + 1;
    491  qp->s_sge.num_sge = wqe->wr.num_sge;
    492  qp->s_sge.total_len = wqe->length;
    493  qp->s_len = wqe->length;
    499  if (wqe->wr.opcode == IB_WR_RDMA_READ)
    500  qp->s_psn = wqe->lpsn + 1;
    518  qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
    533  if (wqe->wr.opcode == IB_WR_SEND)
    538  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    541  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    559  qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
    574  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    579  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    581  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    600  len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
    602  cpu_to_be64(wqe->rdma_wr.remote_addr + len);
    604  cpu_to_be32(wqe->rdma_wr.rkey);
    605  ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
    609  qp->s_psn = wqe->lpsn + 1;
    618  delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;

  reset_psn()  [wqe: local]
    788  struct qib_swqe *wqe = get_swqe_ptr(qp, n);
    797  if (qib_cmp24(psn, wqe->psn) <= 0) {
    803  opcode = wqe->wr.opcode;
    811  wqe = get_swqe_ptr(qp, n);
    812  diff = qib_cmp24(psn, wqe->psn);
    824  opcode = wqe->wr.opcode;

  qib_restart_rc()  [wqe: local]
    872  struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
    880  qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
    889  if (wqe->wr.opcode == IB_WR_RDMA_READ)

  reset_sending_psn()  [wqe: local]
    948  struct qib_swqe *wqe;
    953  wqe = get_swqe_ptr(qp, n);
    954  if (qib_cmp24(psn, wqe->lpsn) <= 0) {
    955  if (wqe->wr.opcode == IB_WR_RDMA_READ)
    956  qp->s_sending_psn = wqe->lpsn + 1;

  qib_rc_send_complete()  [wqe: local]
    974   struct qib_swqe *wqe;
    1010  wqe = get_swqe_ptr(qp, qp->s_last);
    1011  if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
    1014  for (i = 0; i < wqe->wr.num_sge; i++) {
    1015  struct qib_sge *sge = &wqe->sg_list[i];
    1021  (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
    1023  wc.wr_id = wqe->wr.wr_id;
    1025  wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
    1026  wc.byte_len = wqe->length;

  do_rc_completion()  [wqe: argument]
    1056-1057  do_rc_completion(struct qib_qp *qp, struct qib_swqe *wqe, struct qib_ibport *ibp)
    1068  if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
    1070  for (i = 0; i < wqe->wr.num_sge; i++) {
    1071  struct qib_sge *sge = &wqe->sg_list[i];
    1077  (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
    1079  wc.wr_id = wqe->wr.wr_id;
    1081  wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
    1082  wc.byte_len = wqe->length;
    1092  update_last_psn(qp, wqe->lpsn);
    1103  wqe = get_swqe_ptr(qp, qp->s_cur);
    1106  qp->s_psn = wqe->psn;
    1113  wqe = get_swqe_ptr(qp, qp->s_acked);
    1115  return wqe;

  do_rc_ack()  [wqe: local]
    1134  struct qib_swqe *wqe;
    1154  wqe = get_swqe_ptr(qp, qp->s_acked);
    1161  while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
    1168  if (wqe->wr.opcode == IB_WR_RDMA_READ &&
    1183  if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
    1185  ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    1186  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
    1205  if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    1206  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
    1207  u64 *vaddr = wqe->sg_list[0].vaddr;
    1211  (wqe->wr.opcode == IB_WR_RDMA_READ ||
    1212  wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    1213  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
    1227  wqe = do_rc_completion(qp, wqe, ibp);
    1325  qib_send_complete(qp, wqe, status);

  rdma_seq_err()  [wqe: local]
    1355  struct qib_swqe *wqe;
    1363  wqe = get_swqe_ptr(qp, qp->s_acked);
    1365  while (qib_cmp24(psn, wqe->lpsn) > 0) {
    1366  if (wqe->wr.opcode == IB_WR_RDMA_READ ||
    1367  wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    1368  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
    1370  wqe = do_rc_completion(qp, wqe, ibp);

  qib_rc_rcv_resp()  [wqe: local]
    1407  struct qib_swqe *wqe;
    1471  wqe = get_swqe_ptr(qp, qp->s_acked);
    1490  wqe = get_swqe_ptr(qp, qp->s_acked);
    1491  if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
    1499  wqe, psn, pmtu);
    1506  if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
    1556  wqe = get_swqe_ptr(qp, qp->s_acked);
    1558  wqe, psn, pmtu);
    1565  if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
    1600  qib_send_complete(qp, wqe, status);

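Much of qib_rc.c is 24-bit packet-sequence-number arithmetic: deltas are taken modulo 2^24 with QIB_PSN_MASK, and ordering tests sign-extend the 24-bit difference, as in the `delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8` match above. A small self-contained sketch of that comparison:

    #include <stdint.h>
    #include <assert.h>

    #define QIB_PSN_MASK 0xffffffu

    /* <0, 0, >0 ordering of two PSNs that wrap at 2^24: shift the
     * 24-bit delta into the top of a 32-bit word, then arithmetic-shift
     * back down to sign-extend it, like qib_cmp24() in the driver. */
    static int32_t cmp24(uint32_t a, uint32_t b)
    {
        return (int32_t)((a - b) << 8) >> 8;
    }

    int main(void)
    {
        assert(cmp24(0x000001, 0xffffff) > 0);  /* 1 comes "after" 2^24 - 1 */
        assert(cmp24(0xffffff, 0x000001) < 0);
        assert(((0x000005u - 0x000003u) & QIB_PSN_MASK) == 2);
        return 0;
    }
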
qib_uc.c

  qib_make_uc_req()  [wqe: local]
    49   struct qib_swqe *wqe;
    70   wqe = get_swqe_ptr(qp, qp->s_last);
    71   qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    84   wqe = get_swqe_ptr(qp, qp->s_cur);
    97   wqe->psn = qp->s_next_psn;
    99   qp->s_sge.sge = wqe->sg_list[0];
    100  qp->s_sge.sg_list = wqe->sg_list + 1;
    101  qp->s_sge.num_sge = wqe->wr.num_sge;
    102  qp->s_sge.total_len = wqe->length;
    103  len = wqe->length;
    105  switch (wqe->wr.opcode) {
    113  if (wqe->wr.opcode == IB_WR_SEND)
    119  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    122  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    124  qp->s_wqe = wqe;
    132  cpu_to_be64(wqe->rdma_wr.remote_addr);
    134  cpu_to_be32(wqe->rdma_wr.rkey);
    142  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    148  ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
    150  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    153  qp->s_wqe = wqe;
    172  if (wqe->wr.opcode == IB_WR_SEND)
    177  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    180  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    182  qp->s_wqe = wqe;
    196  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    202  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    204  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    207  qp->s_wqe = wqe;

qib_ud.c

  qib_make_ud_req()  [wqe: local]
    241  struct qib_swqe *wqe;
    264  wqe = get_swqe_ptr(qp, qp->s_last);
    265  qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    272  wqe = get_swqe_ptr(qp, qp->s_cur);
    280  ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
    303  qib_ud_loopback(qp, wqe);
    305  qib_send_complete(qp, wqe, IB_WC_SUCCESS);
    311  extra_bytes = -wqe->length & 3;
    312  nwords = (wqe->length + extra_bytes) >> 2;
    316  qp->s_cur_size = wqe->length;
    319  qp->s_wqe = wqe;
    320  qp->s_sge.sge = wqe->sg_list[0];
    321  qp->s_sge.sg_list = wqe->sg_list + 1;
    322  qp->s_sge.num_sge = wqe->wr.num_sge;
    323  qp->s_sge.total_len = wqe->length;
    341  if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
    343  ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
    361  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    366  wqe->ud_wr.pkey_index : qp->s_pkey_index);
    374  cpu_to_be32(wqe->ud_wr.remote_qpn);
    380  ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
    381  qp->qkey : wqe->ud_wr.remote_qkey);

qib_srq.c

  qib_post_srq_receive()  [wqe: local]
    57   struct qib_rwqe *wqe;
    79   wqe = get_rwqe_ptr(&srq->rq, wq->head);
    80   wqe->wr_id = wr->wr_id;
    81   wqe->num_sge = wr->num_sge;
    83   wqe->sg_list[i] = wr->sg_list[i];

  qib_modify_srq()  [wqe: local]
    282  struct qib_rwqe *wqe;
    285  wqe = get_rwqe_ptr(&srq->rq, tail);
    286  p->wr_id = wqe->wr_id;
    287  p->num_sge = wqe->num_sge;
    288  for (i = 0; i < wqe->num_sge; i++)
    289  p->sg_list[i] = wqe->sg_list[i];

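The SRQ code treats the receive queue as a ring of fixed-capacity WQEs: get_rwqe_ptr() indexes the ring by head or tail, and each post copies wr_id, num_sge and the SGE array into the head slot before advancing head. A userspace sketch of that ring discipline; the types, the MAX_SGE bound and the full/empty test are illustrative, not the driver's exact ones:

    #include <stdint.h>

    #define MAX_SGE 4

    struct sge  { uint32_t lkey, length; uint64_t addr; };
    struct rwqe { uint64_t wr_id; uint8_t num_sge; struct sge sg_list[MAX_SGE]; };
    struct rq   { struct rwqe *base; uint32_t size, head, tail; };

    /* Post one receive; fails when the ring is full (next head would
     * catch up with tail), like the driver's overflow check. */
    static int post_recv(struct rq *rq, uint64_t wr_id,
                         const struct sge *sg, uint8_t num_sge)
    {
        uint32_t next = rq->head + 1;

        if (next >= rq->size)
            next = 0;                       /* wrap around the ring */
        if (next == rq->tail || num_sge > MAX_SGE)
            return -1;

        struct rwqe *wqe = &rq->base[rq->head];
        wqe->wr_id   = wr_id;
        wqe->num_sge = num_sge;
        for (int i = 0; i < num_sge; i++)
            wqe->sg_list[i] = sg[i];
        rq->head = next;                    /* publish only after the copy */
        return 0;
    }
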
qib_verbs.c

  qib_post_one_send()  [wqe: local]
    340  struct qib_swqe *wqe;
    399  wqe = get_swqe_ptr(qp, qp->s_head);
    403  memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
    405  memcpy(&wqe->reg_wr, reg_wr(wr),
    406  sizeof(wqe->reg_wr));
    410  memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
    413  memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
    415  memcpy(&wqe->wr, wr, sizeof(wqe->wr));
    417  wqe->length = 0;
    428  ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
    432  wqe->length += length;
    435  wqe->wr.num_sge = j;
    439  if (wqe->length > 0x80000000U)
    441  } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
    446  wqe->ssn = qp->s_ssn++;
    454  struct qib_sge *sge = &wqe->sg_list[--j];

  qib_post_receive()  [wqe: local]
    526  struct qib_rwqe *wqe;
    547  wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
    548  wqe->wr_id = wr->wr_id;
    549  wqe->num_sge = wr->num_sge;
    551  wqe->sg_list[i] = wr->sg_list[i];

  sdma_complete()
    1101  if (tx->wqe)
    1102  qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);

  qib_verbs_send_dma()
    1181  tx->wqe = qp->s_wqe;

qib_qp.c

  clear_mr_refs()  [wqe: local]
    429  struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
    432  for (i = 0; i < wqe->wr.num_sge; i++) {
    433  struct qib_sge *sge = &wqe->sg_list[i];
    440  atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);

  qib_qp_iter_print()  [wqe: local]
    1367  struct qib_swqe *wqe;
    1370  wqe = get_swqe_ptr(qp, qp->s_last);
    1377  wqe->wr.opcode,
    1383  wqe->ssn,

qib_verbs.h

    1114  void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,

qib.h

  struct qib_verbs_txreq  [wqe: member]
    262  struct qib_swqe *wqe;

/linux-4.4.14/drivers/staging/rdma/ipath/

ipath_ruc.c

  ipath_init_sge()  [wqe: argument]
    122  int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
    129  for (i = j = 0; i < wqe->num_sge; i++) {
    130  if (wqe->sg_list[i].length == 0)
    134  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
    136  *lengthp += wqe->sg_list[i].length;
    145  wc.wr_id = wqe->wr_id;

  ipath_get_rwqe()  [wqe: local]
    171  struct ipath_rwqe *wqe;
    204  wqe = get_rwqe_ptr(rq, tail);
    210  } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
    211  qp->r_wr_id = wqe->wr_id;

  ipath_ruc_loopback()  [wqe: local]
    263  struct ipath_swqe *wqe;
    289  wqe = get_swqe_ptr(sqp, sqp->s_last);
    327  sqp->s_sge.sge = wqe->sg_list[0];
    328  sqp->s_sge.sg_list = wqe->sg_list + 1;
    329  sqp->s_sge.num_sge = wqe->wr.num_sge;
    330  sqp->s_len = wqe->length;
    331  switch (wqe->wr.opcode) {
    334  wc.ex.imm_data = wqe->wr.ex.imm_data;
    345  wc.ex.imm_data = wqe->wr.ex.imm_data;
    352  if (wqe->length == 0)
    354  if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
    355  wqe->rdma_wr.remote_addr,
    356  wqe->rdma_wr.rkey,
    364  if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
    365  wqe->rdma_wr.remote_addr,
    366  wqe->rdma_wr.rkey,
    369  qp->r_sge.sge = wqe->sg_list[0];
    370  qp->r_sge.sg_list = wqe->sg_list + 1;
    371  qp->r_sge.num_sge = wqe->wr.num_sge;
    379  wqe->atomic_wr.remote_addr,
    380  wqe->atomic_wr.rkey,
    385  sdata = wqe->atomic_wr.compare_add;
    387  (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
    390  sdata, wqe->atomic_wr.swap);
    431  if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
    437  wc.byte_len = wqe->length;
    445  wqe->wr.send_flags & IB_SEND_SOLICITED);
    451  ipath_send_complete(sqp, wqe, send_status);
    491  ipath_send_complete(sqp, wqe, send_status);

  ipath_send_complete()  [wqe: argument]
    698  void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
    708  (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
    713  wc.wr_id = wqe->wr.wr_id;
    715  wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
    718  wc.byte_len = wqe->length;

ipath_uc.c

  ipath_make_uc_req()  [wqe: local]
    49   struct ipath_swqe *wqe;
    70   wqe = get_swqe_ptr(qp, qp->s_last);
    71   ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    84   wqe = get_swqe_ptr(qp, qp->s_cur);
    97   qp->s_psn = wqe->psn = qp->s_next_psn;
    98   qp->s_sge.sge = wqe->sg_list[0];
    99   qp->s_sge.sg_list = wqe->sg_list + 1;
    100  qp->s_sge.num_sge = wqe->wr.num_sge;
    101  qp->s_len = len = wqe->length;
    102  switch (wqe->wr.opcode) {
    110  if (wqe->wr.opcode == IB_WR_SEND)
    116  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    119  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    121  qp->s_wqe = wqe;
    129  cpu_to_be64(wqe->rdma_wr.remote_addr);
    131  cpu_to_be32(wqe->rdma_wr.rkey);
    139  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    145  ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
    147  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    150  qp->s_wqe = wqe;
    169  if (wqe->wr.opcode == IB_WR_SEND)
    174  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    177  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    179  qp->s_wqe = wqe;
    193  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    199  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    201  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    204  qp->s_wqe = wqe;

ipath_rc.c

  restart_sge()  [wqe: argument]
    42   static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
    47   len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
    48   ss->sge = wqe->sg_list[0];
    49   ss->sg_list = wqe->sg_list + 1;
    50   ss->num_sge = wqe->wr.num_sge;
    52   return wqe->length - len;

  ipath_init_restart()  [wqe: argument]
    58   * @wqe: the work queue to initialize the QP's SGE from
    62   static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
    66   qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,

  ipath_make_rc_req()  [wqe: local]
    218  struct ipath_swqe *wqe;
    256  wqe = get_swqe_ptr(qp, qp->s_last);
    257  ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    272  wqe = get_swqe_ptr(qp, qp->s_cur);
    294  if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
    299  wqe->psn = qp->s_next_psn;
    307  len = wqe->length;
    310  switch (wqe->wr.opcode) {
    315  ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
    319  wqe->lpsn = wqe->psn;
    321  wqe->lpsn += (len - 1) / pmtu;
    326  if (wqe->wr.opcode == IB_WR_SEND)
    331  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    334  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    348  ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
    353  cpu_to_be64(wqe->rdma_wr.remote_addr);
    355  cpu_to_be32(wqe->rdma_wr.rkey);
    358  wqe->lpsn = wqe->psn;
    360  wqe->lpsn += (len - 1) / pmtu;
    365  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    371  ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
    373  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    401  wqe->lpsn = qp->s_next_psn++;
    404  cpu_to_be64(wqe->rdma_wr.remote_addr);
    406  cpu_to_be32(wqe->rdma_wr.rkey);
    431  wqe->lpsn = wqe->psn;
    433  if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
    436  wqe->atomic_wr.swap);
    438  wqe->atomic_wr.compare_add);
    442  wqe->atomic_wr.compare_add);
    446  wqe->atomic_wr.remote_addr >> 32);
    448  wqe->atomic_wr.remote_addr);
    450  wqe->atomic_wr.rkey);
    461  qp->s_sge.sge = wqe->sg_list[0];
    462  qp->s_sge.sg_list = wqe->sg_list + 1;
    463  qp->s_sge.num_sge = wqe->wr.num_sge;
    464  qp->s_len = wqe->length;
    471  if (wqe->wr.opcode == IB_WR_RDMA_READ)
    472  qp->s_psn = wqe->lpsn + 1;
    495  ipath_init_restart(qp, wqe);
    510  if (wqe->wr.opcode == IB_WR_SEND)
    515  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    518  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    531  ipath_init_restart(qp, wqe);
    546  if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
    551  ohdr->u.imm_data = wqe->wr.ex.imm_data;
    553  if (wqe->wr.send_flags & IB_SEND_SOLICITED)
    567  ipath_init_restart(qp, wqe);
    568  len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
    570  cpu_to_be64(wqe->rdma_wr.remote_addr + len);
    572  cpu_to_be32(wqe->rdma_wr.rkey);
    577  qp->s_psn = wqe->lpsn + 1;

  reset_psn()  [wqe: local]
    727  struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
    736  if (ipath_cmp24(psn, wqe->psn) <= 0) {
    742  opcode = wqe->wr.opcode;
    750  wqe = get_swqe_ptr(qp, n);
    751  diff = ipath_cmp24(psn, wqe->psn);
    763  opcode = wqe->wr.opcode;

  ipath_restart_rc()  [wqe: local]
    807  struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
    811  ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
    829  if (wqe->wr.opcode == IB_WR_RDMA_READ)

  do_rc_ack()  [wqe: local]
    863  struct ipath_swqe *wqe;
    888  wqe = get_swqe_ptr(qp, qp->s_last);
    894  while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
    901  if (wqe->wr.opcode == IB_WR_RDMA_READ &&
    916  if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
    918  ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    919  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
    925  update_last_psn(qp, wqe->psn - 1);
    927  ipath_restart_rc(qp, wqe->psn);
    934  if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    935  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
    936  *(u64 *) wqe->sg_list[0].vaddr = val;
    938  (wqe->wr.opcode == IB_WR_RDMA_READ ||
    939  wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
    940  wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
    950  (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
    952  wc.wr_id = wqe->wr.wr_id;
    954  wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
    955  wc.byte_len = wqe->length;
    974  wqe = get_swqe_ptr(qp, qp->s_cur);
    976  qp->s_psn = wqe->psn;
    984  wqe = get_swqe_ptr(qp, qp->s_last);
    1032  if (wqe->wr.opcode == IB_WR_RDMA_READ)
    1079  ipath_send_complete(qp, wqe, status);

  ipath_rc_rcv_resp()  [wqe: local]
    1125  struct ipath_swqe *wqe;
    1162  wqe = get_swqe_ptr(qp, qp->s_last);
    1189  wqe = get_swqe_ptr(qp, qp->s_last);
    1190  if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
    1199  wqe, psn, pmtu);
    1212  if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
    1261  wqe = get_swqe_ptr(qp, qp->s_last);
    1263  wqe, psn, pmtu);
    1276  if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
    1310  ipath_send_complete(qp, wqe, status);

ipath_ud.c

  ipath_ud_loopback()  [wqe: local]
    60   struct ipath_rwqe *wqe;
    132  wqe = get_rwqe_ptr(rq, tail);
    134  if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
    148  wc.wr_id = wqe->wr_id;

  ipath_make_ud_req()  [wqe: local]
    245  struct ipath_swqe *wqe;
    268  wqe = get_swqe_ptr(qp, qp->s_last);
    269  ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    276  wqe = get_swqe_ptr(qp, qp->s_cur);
    282  ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
    305  ipath_ud_loopback(qp, wqe);
    307  ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
    313  extra_bytes = -wqe->length & 3;
    314  nwords = (wqe->length + extra_bytes) >> 2;
    318  qp->s_cur_size = wqe->length;
    321  qp->s_wqe = wqe;
    322  qp->s_sge.sge = wqe->sg_list[0];
    323  qp->s_sge.sg_list = wqe->sg_list + 1;
    324  qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
    342  if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
    344  ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
    362  if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
    374  cpu_to_be32(wqe->ud_wr.remote_qpn);
    380  ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
    381  qp->qkey : wqe->ud_wr.remote_qkey);

ipath_srq.c

  ipath_post_srq_receive()  [wqe: local]
    57   struct ipath_rwqe *wqe;
    79   wqe = get_rwqe_ptr(&srq->rq, wq->head);
    80   wqe->wr_id = wr->wr_id;
    81   wqe->num_sge = wr->num_sge;
    83   wqe->sg_list[i] = wr->sg_list[i];

  ipath_modify_srq()  [wqe: local]
    286  struct ipath_rwqe *wqe;
    289  wqe = get_rwqe_ptr(&srq->rq, tail);
    290  p->wr_id = wqe->wr_id;
    291  p->num_sge = wqe->num_sge;
    292  for (i = 0; i < wqe->num_sge; i++)
    293  p->sg_list[i] = wqe->sg_list[i];

ipath_verbs.c

  ipath_post_one_send()  [wqe: local]
    338  struct ipath_swqe *wqe;
    397  wqe = get_swqe_ptr(qp, qp->s_head);
    401  memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
    405  memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
    408  memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
    410  memcpy(&wqe->wr, wr, sizeof(wqe->wr));
    412  wqe->length = 0;
    422  ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
    426  wqe->length += length;
    429  wqe->wr.num_sge = j;
    433  if (wqe->length > 0x80000000U)
    435  } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
    437  wqe->ssn = qp->s_ssn++;

  ipath_post_receive()  [wqe: local]
    503  struct ipath_rwqe *wqe;
    524  wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
    525  wqe->wr_id = wr->wr_id;
    526  wqe->num_sge = wr->num_sge;
    528  wqe->sg_list[i] = wr->sg_list[i];

  sdma_complete()
    1052  if (tx->wqe)
    1053  ipath_send_complete(qp, tx->wqe, ibs);
    1060  } else if (tx->wqe) {
    1062  ipath_send_complete(qp, tx->wqe, ibs);

  ipath_verbs_send_dma()
    1152  tx->wqe = qp->s_wqe;

ipath_verbs.h

  struct ipath_verbs_txreq  [wqe: member]
    653  struct ipath_swqe *wqe;

    866  int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
    880  void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,

/linux-4.4.14/drivers/staging/rdma/hfi1/

H A D | rc.c | 63 static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe, restart_sge() argument 68 len = delta_psn(psn, wqe->psn) * pmtu; restart_sge() 69 ss->sge = wqe->sg_list[0]; restart_sge() 70 ss->sg_list = wqe->sg_list + 1; restart_sge() 71 ss->num_sge = wqe->wr.num_sge; restart_sge() 72 ss->total_len = wqe->length; restart_sge() 74 return wqe->length - len; restart_sge() 265 struct hfi1_swqe *wqe; hfi1_make_rc_req() local 305 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_make_rc_req() 306 hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ? hfi1_make_rc_req() 325 wqe = get_swqe_ptr(qp, qp->s_cur); hfi1_make_rc_req() 348 if ((wqe->wr.send_flags & IB_SEND_FENCE) && hfi1_make_rc_req() 353 wqe->psn = qp->s_next_psn; hfi1_make_rc_req() 361 len = wqe->length; hfi1_make_rc_req() 364 switch (wqe->wr.opcode) { hfi1_make_rc_req() 369 cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { hfi1_make_rc_req() 373 wqe->lpsn = wqe->psn; hfi1_make_rc_req() 375 wqe->lpsn += (len - 1) / pmtu; hfi1_make_rc_req() 380 if (wqe->wr.opcode == IB_WR_SEND) hfi1_make_rc_req() 385 ohdr->u.imm_data = wqe->wr.ex.imm_data; hfi1_make_rc_req() 388 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_rc_req() 402 cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) { hfi1_make_rc_req() 407 cpu_to_be64(wqe->rdma_wr.remote_addr); hfi1_make_rc_req() 409 cpu_to_be32(wqe->rdma_wr.rkey); hfi1_make_rc_req() 412 wqe->lpsn = wqe->psn; hfi1_make_rc_req() 414 wqe->lpsn += (len - 1) / pmtu; hfi1_make_rc_req() 419 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) hfi1_make_rc_req() 425 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; hfi1_make_rc_req() 427 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_rc_req() 455 wqe->lpsn = qp->s_next_psn++; hfi1_make_rc_req() 458 cpu_to_be64(wqe->rdma_wr.remote_addr); hfi1_make_rc_req() 460 cpu_to_be32(wqe->rdma_wr.rkey); hfi1_make_rc_req() 486 wqe->lpsn = wqe->psn; hfi1_make_rc_req() 488 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { hfi1_make_rc_req() 491 wqe->atomic_wr.swap); hfi1_make_rc_req() 493 wqe->atomic_wr.compare_add); hfi1_make_rc_req() 497 wqe->atomic_wr.compare_add); hfi1_make_rc_req() 501 wqe->atomic_wr.remote_addr >> 32); hfi1_make_rc_req() 503 wqe->atomic_wr.remote_addr); hfi1_make_rc_req() 505 wqe->atomic_wr.rkey); hfi1_make_rc_req() 517 qp->s_sge.sge = wqe->sg_list[0]; hfi1_make_rc_req() 518 qp->s_sge.sg_list = wqe->sg_list + 1; hfi1_make_rc_req() 519 qp->s_sge.num_sge = wqe->wr.num_sge; hfi1_make_rc_req() 520 qp->s_sge.total_len = wqe->length; hfi1_make_rc_req() 521 qp->s_len = wqe->length; hfi1_make_rc_req() 527 if (wqe->wr.opcode == IB_WR_RDMA_READ) hfi1_make_rc_req() 528 qp->s_psn = wqe->lpsn + 1; hfi1_make_rc_req() 546 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); hfi1_make_rc_req() 562 if (wqe->wr.opcode == IB_WR_SEND) hfi1_make_rc_req() 567 ohdr->u.imm_data = wqe->wr.ex.imm_data; hfi1_make_rc_req() 570 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_rc_req() 588 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); hfi1_make_rc_req() 604 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) hfi1_make_rc_req() 609 ohdr->u.imm_data = wqe->wr.ex.imm_data; hfi1_make_rc_req() 611 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_rc_req() 630 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu; hfi1_make_rc_req() 632 cpu_to_be64(wqe->rdma_wr.remote_addr + len); hfi1_make_rc_req() 634 cpu_to_be32(wqe->rdma_wr.rkey); hfi1_make_rc_req() 635 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); hfi1_make_rc_req() 639 qp->s_psn = wqe->lpsn + 1; hfi1_make_rc_req() 648 delta = 
delta_psn(bth2, wqe->psn); hfi1_make_rc_req() 799 struct hfi1_swqe *wqe = get_swqe_ptr(qp, n); reset_psn() local 808 if (cmp_psn(psn, wqe->psn) <= 0) { reset_psn() 814 opcode = wqe->wr.opcode; reset_psn() 822 wqe = get_swqe_ptr(qp, n); reset_psn() 823 diff = cmp_psn(psn, wqe->psn); reset_psn() 835 opcode = wqe->wr.opcode; reset_psn() 884 struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); restart_rc() local 892 hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); restart_rc() 901 if (wqe->wr.opcode == IB_WR_RDMA_READ) restart_rc() 961 struct hfi1_swqe *wqe; reset_sending_psn() local 966 wqe = get_swqe_ptr(qp, n); reset_sending_psn() 967 if (cmp_psn(psn, wqe->lpsn) <= 0) { reset_sending_psn() 968 if (wqe->wr.opcode == IB_WR_RDMA_READ) reset_sending_psn() 969 qp->s_sending_psn = wqe->lpsn + 1; reset_sending_psn() 987 struct hfi1_swqe *wqe; hfi1_rc_send_complete() local 1024 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_rc_send_complete() 1025 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 && hfi1_rc_send_complete() 1028 for (i = 0; i < wqe->wr.num_sge; i++) { hfi1_rc_send_complete() 1029 struct hfi1_sge *sge = &wqe->sg_list[i]; hfi1_rc_send_complete() 1035 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { hfi1_rc_send_complete() 1037 wc.wr_id = wqe->wr.wr_id; hfi1_rc_send_complete() 1039 wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode]; hfi1_rc_send_complete() 1040 wc.byte_len = wqe->length; hfi1_rc_send_complete() 1072 struct hfi1_swqe *wqe, do_rc_completion() 1083 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 || do_rc_completion() 1085 for (i = 0; i < wqe->wr.num_sge; i++) { do_rc_completion() 1086 struct hfi1_sge *sge = &wqe->sg_list[i]; do_rc_completion() 1092 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { do_rc_completion() 1094 wc.wr_id = wqe->wr.wr_id; do_rc_completion() 1096 wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode]; do_rc_completion() 1097 wc.byte_len = wqe->length; do_rc_completion() 1123 update_last_psn(qp, wqe->lpsn); do_rc_completion() 1134 wqe = get_swqe_ptr(qp, qp->s_cur); do_rc_completion() 1137 qp->s_psn = wqe->psn; do_rc_completion() 1144 wqe = get_swqe_ptr(qp, qp->s_acked); do_rc_completion() 1146 return wqe; do_rc_completion() 1165 struct hfi1_swqe *wqe; do_rc_ack() local 1185 wqe = get_swqe_ptr(qp, qp->s_acked); do_rc_ack() 1192 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) { do_rc_ack() 1199 if (wqe->wr.opcode == IB_WR_RDMA_READ && do_rc_ack() 1214 if ((wqe->wr.opcode == IB_WR_RDMA_READ && do_rc_ack() 1216 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || do_rc_ack() 1217 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && do_rc_ack() 1236 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || do_rc_ack() 1237 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { do_rc_ack() 1238 u64 *vaddr = wqe->sg_list[0].vaddr; do_rc_ack() 1242 (wqe->wr.opcode == IB_WR_RDMA_READ || do_rc_ack() 1243 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || do_rc_ack() 1244 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { do_rc_ack() 1258 wqe = do_rc_completion(qp, wqe, ibp); do_rc_ack() 1356 hfi1_send_complete(qp, wqe, status); do_rc_ack() 1386 struct hfi1_swqe *wqe; rdma_seq_err() local 1394 wqe = get_swqe_ptr(qp, qp->s_acked); rdma_seq_err() 1396 while (cmp_psn(psn, wqe->lpsn) > 0) { rdma_seq_err() 1397 if (wqe->wr.opcode == IB_WR_RDMA_READ || rdma_seq_err() 1398 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || rdma_seq_err() 1399 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) rdma_seq_err() 1401 wqe = do_rc_completion(qp, wqe, ibp); rdma_seq_err() 1436 struct hfi1_swqe *wqe; rc_rcv_resp() local 1476 wqe = 
get_swqe_ptr(qp, qp->s_acked); rc_rcv_resp() 1494 wqe = get_swqe_ptr(qp, qp->s_acked); rc_rcv_resp() 1495 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) rc_rcv_resp() 1503 wqe, psn, pmtu); rc_rcv_resp() 1510 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) rc_rcv_resp() 1559 wqe = get_swqe_ptr(qp, qp->s_acked); rc_rcv_resp() 1561 wqe, psn, pmtu); rc_rcv_resp() 1568 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) rc_rcv_resp() 1602 hfi1_send_complete(qp, wqe, status); rc_rcv_resp() 1071 do_rc_completion(struct hfi1_qp *qp, struct hfi1_swqe *wqe, struct hfi1_ibport *ibp) do_rc_completion() argument
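The restart_sge() snippet at the top of this entry is the heart of RC retransmission: the distance between the retry PSN and the WQE's starting PSN, times the path MTU, gives the bytes already delivered, and the SGE state is rewound past them. Below is a minimal compilable sketch of that arithmetic; the delta_psn() implementation, the 24-bit PSN width, and the helper names are illustrative assumptions, not the driver's definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define PSN_MASK 0xFFFFFFu               /* IB PSNs are 24 bits wide */

    /* Signed distance between two 24-bit PSNs (illustrative). */
    static int32_t delta_psn(uint32_t a, uint32_t b)
    {
        return ((int32_t)((a - b) << 8)) >> 8;
    }

    /*
     * Rewind a WQE for retransmission: every packet already sent covers
     * one pmtu of payload, so the bytes to skip are delta * pmtu and the
     * remainder is what restart_sge() returns as the new send length.
     */
    static uint32_t restart_len(uint32_t wqe_len, uint32_t wqe_psn,
                                uint32_t psn, uint32_t pmtu)
    {
        uint32_t skip = (uint32_t)delta_psn(psn & PSN_MASK,
                                            wqe_psn & PSN_MASK) * pmtu;
        return wqe_len - skip;               /* bytes still to (re)send */
    }

    int main(void)
    {
        /* 16 KiB WQE starting at PSN 100, retry from PSN 103, pmtu 4096 */
        printf("%u bytes left\n", restart_len(16384, 100, 103, 4096));
        return 0;
    }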
|
H A D | ruc.c | 100 static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) init_sge() argument 113 for (i = j = 0; i < wqe->num_sge; i++) { init_sge() 114 if (wqe->sg_list[i].length == 0) init_sge() 118 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) init_sge() 120 qp->r_len += wqe->sg_list[i].length; init_sge() 136 wc.wr_id = wqe->wr_id; init_sge() 163 struct hfi1_rwqe *wqe; hfi1_get_rwqe() local 195 wqe = get_rwqe_ptr(rq, tail); hfi1_get_rwqe() 204 if (!wr_id_only && !init_sge(qp, wqe)) { hfi1_get_rwqe() 208 qp->r_wr_id = wqe->wr_id; hfi1_get_rwqe() 380 struct hfi1_swqe *wqe; ruc_loopback() local 410 wqe = get_swqe_ptr(sqp, sqp->s_last); ruc_loopback() 450 sqp->s_sge.sge = wqe->sg_list[0]; ruc_loopback() 451 sqp->s_sge.sg_list = wqe->sg_list + 1; ruc_loopback() 452 sqp->s_sge.num_sge = wqe->wr.num_sge; ruc_loopback() 453 sqp->s_len = wqe->length; ruc_loopback() 454 switch (wqe->wr.opcode) { ruc_loopback() 457 wc.ex.imm_data = wqe->wr.ex.imm_data; ruc_loopback() 471 wc.ex.imm_data = wqe->wr.ex.imm_data; ruc_loopback() 481 if (wqe->length == 0) ruc_loopback() 483 if (unlikely(!hfi1_rkey_ok(qp, &qp->r_sge.sge, wqe->length, ruc_loopback() 484 wqe->rdma_wr.remote_addr, ruc_loopback() 485 wqe->rdma_wr.rkey, ruc_loopback() 490 qp->r_sge.total_len = wqe->length; ruc_loopback() 496 if (unlikely(!hfi1_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, ruc_loopback() 497 wqe->rdma_wr.remote_addr, ruc_loopback() 498 wqe->rdma_wr.rkey, ruc_loopback() 504 qp->r_sge.sge = wqe->sg_list[0]; ruc_loopback() 505 qp->r_sge.sg_list = wqe->sg_list + 1; ruc_loopback() 506 qp->r_sge.num_sge = wqe->wr.num_sge; ruc_loopback() 507 qp->r_sge.total_len = wqe->length; ruc_loopback() 515 wqe->atomic_wr.remote_addr, ruc_loopback() 516 wqe->atomic_wr.rkey, ruc_loopback() 521 sdata = wqe->atomic_wr.compare_add; ruc_loopback() 523 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ? ruc_loopback() 526 sdata, wqe->atomic_wr.swap); ruc_loopback() 573 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) ruc_loopback() 579 wc.byte_len = wqe->length; ruc_loopback() 587 wqe->wr.send_flags & IB_SEND_SOLICITED); ruc_loopback() 594 hfi1_send_complete(sqp, wqe, send_status); ruc_loopback() 641 hfi1_send_complete(sqp, wqe, send_status); ruc_loopback() 898 void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe, hfi1_send_complete() argument 907 for (i = 0; i < wqe->wr.num_sge; i++) { hfi1_send_complete() 908 struct hfi1_sge *sge = &wqe->sg_list[i]; hfi1_send_complete() 915 atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); hfi1_send_complete() 919 (wqe->wr.send_flags & IB_SEND_SIGNALED) || hfi1_send_complete() 924 wc.wr_id = wqe->wr.wr_id; hfi1_send_complete() 926 wc.opcode = ib_hfi1_wc_opcode[wqe->wr.opcode]; hfi1_send_complete() 929 wc.byte_len = wqe->length; hfi1_send_complete()
|
H A D | uc.c | 67 struct hfi1_swqe *wqe; hfi1_make_uc_req() local 90 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_make_uc_req() 91 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); hfi1_make_uc_req() 100 wqe = get_swqe_ptr(qp, qp->s_cur); hfi1_make_uc_req() 115 wqe->psn = qp->s_next_psn; hfi1_make_uc_req() 117 qp->s_sge.sge = wqe->sg_list[0]; hfi1_make_uc_req() 118 qp->s_sge.sg_list = wqe->sg_list + 1; hfi1_make_uc_req() 119 qp->s_sge.num_sge = wqe->wr.num_sge; hfi1_make_uc_req() 120 qp->s_sge.total_len = wqe->length; hfi1_make_uc_req() 121 len = wqe->length; hfi1_make_uc_req() 123 switch (wqe->wr.opcode) { hfi1_make_uc_req() 131 if (wqe->wr.opcode == IB_WR_SEND) hfi1_make_uc_req() 137 ohdr->u.imm_data = wqe->wr.ex.imm_data; hfi1_make_uc_req() 140 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_uc_req() 142 qp->s_wqe = wqe; hfi1_make_uc_req() 150 cpu_to_be64(wqe->rdma_wr.remote_addr); hfi1_make_uc_req() 152 cpu_to_be32(wqe->rdma_wr.rkey); hfi1_make_uc_req() 160 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) hfi1_make_uc_req() 166 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; hfi1_make_uc_req() 168 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_uc_req() 171 qp->s_wqe = wqe; hfi1_make_uc_req() 191 if (wqe->wr.opcode == IB_WR_SEND) hfi1_make_uc_req() 196 ohdr->u.imm_data = wqe->wr.ex.imm_data; hfi1_make_uc_req() 199 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_uc_req() 201 qp->s_wqe = wqe; hfi1_make_uc_req() 216 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) hfi1_make_uc_req() 222 ohdr->u.imm_data = wqe->wr.ex.imm_data; hfi1_make_uc_req() 224 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_uc_req() 227 qp->s_wqe = wqe; hfi1_make_uc_req()
|
H A D | srq.c | 74 struct hfi1_rwqe *wqe; hfi1_post_srq_receive() local 96 wqe = get_rwqe_ptr(&srq->rq, wq->head); hfi1_post_srq_receive() 97 wqe->wr_id = wr->wr_id; hfi1_post_srq_receive() 98 wqe->num_sge = wr->num_sge; hfi1_post_srq_receive() 100 wqe->sg_list[i] = wr->sg_list[i]; hfi1_post_srq_receive() 299 struct hfi1_rwqe *wqe; hfi1_modify_srq() local 302 wqe = get_rwqe_ptr(&srq->rq, tail); hfi1_modify_srq() 303 p->wr_id = wqe->wr_id; hfi1_modify_srq() 304 p->num_sge = wqe->num_sge; hfi1_modify_srq() 305 for (i = 0; i < wqe->num_sge; i++) hfi1_modify_srq() 306 p->sg_list[i] = wqe->sg_list[i]; hfi1_modify_srq()
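hfi1_post_srq_receive() above follows the classic producer-side ring pattern: fill the slot at head with the wr_id and scatter list, then advance head with wrap, leaving one slot open to distinguish full from empty. A hedged sketch with made-up types:

    #include <stdint.h>

    struct rwqe { uint64_t wr_id; int num_sge; /* sg_list[] would follow */ };

    struct rq {
        struct rwqe *base;      /* ring storage */
        uint32_t size;          /* number of entries */
        uint32_t head;          /* producer: next slot to fill */
        uint32_t tail;          /* consumer: next slot to drain */
    };

    /* Post one receive; returns 0, or -1 when the ring is full. */
    static int rq_post(struct rq *rq, uint64_t wr_id, int num_sge)
    {
        uint32_t next = rq->head + 1;

        if (next >= rq->size)
            next = 0;                        /* wrap */
        if (next == rq->tail)
            return -1;                       /* full: one slot kept open */
        rq->base[rq->head].wr_id   = wr_id;  /* cookie echoed in the CQE */
        rq->base[rq->head].num_sge = num_sge;
        rq->head = next;                     /* publish after the copy */
        return 0;
    }

Keeping wr_id in the entry is what lets the completion path hand the right cookie back to the consumer.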
|
H A D | ud.c | 272 struct hfi1_swqe *wqe; hfi1_make_ud_req() local 296 wqe = get_swqe_ptr(qp, qp->s_last); hfi1_make_ud_req() 297 hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); hfi1_make_ud_req() 304 wqe = get_swqe_ptr(qp, qp->s_cur); hfi1_make_ud_req() 312 ah_attr = &to_iah(wqe->ud_wr.ah)->attr; hfi1_make_ud_req() 332 ud_loopback(qp, wqe); hfi1_make_ud_req() 334 hfi1_send_complete(qp, wqe, IB_WC_SUCCESS); hfi1_make_ud_req() 340 extra_bytes = -wqe->length & 3; hfi1_make_ud_req() 341 nwords = (wqe->length + extra_bytes) >> 2; hfi1_make_ud_req() 345 qp->s_cur_size = wqe->length; hfi1_make_ud_req() 349 qp->s_wqe = wqe; hfi1_make_ud_req() 350 qp->s_sge.sge = wqe->sg_list[0]; hfi1_make_ud_req() 351 qp->s_sge.sg_list = wqe->sg_list + 1; hfi1_make_ud_req() 352 qp->s_sge.num_sge = wqe->wr.num_sge; hfi1_make_ud_req() 353 qp->s_sge.total_len = wqe->length; hfi1_make_ud_req() 371 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { hfi1_make_ud_req() 373 ohdr->u.ud.imm_data = wqe->wr.ex.imm_data; hfi1_make_ud_req() 400 if (wqe->wr.send_flags & IB_SEND_SOLICITED) hfi1_make_ud_req() 404 bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index); hfi1_make_ud_req() 408 ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn); hfi1_make_ud_req() 414 ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ? hfi1_make_ud_req() 415 qp->qkey : wqe->ud_wr.remote_qkey); hfi1_make_ud_req()
|
H A D | verbs.c | 363 struct hfi1_swqe *wqe; post_one_send() local 415 wqe = get_swqe_ptr(qp, qp->s_head); post_one_send() 420 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr)); post_one_send() 424 memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr)); post_one_send() 427 memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr)); post_one_send() 429 memcpy(&wqe->wr, wr, sizeof(wqe->wr)); post_one_send() 431 wqe->length = 0; post_one_send() 442 ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j], post_one_send() 446 wqe->length += length; post_one_send() 449 wqe->wr.num_sge = j; post_one_send() 453 if (wqe->length > 0x80000000U) post_one_send() 460 wqe->ssn = qp->s_ssn++; post_one_send() 468 struct hfi1_sge *sge = &wqe->sg_list[--j]; post_one_send() 544 struct hfi1_rwqe *wqe; post_receive() local 565 wqe = get_rwqe_ptr(&qp->r_rq, wq->head); post_receive() 566 wqe->wr_id = wr->wr_id; post_receive() 567 wqe->num_sge = wr->num_sge; post_receive() 569 wqe->sg_list[i] = wr->sg_list[i]; post_receive() 830 if (tx->wqe) verbs_sdma_complete() 831 hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS); verbs_sdma_complete() 1048 tx->wqe = qp->s_wqe; hfi1_verbs_send_dma()
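post_one_send() above copies the caller's work request into the software WQE (picking the ud/rdma/atomic variant by opcode), validates each SGE's lkey, accumulates the total length, and rejects messages of 2 GiB or more (the 0x80000000U check). The length accounting alone looks roughly like this sketch; struct sge and the helper name are illustrative:

    #include <stdint.h>

    struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

    /*
     * Sum the scatter/gather lengths of one send request, skipping
     * zero-length entries (which post_one_send() does not map), and
     * refuse totals above 2 GiB, mirroring the 0x80000000U test above.
     */
    static int total_send_len(const struct sge *sg, int num_sge, uint32_t *out)
    {
        uint64_t total = 0;
        int i;

        for (i = 0; i < num_sge; i++) {
            if (sg[i].length == 0)
                continue;
            total += sg[i].length;
        }
        if (total > 0x80000000U)
            return -1;                       /* message too long */
        *out = (uint32_t)total;
        return 0;
    }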
|
H A D | qp.c | 414 struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last); clear_mr_refs() local 417 for (i = 0; i < wqe->wr.num_sge; i++) { clear_mr_refs() 418 struct hfi1_sge *sge = &wqe->sg_list[i]; clear_mr_refs() 425 atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); clear_mr_refs() 1638 struct hfi1_swqe *wqe; qp_iter_print() local 1643 wqe = get_swqe_ptr(qp, qp->s_last); qp_iter_print() 1652 wqe ? wqe->wr.opcode : 0, qp_iter_print() 1658 wqe ? wqe->ssn : 0, qp_iter_print()
|
H A D | verbs.h | 1085 void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,
|
H A D | sdma.h | 383 struct hfi1_swqe *wqe; member in struct:verbs_txreq
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
H A D | odp.c | 297 * @wqe points at the first data segment in the WQE. 311 struct mlx5_ib_pfault *pfault, void *wqe, pagefault_data_segments() 324 wqe += sizeof(struct mlx5_wqe_srq_next_seg); pagefault_data_segments() 331 while (wqe < wqe_end) { pagefault_data_segments() 332 struct mlx5_wqe_data_seg *dseg = wqe; pagefault_data_segments() 342 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt, pagefault_data_segments() 345 wqe += sizeof(*dseg); pagefault_data_segments() 380 * Parse initiator WQE. Advances the wqe pointer to point at the 385 void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_initiator_pfault_handler() 388 struct mlx5_wqe_ctrl_seg *ctrl = *wqe; mlx5_ib_mr_initiator_pfault_handler() 389 u16 wqe_index = pfault->mpfault.wqe.wqe_index; mlx5_ib_mr_initiator_pfault_handler() 429 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; mlx5_ib_mr_initiator_pfault_handler() 430 *wqe += sizeof(*ctrl); mlx5_ib_mr_initiator_pfault_handler() 449 *wqe += sizeof(struct mlx5_wqe_raddr_seg); mlx5_ib_mr_initiator_pfault_handler() 455 *wqe += sizeof(struct mlx5_wqe_raddr_seg); mlx5_ib_mr_initiator_pfault_handler() 468 *wqe += sizeof(struct mlx5_wqe_datagram_seg); mlx5_ib_mr_initiator_pfault_handler() 485 * Parse responder WQE. Advances the wqe pointer to point at the 490 void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_responder_pfault_handler() 524 *wqe_end = *wqe + wqe_size; mlx5_ib_mr_responder_pfault_handler() 534 void *wqe, *wqe_end; mlx5_ib_mr_wqe_pfault_handler() local 538 u16 wqe_index = pfault->mpfault.wqe.wqe_index; mlx5_ib_mr_wqe_pfault_handler() 557 wqe = buffer; mlx5_ib_mr_wqe_pfault_handler() 559 ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe, mlx5_ib_mr_wqe_pfault_handler() 562 ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe, mlx5_ib_mr_wqe_pfault_handler() 569 if (wqe >= wqe_end) { mlx5_ib_mr_wqe_pfault_handler() 575 ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped, mlx5_ib_mr_wqe_pfault_handler() 310 pagefault_data_segments(struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void *wqe, void *wqe_end, u32 *bytes_mapped, u32 *total_wqe_bytes, int receive_queue) pagefault_data_segments() argument 383 mlx5_ib_mr_initiator_pfault_handler( struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_initiator_pfault_handler() argument 488 mlx5_ib_mr_responder_pfault_handler( struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault, void **wqe, void **wqe_end, int wqe_length) mlx5_ib_mr_responder_pfault_handler() argument
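pagefault_data_segments() above walks the raw bytes of a faulting WQE: an inline segment is skipped by its 16-byte-aligned payload size, a scatter/gather pointer segment by sizeof(dseg). The sketch below shows the shape of that walk; the segment layout and the inline flag are simplified stand-ins, not the mlx5 wire format.

    #include <stdint.h>
    #include <stddef.h>

    #define ALIGN16(x)   (((x) + 15u) & ~15u)
    #define INLINE_FLAG  0x80000000u  /* assumed marker; not mlx5's encoding */

    struct data_seg { uint32_t byte_count; uint32_t lkey; uint64_t addr; };

    /* Walk the data segments of one WQE, summing payload bytes. */
    static uint32_t walk_segments(const void *wqe, const void *wqe_end)
    {
        const uint8_t *p = wqe;
        uint32_t total = 0;

        while ((const void *)p < wqe_end) {
            const struct data_seg *dseg = (const void *)p;
            uint32_t bc = dseg->byte_count;

            if (bc & INLINE_FLAG) {          /* payload lives in the WQE */
                uint32_t len = bc & ~INLINE_FLAG;

                total += len;
                p += ALIGN16(sizeof(uint32_t) + len);
            } else {                         /* pointer segment: key+addr */
                total += bc;
                p += sizeof(*dseg);
            }
        }
        return total;
    }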
|
H A D | qp.c | 354 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", calc_sq_size() 2061 static u8 calc_sig(void *wqe, int size) calc_sig() argument 2063 u8 *p = wqe; calc_sig() 2073 static u8 wq_sig(void *wqe) wq_sig() argument 2075 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); wq_sig() 2079 void *wqe, int *sz) set_data_inl_seg() 2089 seg = wqe; set_data_inl_seg() 2090 wqe += sizeof(*seg); set_data_inl_seg() 2099 if (unlikely(wqe + len > qend)) { set_data_inl_seg() 2100 copy = qend - wqe; set_data_inl_seg() 2101 memcpy(wqe, addr, copy); set_data_inl_seg() 2104 wqe = mlx5_get_send_wqe(qp, 0); set_data_inl_seg() 2106 memcpy(wqe, addr, len); set_data_inl_seg() 2107 wqe += len; set_data_inl_seg() 2485 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); dump_wqe() 2078 set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, void *wqe, int *sz) set_data_inl_seg() argument
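set_data_inl_seg() above copies inline payload directly into the circular send queue buffer and, when the copy would run past qend, splits the memcpy and wraps to the start of the queue. The same pattern in isolation, with hypothetical parameters:

    #include <stdint.h>
    #include <string.h>

    /*
     * Copy len bytes of inline payload at cur inside a circular queue
     * buffer bounded by [qstart, qend); if the copy would run off the
     * end it is split and the cursor wraps, the same shape as the
     * split memcpy in set_data_inl_seg() above.  Caller guarantees
     * len is smaller than the queue.
     */
    static uint8_t *copy_inline(uint8_t *cur, uint8_t *qstart, uint8_t *qend,
                                const void *src, size_t len)
    {
        if (cur + len > qend) {
            size_t first = (size_t)(qend - cur);

            memcpy(cur, src, first);
            src = (const uint8_t *)src + first;
            len -= first;
            cur = qstart;                    /* wrap to the beginning */
        }
        memcpy(cur, src, len);
        return cur + len;
    }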
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
H A D | mthca_srq.c | 90 static inline int *wqe_to_link(void *wqe) wqe_to_link() argument 92 return (int *) (wqe + offsetof(struct mthca_next_seg, imm)); wqe_to_link() 151 void *wqe; mthca_alloc_srq_buf() local 178 next = wqe = get_wqe(srq, i); mthca_alloc_srq_buf() 181 *wqe_to_link(wqe) = i + 1; mthca_alloc_srq_buf() 184 *wqe_to_link(wqe) = -1; mthca_alloc_srq_buf() 188 for (scatter = wqe + sizeof (struct mthca_next_seg); mthca_alloc_srq_buf() 189 (void *) scatter < wqe + (1 << srq->wqe_shift); mthca_alloc_srq_buf() 487 void *wqe; mthca_tavor_post_srq_recv() local 496 wqe = get_wqe(srq, ind); mthca_tavor_post_srq_recv() 497 next_ind = *wqe_to_link(wqe); mthca_tavor_post_srq_recv() 507 srq->last = wqe; mthca_tavor_post_srq_recv() 509 ((struct mthca_next_seg *) wqe)->ee_nds = 0; mthca_tavor_post_srq_recv() 512 wqe += sizeof (struct mthca_next_seg); mthca_tavor_post_srq_recv() 522 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_tavor_post_srq_recv() 523 wqe += sizeof (struct mthca_data_seg); mthca_tavor_post_srq_recv() 527 mthca_set_data_seg_inval(wqe); mthca_tavor_post_srq_recv() 586 void *wqe; mthca_arbel_post_srq_recv() local 592 wqe = get_wqe(srq, ind); mthca_arbel_post_srq_recv() 593 next_ind = *wqe_to_link(wqe); mthca_arbel_post_srq_recv() 602 ((struct mthca_next_seg *) wqe)->ee_nds = 0; mthca_arbel_post_srq_recv() 605 wqe += sizeof (struct mthca_next_seg); mthca_arbel_post_srq_recv() 614 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_arbel_post_srq_recv() 615 wqe += sizeof (struct mthca_data_seg); mthca_arbel_post_srq_recv() 619 mthca_set_data_seg_inval(wqe); mthca_arbel_post_srq_recv()
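wqe_to_link() above is a nice trick: the SRQ keeps its free list inside the WQEs themselves, storing the next free index in the otherwise-unused imm field of each idle entry, so no side array is needed. A self-contained sketch of the same intrusive free list; sizes and the link location are illustrative:

    #include <stdint.h>

    #define WQE_SIZE 64
    #define NWQE     16

    static uint8_t srq_buf[NWQE][WQE_SIZE];
    static int first_free;

    /* The link lives in the first bytes of an idle WQE (illustrative). */
    static int *wqe_link(int i) { return (int *)srq_buf[i]; }

    static void srq_init(void)
    {
        int i;

        for (i = 0; i < NWQE - 1; i++)
            *wqe_link(i) = i + 1;
        *wqe_link(NWQE - 1) = -1;            /* end of the free list */
        first_free = 0;
    }

    static int srq_alloc(void)               /* pop an index, -1 if empty */
    {
        int i = first_free;

        if (i >= 0)
            first_free = *wqe_link(i);
        return i;
    }

    static void srq_free(int i)              /* push an index back */
    {
        *wqe_link(i) = first_free;
        first_free = i;
    }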
|
H A D | mthca_qp.c | 1607 void *wqe; mthca_tavor_post_send() local 1643 wqe = get_send_wqe(qp, ind); mthca_tavor_post_send() 1645 qp->sq.last = wqe; mthca_tavor_post_send() 1647 ((struct mthca_next_seg *) wqe)->nda_op = 0; mthca_tavor_post_send() 1648 ((struct mthca_next_seg *) wqe)->ee_nds = 0; mthca_tavor_post_send() 1649 ((struct mthca_next_seg *) wqe)->flags = mthca_tavor_post_send() 1657 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; mthca_tavor_post_send() 1659 wqe += sizeof (struct mthca_next_seg); mthca_tavor_post_send() 1667 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, mthca_tavor_post_send() 1669 wqe += sizeof (struct mthca_raddr_seg); mthca_tavor_post_send() 1671 set_atomic_seg(wqe, atomic_wr(wr)); mthca_tavor_post_send() 1672 wqe += sizeof (struct mthca_atomic_seg); mthca_tavor_post_send() 1680 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, mthca_tavor_post_send() 1682 wqe += sizeof (struct mthca_raddr_seg); mthca_tavor_post_send() 1697 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, mthca_tavor_post_send() 1699 wqe += sizeof (struct mthca_raddr_seg); mthca_tavor_post_send() 1711 set_tavor_ud_seg(wqe, ud_wr(wr)); mthca_tavor_post_send() 1712 wqe += sizeof (struct mthca_tavor_ud_seg); mthca_tavor_post_send() 1718 wqe - sizeof (struct mthca_next_seg), mthca_tavor_post_send() 1719 wqe); mthca_tavor_post_send() 1724 wqe += sizeof (struct mthca_data_seg); mthca_tavor_post_send() 1737 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_tavor_post_send() 1738 wqe += sizeof (struct mthca_data_seg); mthca_tavor_post_send() 1744 ((struct mthca_data_seg *) wqe)->byte_count = mthca_tavor_post_send() 1746 ((u32 *) wqe)[1] = 0; mthca_tavor_post_send() 1747 wqe += sizeof (struct mthca_data_seg); mthca_tavor_post_send() 1824 void *wqe; mthca_tavor_post_receive() local 1844 wqe = get_recv_wqe(qp, ind); mthca_tavor_post_receive() 1846 qp->rq.last = wqe; mthca_tavor_post_receive() 1848 ((struct mthca_next_seg *) wqe)->ee_nds = mthca_tavor_post_receive() 1850 ((struct mthca_next_seg *) wqe)->flags = 0; mthca_tavor_post_receive() 1852 wqe += sizeof (struct mthca_next_seg); mthca_tavor_post_receive() 1862 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_tavor_post_receive() 1863 wqe += sizeof (struct mthca_data_seg); mthca_tavor_post_receive() 1922 void *wqe; mthca_arbel_post_send() local 1984 wqe = get_send_wqe(qp, ind); mthca_arbel_post_send() 1986 qp->sq.last = wqe; mthca_arbel_post_send() 1988 ((struct mthca_next_seg *) wqe)->flags = mthca_arbel_post_send() 1998 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; mthca_arbel_post_send() 2000 wqe += sizeof (struct mthca_next_seg); mthca_arbel_post_send() 2008 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, mthca_arbel_post_send() 2010 wqe += sizeof (struct mthca_raddr_seg); mthca_arbel_post_send() 2012 set_atomic_seg(wqe, atomic_wr(wr)); mthca_arbel_post_send() 2013 wqe += sizeof (struct mthca_atomic_seg); mthca_arbel_post_send() 2021 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, mthca_arbel_post_send() 2023 wqe += sizeof (struct mthca_raddr_seg); mthca_arbel_post_send() 2038 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, mthca_arbel_post_send() 2040 wqe += sizeof (struct mthca_raddr_seg); mthca_arbel_post_send() 2052 set_arbel_ud_seg(wqe, ud_wr(wr)); mthca_arbel_post_send() 2053 wqe += sizeof (struct mthca_arbel_ud_seg); mthca_arbel_post_send() 2059 wqe - sizeof (struct mthca_next_seg), mthca_arbel_post_send() 2060 wqe); mthca_arbel_post_send() 2065 wqe += sizeof (struct mthca_data_seg); mthca_arbel_post_send() 2078 mthca_set_data_seg(wqe, 
wr->sg_list + i); mthca_arbel_post_send() 2079 wqe += sizeof (struct mthca_data_seg); mthca_arbel_post_send() 2085 ((struct mthca_data_seg *) wqe)->byte_count = mthca_arbel_post_send() 2087 ((u32 *) wqe)[1] = 0; mthca_arbel_post_send() 2088 wqe += sizeof (struct mthca_data_seg); mthca_arbel_post_send() 2166 void *wqe; mthca_arbel_post_receive() local 2185 wqe = get_recv_wqe(qp, ind); mthca_arbel_post_receive() 2187 ((struct mthca_next_seg *) wqe)->flags = 0; mthca_arbel_post_receive() 2189 wqe += sizeof (struct mthca_next_seg); mthca_arbel_post_receive() 2198 mthca_set_data_seg(wqe, wr->sg_list + i); mthca_arbel_post_receive() 2199 wqe += sizeof (struct mthca_data_seg); mthca_arbel_post_receive() 2203 mthca_set_data_seg_inval(wqe); mthca_arbel_post_receive()
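Both mthca post_send paths above build a WQE by walking a void * cursor through the entry: first the next/control segment, then an operation-specific segment (raddr, atomic, UD address vector) chosen by opcode, then one data segment per SGE; the cursor's travel, in 16-byte units, becomes the size posted to hardware. A sketch for the RDMA-write case, with simplified segment structs:

    #include <stdint.h>

    struct next_seg  { uint32_t nda_op, ee_nds, flags, imm; };
    struct raddr_seg { uint64_t raddr; uint32_t rkey, reserved; };
    struct data_seg  { uint32_t byte_count, lkey; uint64_t addr; };

    /* Append one RDMA-write WQE; returns its size in 16-byte units. */
    static unsigned build_write_wqe(void *slot, uint64_t raddr, uint32_t rkey,
                                    const struct data_seg *sg, int num_sge)
    {
        uint8_t *wqe = slot;
        int i;

        ((struct next_seg *)(void *)wqe)->flags = 0; /* patched later */
        wqe += sizeof(struct next_seg);

        ((struct raddr_seg *)(void *)wqe)->raddr = raddr; /* remote target */
        ((struct raddr_seg *)(void *)wqe)->rkey  = rkey;
        wqe += sizeof(struct raddr_seg);

        for (i = 0; i < num_sge; i++) {              /* local sources */
            *(struct data_seg *)(void *)wqe = sg[i];
            wqe += sizeof(struct data_seg);
        }
        return (unsigned)(wqe - (uint8_t *)slot) / 16;
    }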
|
H A D | mthca_cq.c | 126 __be32 wqe; member in struct:mthca_cqe 140 __be32 wqe; member in struct:mthca_err_cqe 312 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); mthca_cq_clean() 388 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), handle_error_cqe() 477 cqe->wqe = new_wqe; handle_error_cqe() 511 be32_to_cpu(cqe->wqe)); mthca_poll_one() 540 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset) mthca_poll_one() 546 u32 wqe = be32_to_cpu(cqe->wqe); mthca_poll_one() local 548 wqe_index = wqe >> srq->wqe_shift; mthca_poll_one() 550 mthca_free_srq_wqe(srq, wqe); mthca_poll_one() 552 s32 wqe; mthca_poll_one() local 554 wqe = be32_to_cpu(cqe->wqe); mthca_poll_one() 555 wqe_index = wqe >> wq->wqe_shift; mthca_poll_one()
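The cqe->wqe field above is a byte offset into the QP buffer, which is why the poll path recovers the ring index with a subtract (send side only, to skip past the receive queue) and a shift by wqe_shift. Sketched as a pure function; the final mask assumes a power-of-two ring and is an illustration, not a quote of the driver:

    #include <stdint.h>

    static uint32_t send_wqe_index(uint32_t cqe_wqe, uint32_t send_wqe_offset,
                                   unsigned wqe_shift, uint32_t sq_entries)
    {
        return ((cqe_wqe - send_wqe_offset) >> wqe_shift) & (sq_entries - 1);
    }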
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
H A D | en_tx.c | 46 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); mlx5e_send_nop() local 48 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; mlx5e_send_nop() 60 mlx5e_tx_notify_hw(sq, wqe, 0); mlx5e_send_nop() 162 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); mlx5e_sq_xmit() local 164 struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; mlx5e_sq_xmit() 165 struct mlx5_wqe_eth_seg *eseg = &wqe->eth; mlx5e_sq_xmit() 176 memset(wqe, 0, sizeof(*wqe)); mlx5e_sq_xmit() 218 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; mlx5e_sq_xmit() 286 mlx5e_tx_notify_hw(sq, wqe, bf_sz); mlx5e_sq_xmit() 289 /* fill sq edge with nops to avoid wqe wrap around */ mlx5e_sq_xmit()
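The "fill sq edge with nops" comment above is the contract that a data WQE never straddles the ring boundary: if the next entry would cross the edge, one-slot NOP WQEs are posted until the producer counter wraps cleanly. A sketch of the index math, with post_nop() as a hypothetical callback:

    #include <stdint.h>

    /*
     * Post NOPs until a WQE of ds slots fits before the ring edge.  pc
     * is the free-running producer counter, sz the ring size in slots
     * (power of two); post_nop() writes a one-slot NOP WQE at the given
     * index.  Returns the advanced counter.
     */
    static uint16_t fill_edge_with_nops(uint16_t pc, uint16_t sz, uint16_t ds,
                                        void (*post_nop)(uint16_t pi))
    {
        while ((uint16_t)(pc & (sz - 1)) + ds > sz) { /* would straddle */
            post_nop(pc & (sz - 1));
            pc++;                                     /* NOP is one slot */
        }
        return pc;
    }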
|
H A D | en_rx.c | 39 struct mlx5e_rx_wqe *wqe, u16 ix) mlx5e_alloc_rx_wqe() 61 wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); mlx5e_alloc_rx_wqe() 81 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); mlx5e_post_rx_wqes() local 83 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) mlx5e_post_rx_wqes() 86 mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); mlx5e_post_rx_wqes() 228 struct mlx5e_rx_wqe *wqe; mlx5e_poll_rx_cq() local 242 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); mlx5e_poll_rx_cq() 264 &wqe->next.next_wqe_index); mlx5e_poll_rx_cq() 38 mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) mlx5e_alloc_rx_wqe() argument
|
H A D | qp.c | 144 pfault.wqe.wqe_index = mlx5_eq_pagefault() 145 be16_to_cpu(pf_eqe->wqe.wqe_index); mlx5_eq_pagefault() 146 pfault.wqe.packet_size = mlx5_eq_pagefault() 147 be16_to_cpu(pf_eqe->wqe.packet_length); mlx5_eq_pagefault() 150 qpn, pfault.wqe.wqe_index); mlx5_eq_pagefault()
|
H A D | en.h | 591 struct mlx5e_tx_wqe *wqe, int bf_sz) mlx5e_tx_notify_hw() 595 /* ensure wqe is visible to device before updating doorbell record */ mlx5e_tx_notify_hw() 606 __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz); mlx5e_tx_notify_hw() 612 mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); mlx5e_tx_notify_hw() 590 mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5e_tx_wqe *wqe, int bf_sz) mlx5e_tx_notify_hw() argument
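mlx5e_tx_notify_hw() above encodes the essential ordering rule from its own comment: the WQE must be visible before the doorbell record is updated, and the record before the MMIO kick, or the device may fetch a half-written entry. A userspace-flavoured sketch using a compiler fence as a stand-in for the kernel's dma_wmb(); all names here are illustrative:

    #include <stdint.h>

    /* Compiler fence standing in for the kernel's dma_wmb() (sketch). */
    #define dma_wmb_() __atomic_thread_fence(__ATOMIC_RELEASE)

    struct sq {
        volatile uint32_t *db_rec;   /* doorbell record in host memory */
        volatile uint64_t *db_reg;   /* mapped device doorbell register */
        uint16_t pc;                 /* producer counter */
    };

    static void ring_doorbell(struct sq *sq, uint64_t ctrl_word)
    {
        /* caller has already written the WQE with plain stores */
        dma_wmb_();                  /* WQE before doorbell record */
        *sq->db_rec = sq->pc;
        dma_wmb_();                  /* record before the MMIO kick */
        *sq->db_reg = ctrl_word;     /* device now fetches the WQE */
    }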
|
H A D | en_main.c | 343 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); mlx5e_create_rq() local 346 wqe->data.lkey = c->mkey_be; mlx5e_create_rq() 347 wqe->data.byte_count = mlx5e_create_rq()
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
H A D | qp.c | 210 __be32 *wqe; stamp_send_wqe() local 225 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); stamp_send_wqe() 226 *wqe = stamp; stamp_send_wqe() 232 wqe = buf + i; stamp_send_wqe() 233 *wqe = cpu_to_be32(0xffffffff); stamp_send_wqe() 242 void *wqe; post_nop_wqe() local 245 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); post_nop_wqe() 249 struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; post_nop_wqe() 258 inl = wqe + s; post_nop_wqe() 2144 void *wqe, unsigned *mlx_seg_len) build_sriov_qp0_header() 2148 struct mlx4_wqe_mlx_seg *mlx = wqe; build_sriov_qp0_header() 2149 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; build_sriov_qp0_header() 2266 void *wqe, unsigned *mlx_seg_len) build_mlx_header() 2269 struct mlx4_wqe_mlx_seg *mlx = wqe; build_mlx_header() 2270 struct mlx4_wqe_ctrl_seg *ctrl = wqe; build_mlx_header() 2271 struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; build_mlx_header() 2623 static void build_tunnel_header(struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) build_tunnel_header() argument 2625 struct mlx4_wqe_inline_seg *inl = wqe; build_tunnel_header() 2706 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_ud_wr *wr, build_lso_seg() argument 2710 unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); build_lso_seg() 2719 memcpy(wqe->header, wr->header, wr->hlen); build_lso_seg() 2741 static void add_zero_len_inline(void *wqe) add_zero_len_inline() argument 2743 struct mlx4_wqe_inline_seg *inl = wqe; add_zero_len_inline() 2744 memset(wqe, 0, 16); add_zero_len_inline() 2752 void *wqe; mlx4_ib_post_send() local 2795 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); mlx4_ib_post_send() 2810 wqe += sizeof *ctrl; mlx4_ib_post_send() 2820 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, mlx4_ib_post_send() 2822 wqe += sizeof (struct mlx4_wqe_raddr_seg); mlx4_ib_post_send() 2824 set_atomic_seg(wqe, atomic_wr(wr)); mlx4_ib_post_send() 2825 wqe += sizeof (struct mlx4_wqe_atomic_seg); mlx4_ib_post_send() 2833 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, mlx4_ib_post_send() 2835 wqe += sizeof (struct mlx4_wqe_raddr_seg); mlx4_ib_post_send() 2837 set_masked_atomic_seg(wqe, atomic_wr(wr)); mlx4_ib_post_send() 2838 wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); mlx4_ib_post_send() 2848 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, mlx4_ib_post_send() 2850 wqe += sizeof (struct mlx4_wqe_raddr_seg); mlx4_ib_post_send() 2857 set_local_inv_seg(wqe, wr->ex.invalidate_rkey); mlx4_ib_post_send() 2858 wqe += sizeof (struct mlx4_wqe_local_inval_seg); mlx4_ib_post_send() 2865 set_reg_seg(wqe, reg_wr(wr)); mlx4_ib_post_send() 2866 wqe += sizeof(struct mlx4_wqe_fmr_seg); mlx4_ib_post_send() 2873 set_bind_seg(wqe, bind_mw_wr(wr)); mlx4_ib_post_send() 2874 wqe += sizeof(struct mlx4_wqe_bind_seg); mlx4_ib_post_send() 2890 wqe += seglen; mlx4_ib_post_send() 2896 set_datagram_seg(wqe, ud_wr(wr)); mlx4_ib_post_send() 2898 *(__be32 *) wqe |= cpu_to_be32(0x80000000); mlx4_ib_post_send() 2899 wqe += sizeof (struct mlx4_wqe_datagram_seg); mlx4_ib_post_send() 2903 set_datagram_seg(wqe, ud_wr(wr)); mlx4_ib_post_send() 2904 wqe += sizeof (struct mlx4_wqe_datagram_seg); mlx4_ib_post_send() 2908 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, mlx4_ib_post_send() 2914 lso_wqe = (__be32 *) wqe; mlx4_ib_post_send() 2915 wqe += seglen; mlx4_ib_post_send() 2927 wqe += seglen; mlx4_ib_post_send() 2930 add_zero_len_inline(wqe); mlx4_ib_post_send() 2931 wqe += 16; mlx4_ib_post_send() 2933 build_tunnel_header(ud_wr(wr), wqe, &seglen); 
mlx4_ib_post_send() local 2934 wqe += seglen; mlx4_ib_post_send() 2943 set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, mlx4_ib_post_send() 2946 wqe += sizeof (struct mlx4_wqe_datagram_seg); mlx4_ib_post_send() 2948 build_tunnel_header(ud_wr(wr), wqe, &seglen); mlx4_ib_post_send() local 2949 wqe += seglen; mlx4_ib_post_send() 2961 wqe += seglen; mlx4_ib_post_send() 2976 dseg = wqe; mlx4_ib_post_send() 3027 * Same optimization applies to padding with NOP wqe mlx4_ib_post_send() 2142 build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) build_sriov_qp0_header() argument 2265 build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_ud_wr *wr, void *wqe, unsigned *mlx_seg_len) build_mlx_header() argument
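stamp_send_wqe() near the top of the entry above is mlx4's defence against stale entries: once a WQE completes, it is overwritten ("stamped") with 0xffffffff so the HCA's prefetcher cannot mistake it for valid work. A rough sketch; the stride and pattern here are assumptions for illustration:

    #include <stdint.h>

    #define STAMP 0xffffffffu

    /*
     * Overwrite a completed send WQE with an invalid pattern, one
     * 32-bit stamp per 64-byte chunk.  sq_buf holds wqe_cnt entries of
     * (1 << wqe_shift) bytes each, indexed by a free-running counter n.
     */
    static void stamp_wqe(uint8_t *sq_buf, unsigned wqe_cnt,
                          unsigned wqe_shift, unsigned n)
    {
        uint32_t *wqe = (uint32_t *)(void *)
            (sq_buf + ((n & (wqe_cnt - 1)) << wqe_shift));
        unsigned words = (1u << wqe_shift) / 4;
        unsigned off;

        for (off = 0; off < words; off += 16)    /* every 64 bytes */
            wqe[off] = STAMP;
    }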
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
H A D | pci.c | 428 static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, mlxsw_pci_wqe_frag_map() argument 440 mlxsw_pci_wqe_address_set(wqe, index, mapaddr); mlxsw_pci_wqe_frag_map() 441 mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len); mlxsw_pci_wqe_frag_map() 445 static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, mlxsw_pci_wqe_frag_unmap() argument 449 size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index); mlxsw_pci_wqe_frag_unmap() 450 dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index); mlxsw_pci_wqe_frag_unmap() 461 char *wqe = elem_info->elem; mlxsw_pci_rdq_skb_alloc() local 470 /* Assume that wqe was previously zeroed. */ mlxsw_pci_rdq_skb_alloc() 472 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, mlxsw_pci_rdq_skb_alloc() 489 char *wqe; mlxsw_pci_rdq_skb_free() local 492 wqe = elem_info->elem; mlxsw_pci_rdq_skb_free() 494 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); mlxsw_pci_rdq_skb_free() 648 char *wqe; mlxsw_pci_cqe_sdq_handle() local 655 wqe = elem_info->elem; mlxsw_pci_cqe_sdq_handle() 657 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); mlxsw_pci_cqe_sdq_handle() 673 char *wqe; mlxsw_pci_cqe_rdq_handle() local 683 wqe = elem_info->elem; mlxsw_pci_cqe_rdq_handle() 684 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); mlxsw_pci_cqe_rdq_handle() 703 memset(wqe, 0, q->elem_size); mlxsw_pci_cqe_rdq_handle() 1524 char *wqe; mlxsw_pci_skb_transmit() local 1544 wqe = elem_info->elem; mlxsw_pci_skb_transmit() 1545 mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ mlxsw_pci_skb_transmit() 1546 mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad); mlxsw_pci_skb_transmit() 1547 mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET); mlxsw_pci_skb_transmit() 1549 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, mlxsw_pci_skb_transmit() 1557 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1, mlxsw_pci_skb_transmit() 1567 mlxsw_pci_wqe_byte_count_set(wqe, i, 0); mlxsw_pci_skb_transmit() 1577 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); mlxsw_pci_skb_transmit()
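mlxsw_pci_wqe_frag_map()/_unmap() above pair DMA mapping with the WQE's indexed address/byte_count fields: the map path records where the NIC should read each fragment, and the completion path reads the same fields back to unmap. A sketch with a fake identity dma_map(); real code would use the DMA API and handle errors per fragment:

    #include <stdint.h>
    #include <stddef.h>

    #define MAX_FRAGS 4

    struct wqe {                     /* simplified; real fields are packed */
        uint64_t address[MAX_FRAGS];
        uint16_t byte_count[MAX_FRAGS];
    };

    /* Fake identity mapping standing in for the DMA API (sketch only). */
    static uint64_t dma_map_fake(const void *p, size_t len)
    {
        (void)len;
        return (uint64_t)(uintptr_t)p;
    }

    static int wqe_frag_map(struct wqe *wqe, int index,
                            const void *frag, size_t frag_len)
    {
        uint64_t mapaddr = dma_map_fake(frag, frag_len);

        if (!mapaddr)
            return -1;               /* mapping failed */
        wqe->address[index]    = mapaddr;            /* where the NIC reads */
        wqe->byte_count[index] = (uint16_t)frag_len; /* how much it reads */
        return 0;
    }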
|
H A D | pci.h | 97 MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1); 109 MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1); 114 MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4); 119 MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false); 125 MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
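The MLXSW_ITEM32/ITEM16/ITEM64 lines above generate typed get/set accessors for packed WQE fields, each described by a byte offset, bit shift, and width. A generic version of the 32-bit pair might look like the following; endianness conversion, which the real macros perform, is deliberately omitted:

    #include <stdint.h>

    static uint32_t item32_get(const uint32_t *buf, unsigned byte_off,
                               unsigned shift, unsigned width)
    {
        uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1);

        return (buf[byte_off / 4] >> shift) & mask;
    }

    static void item32_set(uint32_t *buf, unsigned byte_off,
                           unsigned shift, unsigned width, uint32_t val)
    {
        uint32_t mask = (width >= 32) ? 0xffffffffu : ((1u << width) - 1);

        buf[byte_off / 4] = (buf[byte_off / 4] & ~(mask << shift)) |
                            ((val & mask) << shift);
    }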
|
/linux-4.4.14/drivers/infiniband/hw/nes/ |
H A D | nes_verbs.c | 220 struct nes_hw_qp_wqe *wqe; nes_bind_mw() local 240 wqe = &nesqp->hwqp.sq_vbase[head]; nes_bind_mw() 241 /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */ nes_bind_mw() 242 nes_fill_init_qp_wqe(wqe, nesqp, head); nes_bind_mw() 244 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp); nes_bind_mw() 257 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc); nes_bind_mw() 258 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, nes_bind_mw() 260 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey); nes_bind_mw() 261 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX, nes_bind_mw() 263 wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0; nes_bind_mw() 265 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp); nes_bind_mw() 3227 fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey) fill_wqe_sg_send() argument 3232 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4), fill_wqe_sg_send() 3234 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_LENGTH0_IDX + (sge_index*4), fill_wqe_sg_send() 3237 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), fill_wqe_sg_send() 3240 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), 0); fill_wqe_sg_send() 3246 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX, fill_wqe_sg_send() 3261 struct nes_hw_qp_wqe *wqe; nes_post_send() local 3291 wqe = &nesqp->hwqp.sq_vbase[head]; nes_post_send() 3292 /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n", nes_post_send() 3293 nesqp->hwqp.qp_id, wqe, head); */ nes_post_send() 3294 nes_fill_init_qp_wqe(wqe, nesqp, head); nes_post_send() 3296 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, nes_post_send() 3312 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX, nes_post_send() 3327 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX], nes_post_send() 3329 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX, nes_post_send() 3333 fill_wqe_sg_send(wqe, ib_wr, 1); nes_post_send() 3349 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, nes_post_send() 3351 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, nes_post_send() 3357 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX], nes_post_send() 3359 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX, nes_post_send() 3363 fill_wqe_sg_send(wqe, ib_wr, 1); nes_post_send() 3366 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = nes_post_send() 3367 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]; nes_post_send() 3382 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX, nes_post_send() 3386 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX, nes_post_send() 3388 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX, nes_post_send() 3390 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX, nes_post_send() 3392 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, nes_post_send() 3394 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX, nes_post_send() 3399 set_wqe_32bit_value(wqe->wqe_words, nes_post_send() 3415 set_wqe_64bit_value(wqe->wqe_words, nes_post_send() 3418 set_wqe_32bit_value(wqe->wqe_words, 
nes_post_send() 3421 set_wqe_32bit_value(wqe->wqe_words, nes_post_send() 3423 set_wqe_32bit_value(wqe->wqe_words, nes_post_send() 3453 set_wqe_64bit_value(wqe->wqe_words, nes_post_send() 3457 set_wqe_32bit_value(wqe->wqe_words, nes_post_send() 3484 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc); nes_post_send() 3523 struct nes_hw_qp_wqe *wqe; nes_post_recv() local 3559 wqe = &nesqp->hwqp.rq_vbase[head]; nes_post_recv() 3561 /* nes_debug(NES_DBG_IW_RX, "QP%u:processing rq wqe at %p, head = %u.\n", nes_post_recv() 3562 nesqp->hwqp.qp_id, wqe, head); */ nes_post_recv() 3563 nes_fill_init_qp_wqe(wqe, nesqp, head); nes_post_recv() 3565 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, nes_post_recv() 3569 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4), nes_post_recv() 3571 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4), nes_post_recv() 3573 set_wqe_32bit_value(wqe->wqe_words,NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4), nes_post_recv() 3578 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX, nes_post_recv() 3755 /* Update the wqe index and set status to flush */ nes_poll_cq()
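The nes entries above funnel everything through set_wqe_32bit_value()/set_wqe_64bit_value(): a WQE is just an array of little-endian 32-bit words, and 64-bit fields span two consecutive words, low half first. A hedged sketch, assuming a little-endian host so the byte swap is a no-op:

    #include <stdint.h>

    static void set_wqe_32(uint32_t *wqe_words, unsigned idx, uint32_t v)
    {
        wqe_words[idx] = v;          /* cpu_to_le32() on a LE host */
    }

    static void set_wqe_64(uint32_t *wqe_words, unsigned idx, uint64_t v)
    {
        set_wqe_32(wqe_words, idx,     (uint32_t)v);         /* low word  */
        set_wqe_32(wqe_words, idx + 1, (uint32_t)(v >> 32)); /* high word */
    }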
|
H A D | nes.h | 335 nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head) nes_fill_init_qp_wqe() argument 339 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX, nes_fill_init_qp_wqe() 341 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value); nes_fill_init_qp_wqe()
|
H A D | nes_cm.c | 777 struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0]; build_rdma0_msg() local 781 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); build_rdma0_msg() 783 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; build_rdma0_msg() 784 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; build_rdma0_msg() 789 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = build_rdma0_msg() 791 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; build_rdma0_msg() 792 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; build_rdma0_msg() 793 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; build_rdma0_msg() 802 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = build_rdma0_msg() 804 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1; build_rdma0_msg() 805 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX] = 0; build_rdma0_msg() 806 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] = 0; build_rdma0_msg() 807 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_STAG_IDX] = 1; build_rdma0_msg() 808 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 1; build_rdma0_msg() 3228 struct nes_hw_qp_wqe *wqe; nes_accept() local 3313 wqe = &nesqp->hwqp.sq_vbase[0]; nes_accept() 3341 set_wqe_64bit_value(wqe->wqe_words, nes_accept() 3344 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = nes_accept() 3347 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = nes_accept() 3349 set_wqe_64bit_value(wqe->wqe_words, nes_accept() 3352 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = nes_accept() 3354 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey; nes_accept()
|
H A D | nes_hw.c | 1932 /* clear wqe stall before destroying NIC QP */ nes_destroy_nic_qp() 3111 nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n", nes_cqp_ce_handler() 3904 /* If wqe in error was identified, set code to be put into cqe */ flush_wqes()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
H A D | ehca_qp.c | 1066 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe 1076 struct ehca_wqe *wqe; prepare_sqe_rts() local 1079 /* get send wqe pointer */ prepare_sqe_rts() 1092 /* convert wqe pointer to vadr */ prepare_sqe_rts() 1098 ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x" prepare_sqe_rts() 1103 /* loop sets wqe's purge bit */ prepare_sqe_rts() 1104 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); prepare_sqe_rts() 1106 while (wqe->optype != 0xff && wqe->wqef != 0xff) { prepare_sqe_rts() 1108 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); prepare_sqe_rts() 1109 wqe->nr_of_data_seg = 0; /* suppress data access */ prepare_sqe_rts() 1110 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ prepare_sqe_rts() 1112 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); prepare_sqe_rts() 1116 * bad wqe will be reprocessed and ignored when pol_cq() is called, prepare_sqe_rts() 1121 wqe->wqef = 0; prepare_sqe_rts() 1169 /* get send and receive wqe pointer */ check_for_left_cqes() 1388 /* sqe -> rts: set purge bit of bad wqe before actual trans */ internal_modify_qp() 1393 /* mark next free wqe if kernel */ internal_modify_qp() 1395 struct ehca_wqe *wqe; internal_modify_qp() local 1399 /* mark next free wqe */ internal_modify_qp() 1400 wqe = (struct ehca_wqe *) internal_modify_qp() 1402 wqe->optype = wqe->wqef = 0xff; internal_modify_qp() 1404 ibqp->qp_num, wqe); internal_modify_qp()
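prepare_sqe_rts() above recovers from an SQE error by walking the send queue from the bad WQE, zeroing nr_of_data_seg (so no data is touched) and setting WQEF_PURGE on each entry until it reaches the WQE the driver had pre-marked 0xff as "next free"; the purged entries then complete as flushed. A flat-array sketch of that walk, with illustrative flag values:

    #include <stdint.h>

    #define WQEF_PURGE 0x08u
    #define MARK_FREE  0xffu         /* pre-written into the next free WQE */

    struct swqe { uint8_t optype, wqef, nr_of_data_seg; };

    /*
     * Mark each posted WQE for purge; the driver guarantees the entry
     * after the last posted WQE carries the 0xff free marker, which
     * terminates the walk.  Returns how many entries were purged.
     */
    static int purge_bad_wqes(struct swqe *sq, unsigned entries, unsigned start)
    {
        int n = 0;
        unsigned i;

        for (i = start; ; i = (i + 1) % entries) {
            struct swqe *wqe = &sq[i];

            if (wqe->optype == MARK_FREE || wqe->wqef == MARK_FREE)
                break;               /* reached the free slot */
            wqe->nr_of_data_seg = 0; /* suppress any data access */
            wqe->wqef = WQEF_PURGE;  /* entry completes as flushed */
            n++;
        }
        return n;
    }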
|
H A D | ehca_reqs.c | 84 /* clear wqe header until sglist */ ehca_write_rwqe() 102 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); ehca_write_rwqe() 174 /* clear wqe header until sglist */ ehca_write_swqe() 310 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); ehca_write_swqe() 679 * ignore this to avoid double cqes of bad wqe ehca_poll_cq_one() 804 struct ehca_wqe *wqe; generate_flush_cqes() local 822 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); generate_flush_cqes() 823 if (!wqe) { generate_flush_cqes() 824 ehca_err(cq->device, "Invalid wqe offset=%#llx on " generate_flush_cqes() 829 wc->wr_id = replace_wr_id(wqe->work_request_id, generate_flush_cqes() 833 switch (wqe->optype) { generate_flush_cqes() 845 wqe->optype); generate_flush_cqes() 851 if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) { generate_flush_cqes() 852 wc->ex.imm_data = wqe->immediate_data; generate_flush_cqes()
|
H A D | ehca_classes.h | 175 unsigned int next_wqe_idx; /* Idx to first wqe to be flushed */
|
/linux-4.4.14/drivers/scsi/bnx2fc/ |
H A D | 57xx_hsi_bnx2fc.h | 639 __le16 wqe; member in struct:fcoe_sqe 782 __le16 wqe; member in struct:fcoe_xfrqe 834 __le16 wqe; member in struct:fcoe_cqe 898 __le32 wqe; member in struct:fcoe_lcqe 913 __le16 wqe; member in struct:fcoe_pend_wq_cqe 992 __le16 wqe; member in struct:fcoe_unsolicited_cqe
|
H A D | bnx2fc_hwi.c | 625 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) bnx2fc_process_unsol_compl() argument 644 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); bnx2fc_process_unsol_compl() 645 switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { bnx2fc_process_unsol_compl() 647 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> bnx2fc_process_unsol_compl() 872 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) bnx2fc_process_cq_compl() argument 887 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; bnx2fc_process_cq_compl() 997 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) bnx2fc_alloc_work() argument 1006 work->wqe = wqe; bnx2fc_alloc_work() 1017 u16 wqe; bnx2fc_process_new_cqes() local 1035 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == bnx2fc_process_new_cqes() 1040 if (wqe & FCOE_CQE_CQE_TYPE) { bnx2fc_process_new_cqes() 1042 bnx2fc_process_unsol_compl(tgt, wqe); bnx2fc_process_new_cqes() 1047 unsigned int cpu = wqe % num_possible_cpus(); bnx2fc_process_new_cqes() 1054 work = bnx2fc_alloc_work(tgt, wqe); bnx2fc_process_new_cqes() 1065 bnx2fc_process_cq_compl(tgt, wqe); bnx2fc_process_new_cqes() 1390 sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; bnx2fc_add_2_sq() 1391 sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; bnx2fc_add_2_sq()
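bnx2fc_process_new_cqes() above uses the toggle-bit ownership scheme visible in the FCOE_CQE_TOGGLE_BIT test: hardware writes each CQE with the current phase bit, and the consumer processes entries while the bit matches its expectation, flipping the expectation whenever the consumer index wraps. A sketch of the consumer loop, with an assumed bit position and a hypothetical handler callback:

    #include <stdint.h>

    #define CQE_TOGGLE_BIT 0x8000u   /* assumed position of the phase bit */

    struct cqe { uint16_t wqe; };

    struct cq {
        struct cqe *ring;
        uint16_t size;               /* entries, power of two */
        uint16_t cons_idx;           /* consumer position */
        uint16_t toggle;             /* expected phase, 0 or CQE_TOGGLE_BIT */
    };

    /* Drain every CQE the hardware has published; returns the count. */
    static int cq_poll(struct cq *cq, void (*handle)(uint16_t work))
    {
        int n = 0;

        for (;;) {
            struct cqe *cqe = &cq->ring[cq->cons_idx];

            if ((cqe->wqe & CQE_TOGGLE_BIT) != cq->toggle)
                break;                           /* not written yet */
            handle(cqe->wqe & ~CQE_TOGGLE_BIT);  /* task id / subtype */
            if (++cq->cons_idx == cq->size) {    /* wrap flips the phase */
                cq->cons_idx = 0;
                cq->toggle ^= CQE_TOGGLE_BIT;
            }
            n++;
        }
        return n;
    }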
|
H A D | bnx2fc.h | 478 u16 wqe; member in struct:bnx2fc_work 574 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
|
H A D | bnx2fc_fcoe.c | 616 bnx2fc_process_cq_compl(work->tgt, work->wqe); bnx2fc_percpu_io_thread() 2563 bnx2fc_process_cq_compl(work->tgt, work->wqe); bnx2fc_percpu_thread_destroy()
|
/linux-4.4.14/drivers/scsi/bfa/ |
H A D | bfa_svc.c | 671 struct bfa_fcxp_wqe_s *wqe; bfa_fcxp_put() local 674 bfa_q_deq(&mod->req_wait_q, &wqe); bfa_fcxp_put() 676 bfa_q_deq(&mod->rsp_wait_q, &wqe); bfa_fcxp_put() 678 if (wqe) { bfa_fcxp_put() 681 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles, bfa_fcxp_put() 682 wqe->nrsp_sgles, wqe->req_sga_cbfn, bfa_fcxp_put() 683 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn, bfa_fcxp_put() 684 wqe->rsp_sglen_cbfn); bfa_fcxp_put() 686 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); bfa_fcxp_put() 1112 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_req_rsp_alloc_wait() argument 1127 wqe->alloc_cbfn = alloc_cbfn; bfa_fcxp_req_rsp_alloc_wait() 1128 wqe->alloc_cbarg = alloc_cbarg; bfa_fcxp_req_rsp_alloc_wait() 1129 wqe->caller = caller; bfa_fcxp_req_rsp_alloc_wait() 1130 wqe->bfa = bfa; bfa_fcxp_req_rsp_alloc_wait() 1131 wqe->nreq_sgles = nreq_sgles; bfa_fcxp_req_rsp_alloc_wait() 1132 wqe->nrsp_sgles = nrsp_sgles; bfa_fcxp_req_rsp_alloc_wait() 1133 wqe->req_sga_cbfn = req_sga_cbfn; bfa_fcxp_req_rsp_alloc_wait() 1134 wqe->req_sglen_cbfn = req_sglen_cbfn; bfa_fcxp_req_rsp_alloc_wait() 1135 wqe->rsp_sga_cbfn = rsp_sga_cbfn; bfa_fcxp_req_rsp_alloc_wait() 1136 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn; bfa_fcxp_req_rsp_alloc_wait() 1139 list_add_tail(&wqe->qe, &mod->req_wait_q); bfa_fcxp_req_rsp_alloc_wait() 1141 list_add_tail(&wqe->qe, &mod->rsp_wait_q); bfa_fcxp_req_rsp_alloc_wait() 1145 bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) bfa_fcxp_walloc_cancel() argument 1149 WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) || bfa_fcxp_walloc_cancel() 1150 !bfa_q_is_on_q(&mod->rsp_wait_q, wqe)); bfa_fcxp_walloc_cancel() 1151 list_del(&wqe->qe); bfa_fcxp_walloc_cancel() 1228 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); bfa_lps_sm_init() 1345 bfa_reqq_wcancel(&lps->wqe); bfa_lps_sm_loginwait() 1374 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); bfa_lps_sm_online() 1395 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe); bfa_lps_sm_online() 1433 bfa_reqq_wcancel(&lps->wqe); bfa_lps_sm_online_n2n_pid_wait() 1444 bfa_reqq_wcancel(&lps->wqe); bfa_lps_sm_online_n2n_pid_wait() 1495 bfa_reqq_wcancel(&lps->wqe); bfa_lps_sm_logowait() 1554 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps); bfa_lps_attach() 5390 struct bfa_sgpg_wqe_s *wqe; bfa_sgpg_mfree() local 5404 wqe = bfa_q_first(&mod->sgpg_wait_q); bfa_sgpg_mfree() 5405 if (mod->free_sgpgs < wqe->nsgpg) bfa_sgpg_mfree() 5408 nsgpg = wqe->nsgpg; bfa_sgpg_mfree() 5409 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg); bfa_sgpg_mfree() 5410 wqe->nsgpg -= nsgpg; bfa_sgpg_mfree() 5411 if (wqe->nsgpg == 0) { bfa_sgpg_mfree() 5412 list_del(&wqe->qe); bfa_sgpg_mfree() 5413 wqe->cbfn(wqe->cbarg); bfa_sgpg_mfree() 5419 bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) bfa_sgpg_wait() argument 5426 wqe->nsgpg_total = wqe->nsgpg = nsgpg; bfa_sgpg_wait() 5436 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q); bfa_sgpg_wait() 5437 wqe->nsgpg -= mod->free_sgpgs; bfa_sgpg_wait() 5441 list_add_tail(&wqe->qe, &mod->sgpg_wait_q); bfa_sgpg_wait() 5445 bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) bfa_sgpg_wcancel() argument 5449 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe)); bfa_sgpg_wcancel() 5450 list_del(&wqe->qe); bfa_sgpg_wcancel() 5452 if (wqe->nsgpg_total != wqe->nsgpg) bfa_sgpg_wcancel() 5453 bfa_sgpg_mfree(bfa, &wqe->sgpg_q, bfa_sgpg_wcancel() 5454 wqe->nsgpg_total - wqe->nsgpg); bfa_sgpg_wcancel() 5458 bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), 
bfa_sgpg_winit() argument 5461 INIT_LIST_HEAD(&wqe->sgpg_q); bfa_sgpg_winit() 5462 wqe->cbfn = cbfn; bfa_sgpg_winit() 5463 wqe->cbarg = cbarg; bfa_sgpg_winit()
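The bfa entries above implement resource back-pressure with wait-queue elements: when an fcxp or sgpg allocation fails, the caller parks a wqe holding a resume callback, and bfa_fcxp_put()/bfa_sgpg_mfree() pop a waiter on every free and re-run the allocation on its behalf. A generic sketch of that callback queue, FIFO so waiters are served in order:

    #include <stddef.h>

    struct wait_elem {
        struct wait_elem *next;
        void (*cbfn)(void *cbarg);   /* resumed when a resource frees */
        void *cbarg;
    };

    struct wait_q { struct wait_elem *head, **tail; };

    static void waitq_init(struct wait_q *q)
    {
        q->head = NULL;
        q->tail = &q->head;
    }

    static void waitq_add(struct wait_q *q, struct wait_elem *e)
    {
        e->next = NULL;
        *q->tail = e;                /* append: FIFO order */
        q->tail = &e->next;
    }

    /* Free path: hand the released resource to the oldest waiter. */
    static int waitq_resume_one(struct wait_q *q)
    {
        struct wait_elem *e = q->head;

        if (!e)
            return 0;
        q->head = e->next;
        if (!q->head)
            q->tail = &q->head;
        e->cbfn(e->cbarg);           /* waiter retries its allocation */
        return 1;
    }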
|
H A D | bfa.h | 97 bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), bfa_reqq_winit() argument 100 wqe->qresume = qresume; bfa_reqq_winit() 101 wqe->cbarg = cbarg; bfa_reqq_winit() 108 * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
|
H A D | bfa_svc.h | 77 void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, 79 void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs); 80 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); 417 struct bfa_reqq_wait_s wqe; /* request wait queue element */ member in struct:bfa_lps_s 616 void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, 626 struct bfa_fcxp_wqe_s *wqe);
|
H A D | bfa_core.c | 699 struct bfa_reqq_wait_s *wqe; bfa_reqq_resume() local 710 wqe = (struct bfa_reqq_wait_s *) qe; list_for_each_safe() 711 wqe->qresume(wqe->cbarg); list_for_each_safe()
|
/linux-4.4.14/drivers/scsi/lpfc/ |
H A D | lpfc_sli.c | 86 * @wqe: The work Queue Entry to put on the Work queue. 88 * This routine will copy the contents of @wqe to the next available entry on 96 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) lpfc_sli4_wq_put() argument 106 temp_wqe = q->qe[q->host_index].wqe; lpfc_sli4_wq_put() 117 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); lpfc_sli4_wq_put() 119 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli4_wq_put() 120 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); lpfc_sli4_wq_put() 176 * @wqe: The Mailbox Queue Entry to put on the Work queue. 446 * @wqe: The Receive Queue Entry to put on the Receive queue. 448 * This routine will copy the contents of @wqe to the next available entry on 8144 * @wqe: Pointer to the work queue entry. 8147 * equivalent. The wqe pointer should not have any fields set when 8150 * wqe. 8156 union lpfc_wqe *wqe) lpfc_sli4_iocb2wqe() 8185 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); lpfc_sli4_iocb2wqe() 8188 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ lpfc_sli4_iocb2wqe() 8189 wqe->generic.wqe_com.word10 = 0; lpfc_sli4_iocb2wqe() 8200 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); lpfc_sli4_iocb2wqe() 8201 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); lpfc_sli4_iocb2wqe() 8205 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); lpfc_sli4_iocb2wqe() 8206 xmit_len = wqe->generic.bde.tus.f.bdeSize; lpfc_sli4_iocb2wqe() 8232 wqe->els_req.payload_len = xmit_len; lpfc_sli4_iocb2wqe() 8234 bf_set(wqe_tmo, &wqe->els_req.wqe_com, lpfc_sli4_iocb2wqe() 8237 bf_set(els_req64_vf, &wqe->els_req, 0); lpfc_sli4_iocb2wqe() 8239 bf_set(els_req64_vfid, &wqe->els_req, 0); lpfc_sli4_iocb2wqe() 8241 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, lpfc_sli4_iocb2wqe() 8243 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); lpfc_sli4_iocb2wqe() 8244 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); lpfc_sli4_iocb2wqe() 8259 bf_set(els_req64_sp, &wqe->els_req, 1); lpfc_sli4_iocb2wqe() 8260 bf_set(els_req64_sid, &wqe->els_req, lpfc_sli4_iocb2wqe() 8265 bf_set(els_req64_sid, &wqe->els_req, 0); lpfc_sli4_iocb2wqe() 8266 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); lpfc_sli4_iocb2wqe() 8267 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, lpfc_sli4_iocb2wqe() 8270 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); lpfc_sli4_iocb2wqe() 8271 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, lpfc_sli4_iocb2wqe() 8275 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, lpfc_sli4_iocb2wqe() 8277 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); lpfc_sli4_iocb2wqe() 8278 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); lpfc_sli4_iocb2wqe() 8279 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); lpfc_sli4_iocb2wqe() 8280 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); lpfc_sli4_iocb2wqe() 8281 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); lpfc_sli4_iocb2wqe() 8282 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); lpfc_sli4_iocb2wqe() 8283 wqe->els_req.max_response_payload_len = total_len - xmit_len; lpfc_sli4_iocb2wqe() 8286 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, lpfc_sli4_iocb2wqe() 8288 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, lpfc_sli4_iocb2wqe() 8294 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); lpfc_sli4_iocb2wqe() 8296 /* word3 iocb=io_tag32 wqe=reserved */ lpfc_sli4_iocb2wqe() 8297 wqe->xmit_sequence.rsvd3 = 0; lpfc_sli4_iocb2wqe() 8300 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); lpfc_sli4_iocb2wqe() 8301 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); lpfc_sli4_iocb2wqe() 8302 bf_set(wqe_iod, 
&wqe->xmit_sequence.wqe_com, lpfc_sli4_iocb2wqe()
8304 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, lpfc_sli4_iocb2wqe()
8306 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); lpfc_sli4_iocb2wqe()
8307 wqe->xmit_sequence.xmit_len = xmit_len; lpfc_sli4_iocb2wqe()
8311 /* word3 iocb=iotag32 wqe=seq_payload_len */ lpfc_sli4_iocb2wqe()
8312 wqe->xmit_bcast64.seq_payload_len = xmit_len; lpfc_sli4_iocb2wqe()
8313 /* word4 iocb=rsvd wqe=rsvd */ lpfc_sli4_iocb2wqe()
8314 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ lpfc_sli4_iocb2wqe()
8315 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ lpfc_sli4_iocb2wqe()
8316 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, lpfc_sli4_iocb2wqe()
8318 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); lpfc_sli4_iocb2wqe()
8319 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); lpfc_sli4_iocb2wqe()
8320 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, lpfc_sli4_iocb2wqe()
8322 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); lpfc_sli4_iocb2wqe()
8326 /* word3 iocb=iotag wqe=payload_offset_len */ lpfc_sli4_iocb2wqe()
8328 bf_set(payload_offset_len, &wqe->fcp_iwrite, lpfc_sli4_iocb2wqe()
8330 bf_set(cmd_buff_len, &wqe->fcp_iwrite, lpfc_sli4_iocb2wqe()
8332 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ lpfc_sli4_iocb2wqe()
8333 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ lpfc_sli4_iocb2wqe()
8334 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, lpfc_sli4_iocb2wqe()
8336 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); lpfc_sli4_iocb2wqe()
8338 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); lpfc_sli4_iocb2wqe()
8339 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); lpfc_sli4_iocb2wqe()
8340 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, lpfc_sli4_iocb2wqe()
8342 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); lpfc_sli4_iocb2wqe()
8343 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); lpfc_sli4_iocb2wqe()
8344 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); lpfc_sli4_iocb2wqe()
8346 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); lpfc_sli4_iocb2wqe()
8348 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); lpfc_sli4_iocb2wqe()
8349 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, lpfc_sli4_iocb2wqe()
8355 /* word3 iocb=iotag wqe=payload_offset_len */ lpfc_sli4_iocb2wqe()
8357 bf_set(payload_offset_len, &wqe->fcp_iread, lpfc_sli4_iocb2wqe()
8359 bf_set(cmd_buff_len, &wqe->fcp_iread, lpfc_sli4_iocb2wqe()
8361 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ lpfc_sli4_iocb2wqe()
8362 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ lpfc_sli4_iocb2wqe()
8363 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, lpfc_sli4_iocb2wqe()
8365 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); lpfc_sli4_iocb2wqe()
8367 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); lpfc_sli4_iocb2wqe()
8368 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); lpfc_sli4_iocb2wqe()
8369 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, lpfc_sli4_iocb2wqe()
8371 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); lpfc_sli4_iocb2wqe()
8372 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); lpfc_sli4_iocb2wqe()
8373 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); lpfc_sli4_iocb2wqe()
8375 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); lpfc_sli4_iocb2wqe()
8377 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); lpfc_sli4_iocb2wqe()
8378 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, lpfc_sli4_iocb2wqe()
8384 /* word3 iocb=iotag wqe=payload_offset_len */ lpfc_sli4_iocb2wqe()
8386 bf_set(payload_offset_len, &wqe->fcp_icmd, lpfc_sli4_iocb2wqe()
8388 bf_set(cmd_buff_len, &wqe->fcp_icmd, lpfc_sli4_iocb2wqe()
8390 /* word3 iocb=IO_TAG wqe=reserved */ lpfc_sli4_iocb2wqe()
8391 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); lpfc_sli4_iocb2wqe()
8393 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); lpfc_sli4_iocb2wqe()
8394 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); lpfc_sli4_iocb2wqe()
8395 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); lpfc_sli4_iocb2wqe()
8396 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); lpfc_sli4_iocb2wqe()
8397 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, lpfc_sli4_iocb2wqe()
8399 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); lpfc_sli4_iocb2wqe()
8400 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, lpfc_sli4_iocb2wqe()
8403 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); lpfc_sli4_iocb2wqe()
8405 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); lpfc_sli4_iocb2wqe()
8406 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, lpfc_sli4_iocb2wqe()
8424 /* word3 iocb=IO_TAG wqe=request_payload_len */ lpfc_sli4_iocb2wqe()
8425 wqe->gen_req.request_payload_len = xmit_len; lpfc_sli4_iocb2wqe()
8426 /* word4 iocb=parameter wqe=relative_offset memcpy */ lpfc_sli4_iocb2wqe()
8436 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); lpfc_sli4_iocb2wqe()
8437 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); lpfc_sli4_iocb2wqe()
8438 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); lpfc_sli4_iocb2wqe()
8439 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); lpfc_sli4_iocb2wqe()
8440 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); lpfc_sli4_iocb2wqe()
8441 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); lpfc_sli4_iocb2wqe()
8442 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); lpfc_sli4_iocb2wqe()
8443 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); lpfc_sli4_iocb2wqe()
8444 wqe->gen_req.max_response_payload_len = total_len - xmit_len; lpfc_sli4_iocb2wqe()
8450 /* word3 iocb=iotag32 wqe=response_payload_len */ lpfc_sli4_iocb2wqe()
8451 wqe->xmit_els_rsp.response_payload_len = xmit_len; lpfc_sli4_iocb2wqe()
8453 wqe->xmit_els_rsp.word4 = 0; lpfc_sli4_iocb2wqe()
8455 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, lpfc_sli4_iocb2wqe()
8462 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); lpfc_sli4_iocb2wqe()
8463 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, lpfc_sli4_iocb2wqe()
8467 &wqe->xmit_els_rsp.wqe_dest, 0); lpfc_sli4_iocb2wqe()
8471 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8473 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); lpfc_sli4_iocb2wqe()
8474 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8477 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8479 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); lpfc_sli4_iocb2wqe()
8480 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); lpfc_sli4_iocb2wqe()
8481 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); lpfc_sli4_iocb2wqe()
8482 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8484 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); lpfc_sli4_iocb2wqe()
8485 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, lpfc_sli4_iocb2wqe()
8490 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); lpfc_sli4_iocb2wqe()
8491 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, lpfc_sli4_iocb2wqe()
8493 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); lpfc_sli4_iocb2wqe()
8494 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8517 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); lpfc_sli4_iocb2wqe()
8519 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); lpfc_sli4_iocb2wqe()
8520 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); lpfc_sli4_iocb2wqe()
8521 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ lpfc_sli4_iocb2wqe()
8522 wqe->abort_cmd.rsrvd5 = 0; lpfc_sli4_iocb2wqe()
8523 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, lpfc_sli4_iocb2wqe()
8530 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); lpfc_sli4_iocb2wqe()
8531 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); lpfc_sli4_iocb2wqe()
8532 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, lpfc_sli4_iocb2wqe()
8544 memset(wqe, 0, sizeof(union lpfc_wqe)); lpfc_sli4_iocb2wqe()
8546 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8554 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8561 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8564 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); lpfc_sli4_iocb2wqe()
8565 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); lpfc_sli4_iocb2wqe()
8568 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, lpfc_sli4_iocb2wqe()
8570 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8572 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); lpfc_sli4_iocb2wqe()
8573 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8575 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); lpfc_sli4_iocb2wqe()
8576 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, lpfc_sli4_iocb2wqe()
8581 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8583 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8585 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, lpfc_sli4_iocb2wqe()
8605 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); lpfc_sli4_iocb2wqe()
8607 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); lpfc_sli4_iocb2wqe()
8609 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); lpfc_sli4_iocb2wqe()
8612 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); lpfc_sli4_iocb2wqe()
8613 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); lpfc_sli4_iocb2wqe()
8614 wqe->generic.wqe_com.abort_tag = abort_tag; lpfc_sli4_iocb2wqe()
8615 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); lpfc_sli4_iocb2wqe()
8616 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); lpfc_sli4_iocb2wqe()
8617 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); lpfc_sli4_iocb2wqe()
8618 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); lpfc_sli4_iocb2wqe()
8641 union lpfc_wqe wqe; __lpfc_sli_issue_iocb_s4() local
8691 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) __lpfc_sli_issue_iocb_s4()
8701 if (lpfc_sli4_wq_put(wq, &wqe)) __lpfc_sli_issue_iocb_s4()
8706 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) __lpfc_sli_issue_iocb_s4()
11851 "2579 Slow-path wqe consume event carries " lpfc_sli4_sp_handle_rel_wcqe()
12231 "2580 Fast-path wqe consume event carries " lpfc_sli4_fp_handle_rel_wcqe()
16971 union lpfc_wqe wqe; lpfc_drain_txq() local
17010 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) lpfc_drain_txq()
17011 fail_msg = "to convert iocb to wqe"; lpfc_drain_txq()
17012 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) lpfc_drain_txq()
8155 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, union lpfc_wqe *wqe) lpfc_sli4_iocb2wqe() argument
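Nearly all of the lpfc matches above funnel through the bf_set() helper, which packs a value into a named bit-field of one 32-bit WQE word. The following is a minimal, self-contained C sketch of that read-modify-write idiom in the conventional SHIFT/MASK/WORD token-pasting form; the wqe_tmo field definition (shift, mask, backing word) is invented for illustration and is not lpfc's real layout.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical field: an 8-bit timeout in bits 8..15 of word7. */
	#define wqe_tmo_SHIFT	8
	#define wqe_tmo_MASK	0x000000ff
	#define wqe_tmo_WORD	word7

	struct wqe_com_sketch {
		uint32_t word7;
	};

	/* Clear the field's bits in the backing word, then OR in the
	 * masked, shifted new value. */
	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

	int main(void)
	{
		struct wqe_com_sketch com = { 0 };

		bf_set(wqe_tmo, &com, 30);	/* cf. iocbq->iocb.ulpTimeout above */
		printf("word7 = 0x%08x\n", com.word7);	/* prints 0x00001e00 */
		return 0;
	}

Because each field carries its own SHIFT/MASK/WORD triple, a single macro serves every wqe_ct, wqe_dbde, wqe_lenloc, and similar assignment in the listing.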
|
H A D | lpfc_sli4.h | 123 union lpfc_wqe *wqe; member in union:sli4_qe
|
H A D | lpfc_hw4.h | 358 /* completion queue entry for wqe completions */ 405 /* completion queue entry for wqe release */
|
/linux-4.4.14/include/linux/mlx5/ |
H A D | device.h | 339 * Max wqe size for rdma read is 512 bytes, so this 340 * limits our max_sge_rd as the wqe needs to fit: 522 } __packed wqe; member in union:mlx5_eqe_page_fault::__anon12769
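The mlx5 comment above derives a scatter/gather limit from the 512-byte ceiling on an RDMA READ WQE. Below is a hedged reconstruction of that arithmetic; the segment sizes come from the comment itself, while the macro names are illustrative rather than the tree's own.

	/* A 512-byte RDMA READ WQE must carry one 16-byte ctrl segment,
	 * one 16-byte RDMA segment, and a 16-byte scatter element per SGE. */
	#define WQE_MAX_SIZE	512
	#define CTRL_SEG_SIZE	16
	#define RDMA_SEG_SIZE	16
	#define SGE_SIZE	16

	#define MAX_SGE_RD \
		((WQE_MAX_SIZE - CTRL_SEG_SIZE - RDMA_SEG_SIZE) / SGE_SIZE)
	/* (512 - 16 - 16) / 16 = 30 scatter/gather entries */

The same comment appears in mlx4's device.h further down in this listing.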
|
H A D | qp.h | 422 } wqe; member in union:mlx5_pagefault::__anon12890
|
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/ |
H A D | liquidio_common.h | 235 /* wqe 237 * | wqe word0-3 |
|
/linux-4.4.14/arch/mips/include/asm/octeon/ |
H A D | cvmx-helper.h | 39 #include <asm/octeon/cvmx-wqe.h>
|
H A D | cvmx-wqe.h | 30 * This header file defines the work queue entry (wqe) data structure. 33 * in this file to create a single point of definition of the wqe
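For orientation only, here is a loudly simplified, hypothetical sketch of what a work queue entry of this kind holds; the real cvmx_wqe_t in cvmx-wqe.h is specified bit by bit and differs in detail.

	#include <stdint.h>

	/* Hypothetical wqe layout: hardware metadata words first, then the
	 * packet pointer and a leading slice of the packet itself. */
	struct wqe_sketch {
		uint64_t word0;		/* scheduling: tag, tag type, group */
		uint64_t word1;		/* packet length, input port, QoS */
		uint64_t word2;		/* parse results and error flags */
		uint64_t packet_ptr;	/* address of the full packet buffer */
		uint8_t  packet_data[96]; /* first bytes, for fast inspection */
	};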
|
H A D | cvmx-pip.h | 36 #include <asm/octeon/cvmx-wqe.h>
|
H A D | cvmx-pow.h | 57 #include <asm/octeon/cvmx-wqe.h> 1914 * update the wqe in DRAM to match arguments. 1997 * update the wqe in DRAM to match arguments.
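The cvmx-pow.h comments matched at 1914 and 1997 warn that tag updates are not written back to the wqe in DRAM automatically. A hedged sketch of the usual request/process/resubmit loop follows, assuming the cvmx_pow_work_request_sync() and cvmx_pow_work_submit() helpers from this header; the tag, qos, and grp values are placeholders.

	#include <asm/octeon/cvmx-pow.h>
	#include <asm/octeon/cvmx-wqe.h>

	static void pow_work_once(void)
	{
		/* Block until the POW hands this core one work queue entry. */
		cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);

		if (!work)
			return;	/* no work pending for our group */

		/* ... process the packet the wqe describes ... */

		/* Hand the wqe back under a new tag.  The hardware tracks
		 * the tag passed here; keeping the DRAM copy of the wqe in
		 * sync is software's job, per the comments above. */
		cvmx_pow_work_submit(work, 0x1234, CVMX_POW_TAG_TYPE_ORDERED,
				     0 /* qos */, 0 /* grp */);
	}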
|
/linux-4.4.14/drivers/staging/octeon/ |
H A D | ethernet-rx.c | 40 #include <asm/octeon/cvmx-wqe.h>
|
H A D | ethernet-tx.c | 34 #include <asm/octeon/cvmx-wqe.h>
|
/linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
H A D | ocrdma.h | 405 /* provide synchronization to multiple context(s) posting wqe, rqe */
|
H A D | ocrdma_verbs.c | 1517 /* syncronize with wqe, rqe posting and cqe processing contexts */ ocrdma_modify_qp() 2299 /* make sure wqe is written before adapter can access it */ ocrdma_post_send() 2605 /* if wqe/rqe pending for which cqe needs to be returned, ocrdma_update_err_cqe()
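The ocrdma_post_send() match ("make sure wqe is written before adapter can access it") names a standard ordering rule: the stores that build the WQE must be visible in host memory before the doorbell write that lets the device fetch it. Below is a generic sketch of that idiom, not ocrdma's actual code; db_reg and doorbell_val are invented names.

	#include <linux/io.h>

	static void ring_sq_doorbell(void __iomem *db_reg, u32 doorbell_val)
	{
		/* make sure wqe is written before adapter can access it */
		wmb();
		iowrite32(doorbell_val, db_reg);
	}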
|
H A D | ocrdma_sli.h | 1867 /* extended wqe followed by hdr_wqe for Fast Memory register */
|
H A D | ocrdma_hw.c | 2145 /* sync with wqe and rqe posting */ ocrdma_qp_state_change()
|
/linux-4.4.14/include/linux/mlx4/ |
H A D | device.h | 431 * Max wqe size for rdma read is 512 bytes, so this 432 * limits our max_sge_rd as the wqe needs to fit:
|
/linux-4.4.14/drivers/scsi/bnx2i/ |
H A D | bnx2i.h | 382 * @num_wqe_sent: statistic counter, total wqe's sent
|
H A D | bnx2i_iscsi.c | 1127 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
H A D | en_tx.c | 870 /* Copy dst mac address to wqe. This allows loopback in eSwitch, mlx4_en_xmit()
|
H A D | main.c | 415 * phv bit was reported correctly in the wqe. To allow QinQ mlx4_dev_cap()
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
H A D | verbs.c | 582 dprintk("RPC: %s: insufficient wqe's available\n", rpcrdma_ep_create()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/ |
H A D | cnic_defs.h | 2942 __le16 wqe; member in struct:fcoe_sqe 3085 __le16 wqe; member in struct:fcoe_xfrqe
|