/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | iwch_qp.c |
    42   static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,   in build_rdma_send() argument
    51   wqe->send.rdmaop = T3_SEND_WITH_SE;   in build_rdma_send()
    53   wqe->send.rdmaop = T3_SEND;   in build_rdma_send()
    54   wqe->send.rem_stag = 0;   in build_rdma_send()
    58   wqe->send.rdmaop = T3_SEND_WITH_SE_INV;   in build_rdma_send()
    60   wqe->send.rdmaop = T3_SEND_WITH_INV;   in build_rdma_send()
    61   wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);   in build_rdma_send()
    68   wqe->send.reserved[0] = 0;   in build_rdma_send()
    69   wqe->send.reserved[1] = 0;   in build_rdma_send()
    70   wqe->send.reserved[2] = 0;   in build_rdma_send()
    [all …]

D | cxio_hal.c |
    140  struct t3_modify_qp_wr *wqe;   in cxio_hal_clear_qp_ctx() local
    141  struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);   in cxio_hal_clear_qp_ctx()
    146  wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));   in cxio_hal_clear_qp_ctx()
    147  memset(wqe, 0, sizeof(*wqe));   in cxio_hal_clear_qp_ctx()
    148  build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,   in cxio_hal_clear_qp_ctx()
    151  wqe->flags = cpu_to_be32(MODQP_WRITE_EC);   in cxio_hal_clear_qp_ctx()
    153  wqe->sge_cmd = cpu_to_be64(sge_cmd);   in cxio_hal_clear_qp_ctx()
    519  struct t3_modify_qp_wr *wqe;   in cxio_hal_init_ctrl_qp() local
    522  skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);   in cxio_hal_init_ctrl_qp()
    565  wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));   in cxio_hal_init_ctrl_qp()
    [all …]

D | cxio_wr.h |
    416  static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)   in fw_riwrh_opcode() argument
    418  return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));   in fw_riwrh_opcode()
    427  static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,   in build_fw_riwrh() argument
    431  wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |   in build_fw_riwrh()
    435  wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |   in build_fw_riwrh()
    439  ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);   in build_fw_riwrh()

D | cxio_dbg.c |
    111  void cxio_dump_wqe(union t3_wr *wqe)   in cxio_dump_wqe() argument
    113  __be64 *data = (__be64 *)wqe;   in cxio_dump_wqe()

D | cxio_hal.h |
    205  void cxio_dump_wqe(union t3_wr *wqe);

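The cxio_wr.h hits above show cxgb3's V_/G_ shift-and-mask idiom for packing the work-request opcode into the big-endian op_seop_flags word. Below is a minimal standalone sketch of that pack/unpack pattern; the shift and mask values are invented for illustration, and the kernel additionally converts through cpu_to_be32()/be32_to_cpu().

```c
#include <stdint.h>
#include <stdio.h>

/* hypothetical field position; the real S_/M_FW_RIWR_OP live in cxio_wr.h */
#define S_OP    24
#define M_OP    0xff
#define V_OP(x) ((uint32_t)(x) << S_OP)           /* pack into the word    */
#define G_OP(x) (((uint32_t)(x) >> S_OP) & M_OP)  /* unpack from the word  */

int main(void)
{
    uint32_t word = V_OP(0x2a) | 0x1234;   /* opcode packed alongside flags */

    printf("opcode = 0x%x\n", G_OP(word)); /* prints 0x2a */
    return 0;
}
```
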
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | qp.c |
    458  static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,   in build_rdma_send() argument
    470  wqe->send.sendop_pkd = cpu_to_be32(   in build_rdma_send()
    473  wqe->send.sendop_pkd = cpu_to_be32(   in build_rdma_send()
    475  wqe->send.stag_inv = 0;   in build_rdma_send()
    479  wqe->send.sendop_pkd = cpu_to_be32(   in build_rdma_send()
    482  wqe->send.sendop_pkd = cpu_to_be32(   in build_rdma_send()
    484  wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);   in build_rdma_send()
    490  wqe->send.r3 = 0;   in build_rdma_send()
    491  wqe->send.r4 = 0;   in build_rdma_send()
    496  ret = build_immd(sq, wqe->send.u.immd_src, wr,   in build_rdma_send()
    [all …]

D | t4.h |
    109  static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,   in init_wr_hdr() argument
    112  wqe->send.opcode = (u8)opcode;   in init_wr_hdr()
    113  wqe->send.flags = flags;   in init_wr_hdr()
    114  wqe->send.wrid = wrid;   in init_wr_hdr()
    115  wqe->send.r1[0] = 0;   in init_wr_hdr()
    116  wqe->send.r1[1] = 0;   in init_wr_hdr()
    117  wqe->send.r1[2] = 0;   in init_wr_hdr()
    118  wqe->send.len16 = len16;   in init_wr_hdr()
    458  static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)   in t4_ring_sq_db() argument
    464  if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {   in t4_ring_sq_db()
    [all …]

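init_wr_hdr() above fills the same header fields (opcode, flags, wrid, zeroed reserved words, len16) on every cxgb4 submission path before the WQE reaches hardware. A hedged sketch of that pattern follows; the struct layout here is invented, the real union t4_wr is defined in t4.h.

```c
#include <stdint.h>
#include <string.h>

struct demo_wr_hdr {
    uint8_t  opcode;
    uint8_t  flags;
    uint16_t wrid;    /* work-request id echoed back in the completion */
    uint8_t  r1[3];   /* reserved words: must be zeroed, as above */
    uint8_t  len16;   /* WQE length in 16-byte units */
};

static void demo_init_wr_hdr(struct demo_wr_hdr *h, uint16_t wrid,
                             uint8_t opcode, uint8_t flags, uint8_t len16)
{
    h->opcode = opcode;
    h->flags  = flags;
    h->wrid   = wrid;
    memset(h->r1, 0, sizeof(h->r1)); /* mirrors r1[0..2] = 0 in init_wr_hdr() */
    h->len16  = len16;
}

int main(void)
{
    struct demo_wr_hdr h;

    demo_init_wr_hdr(&h, 7, 0x04, 0x01, 2);
    return h.len16 == 2 ? 0 : 1;
}
```
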
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_rc.c |
    43   static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,   in restart_sge() argument
    48   len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;   in restart_sge()
    49   ss->sge = wqe->sg_list[0];   in restart_sge()
    50   ss->sg_list = wqe->sg_list + 1;   in restart_sge()
    51   ss->num_sge = wqe->wr.num_sge;   in restart_sge()
    52   ss->total_len = wqe->length;   in restart_sge()
    54   return wqe->length - len;   in restart_sge()
    236  struct qib_swqe *wqe;   in qib_make_rc_req() local
    273  wqe = get_swqe_ptr(qp, qp->s_last);   in qib_make_rc_req()
    274  qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?   in qib_make_rc_req()
    [all …]

D | qib_ruc.c |
    82   static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)   in qib_init_sge() argument
    95   for (i = j = 0; i < wqe->num_sge; i++) {   in qib_init_sge()
    96   if (wqe->sg_list[i].length == 0)   in qib_init_sge()
    100  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))   in qib_init_sge()
    102  qp->r_len += wqe->sg_list[i].length;   in qib_init_sge()
    118  wc.wr_id = wqe->wr_id;   in qib_init_sge()
    145  struct qib_rwqe *wqe;   in qib_get_rwqe() local
    177  wqe = get_rwqe_ptr(rq, tail);   in qib_get_rwqe()
    186  if (!wr_id_only && !qib_init_sge(qp, wqe)) {   in qib_get_rwqe()
    190  qp->r_wr_id = wqe->wr_id;   in qib_get_rwqe()
    [all …]

D | qib_uc.c |
    49   struct qib_swqe *wqe;   in qib_make_uc_req() local
    70   wqe = get_swqe_ptr(qp, qp->s_last);   in qib_make_uc_req()
    71   qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in qib_make_uc_req()
    84   wqe = get_swqe_ptr(qp, qp->s_cur);   in qib_make_uc_req()
    97   wqe->psn = qp->s_next_psn;   in qib_make_uc_req()
    99   qp->s_sge.sge = wqe->sg_list[0];   in qib_make_uc_req()
    100  qp->s_sge.sg_list = wqe->sg_list + 1;   in qib_make_uc_req()
    101  qp->s_sge.num_sge = wqe->wr.num_sge;   in qib_make_uc_req()
    102  qp->s_sge.total_len = wqe->length;   in qib_make_uc_req()
    103  len = wqe->length;   in qib_make_uc_req()
    [all …]

D | qib_ud.c |
    241  struct qib_swqe *wqe;   in qib_make_ud_req() local
    264  wqe = get_swqe_ptr(qp, qp->s_last);   in qib_make_ud_req()
    265  qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in qib_make_ud_req()
    272  wqe = get_swqe_ptr(qp, qp->s_cur);   in qib_make_ud_req()
    280  ah_attr = &to_iah(wqe->ud_wr.ah)->attr;   in qib_make_ud_req()
    303  qib_ud_loopback(qp, wqe);   in qib_make_ud_req()
    305  qib_send_complete(qp, wqe, IB_WC_SUCCESS);   in qib_make_ud_req()
    311  extra_bytes = -wqe->length & 3;   in qib_make_ud_req()
    312  nwords = (wqe->length + extra_bytes) >> 2;   in qib_make_ud_req()
    316  qp->s_cur_size = wqe->length;   in qib_make_ud_req()
    [all …]

D | qib_srq.c |
    57   struct qib_rwqe *wqe;   in qib_post_srq_receive() local
    79   wqe = get_rwqe_ptr(&srq->rq, wq->head);   in qib_post_srq_receive()
    80   wqe->wr_id = wr->wr_id;   in qib_post_srq_receive()
    81   wqe->num_sge = wr->num_sge;   in qib_post_srq_receive()
    83   wqe->sg_list[i] = wr->sg_list[i];   in qib_post_srq_receive()
    282  struct qib_rwqe *wqe;   in qib_modify_srq() local
    285  wqe = get_rwqe_ptr(&srq->rq, tail);   in qib_modify_srq()
    286  p->wr_id = wqe->wr_id;   in qib_modify_srq()
    287  p->num_sge = wqe->num_sge;   in qib_modify_srq()
    288  for (i = 0; i < wqe->num_sge; i++)   in qib_modify_srq()
    [all …]

D | qib_verbs.c |
    340  struct qib_swqe *wqe;   in qib_post_one_send() local
    399  wqe = get_swqe_ptr(qp, qp->s_head);   in qib_post_one_send()
    403  memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));   in qib_post_one_send()
    405  memcpy(&wqe->reg_wr, reg_wr(wr),   in qib_post_one_send()
    406  sizeof(wqe->reg_wr));   in qib_post_one_send()
    410  memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));   in qib_post_one_send()
    413  memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));   in qib_post_one_send()
    415  memcpy(&wqe->wr, wr, sizeof(wqe->wr));   in qib_post_one_send()
    417  wqe->length = 0;   in qib_post_one_send()
    428  ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],   in qib_post_one_send()
    [all …]

D | qib_qp.c |
    429   struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);   in clear_mr_refs() local
    432   for (i = 0; i < wqe->wr.num_sge; i++) {   in clear_mr_refs()
    433   struct qib_sge *sge = &wqe->sg_list[i];   in clear_mr_refs()
    440   atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);   in clear_mr_refs()
    1367  struct qib_swqe *wqe;   in qib_qp_iter_print() local
    1370  wqe = get_swqe_ptr(qp, qp->s_last);   in qib_qp_iter_print()
    1377  wqe->wr.opcode,   in qib_qp_iter_print()
    1383  wqe->ssn,   in qib_qp_iter_print()

D | qib.h |
    262  struct qib_swqe *wqe;   member

D | qib_verbs.h |
    1114  void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,

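qib_init_sge() above, and its near-identical ipath and hfi1 twins in the listings below, validates each scatter/gather entry against the local key table and accumulates the total receive length, skipping zero-length entries. A simplified sketch of that loop, with lkey_ok() standing in for qib_lkey_ok():

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_sge {
    uint32_t lkey;     /* local protection key covering the buffer */
    uint32_t length;
};

static bool lkey_ok(const struct demo_sge *sge)
{
    return sge->lkey != 0;   /* stand-in for the real key-table lookup */
}

static int demo_init_sge(const struct demo_sge *sg_list, int num_sge,
                         size_t *total_len)
{
    int i, j;

    *total_len = 0;
    for (i = j = 0; i < num_sge; i++) {
        if (sg_list[i].length == 0)
            continue;            /* skip empty entries, as the driver does */
        if (!lkey_ok(&sg_list[i]))
            return 0;            /* bad lkey: caller posts a flush-error WC */
        *total_len += sg_list[i].length;
        j++;                     /* j counts the entries actually kept */
    }
    return 1;
}

int main(void)
{
    struct demo_sge sg[2] = { { 1, 0 }, { 2, 4096 } };
    size_t len;

    return demo_init_sge(sg, 2, &len) && len == 4096 ? 0 : 1;
}
```
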
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_rc.c |
    42   static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,   in restart_sge() argument
    47   len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;   in restart_sge()
    48   ss->sge = wqe->sg_list[0];   in restart_sge()
    49   ss->sg_list = wqe->sg_list + 1;   in restart_sge()
    50   ss->num_sge = wqe->wr.num_sge;   in restart_sge()
    52   return wqe->length - len;   in restart_sge()
    62   static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)   in ipath_init_restart() argument
    66   qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,   in ipath_init_restart()
    218  struct ipath_swqe *wqe;   in ipath_make_rc_req() local
    256  wqe = get_swqe_ptr(qp, qp->s_last);   in ipath_make_rc_req()
    [all …]

D | ipath_ruc.c |
    122  int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,   in ipath_init_sge() argument
    129  for (i = j = 0; i < wqe->num_sge; i++) {   in ipath_init_sge()
    130  if (wqe->sg_list[i].length == 0)   in ipath_init_sge()
    134  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))   in ipath_init_sge()
    136  *lengthp += wqe->sg_list[i].length;   in ipath_init_sge()
    145  wc.wr_id = wqe->wr_id;   in ipath_init_sge()
    171  struct ipath_rwqe *wqe;   in ipath_get_rwqe() local
    204  wqe = get_rwqe_ptr(rq, tail);   in ipath_get_rwqe()
    210  } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));   in ipath_get_rwqe()
    211  qp->r_wr_id = wqe->wr_id;   in ipath_get_rwqe()
    [all …]

D | ipath_uc.c |
    49   struct ipath_swqe *wqe;   in ipath_make_uc_req() local
    70   wqe = get_swqe_ptr(qp, qp->s_last);   in ipath_make_uc_req()
    71   ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in ipath_make_uc_req()
    84   wqe = get_swqe_ptr(qp, qp->s_cur);   in ipath_make_uc_req()
    97   qp->s_psn = wqe->psn = qp->s_next_psn;   in ipath_make_uc_req()
    98   qp->s_sge.sge = wqe->sg_list[0];   in ipath_make_uc_req()
    99   qp->s_sge.sg_list = wqe->sg_list + 1;   in ipath_make_uc_req()
    100  qp->s_sge.num_sge = wqe->wr.num_sge;   in ipath_make_uc_req()
    101  qp->s_len = len = wqe->length;   in ipath_make_uc_req()
    102  switch (wqe->wr.opcode) {   in ipath_make_uc_req()
    [all …]

D | ipath_ud.c |
    60   struct ipath_rwqe *wqe;   in ipath_ud_loopback() local
    132  wqe = get_rwqe_ptr(rq, tail);   in ipath_ud_loopback()
    134  if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {   in ipath_ud_loopback()
    148  wc.wr_id = wqe->wr_id;   in ipath_ud_loopback()
    245  struct ipath_swqe *wqe;   in ipath_make_ud_req() local
    268  wqe = get_swqe_ptr(qp, qp->s_last);   in ipath_make_ud_req()
    269  ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in ipath_make_ud_req()
    276  wqe = get_swqe_ptr(qp, qp->s_cur);   in ipath_make_ud_req()
    282  ah_attr = &to_iah(wqe->ud_wr.ah)->attr;   in ipath_make_ud_req()
    305  ipath_ud_loopback(qp, wqe);   in ipath_make_ud_req()
    [all …]

D | ipath_srq.c |
    57   struct ipath_rwqe *wqe;   in ipath_post_srq_receive() local
    79   wqe = get_rwqe_ptr(&srq->rq, wq->head);   in ipath_post_srq_receive()
    80   wqe->wr_id = wr->wr_id;   in ipath_post_srq_receive()
    81   wqe->num_sge = wr->num_sge;   in ipath_post_srq_receive()
    83   wqe->sg_list[i] = wr->sg_list[i];   in ipath_post_srq_receive()
    286  struct ipath_rwqe *wqe;   in ipath_modify_srq() local
    289  wqe = get_rwqe_ptr(&srq->rq, tail);   in ipath_modify_srq()
    290  p->wr_id = wqe->wr_id;   in ipath_modify_srq()
    291  p->num_sge = wqe->num_sge;   in ipath_modify_srq()
    292  for (i = 0; i < wqe->num_sge; i++)   in ipath_modify_srq()
    [all …]

D | ipath_verbs.c |
    338  struct ipath_swqe *wqe;   in ipath_post_one_send() local
    397  wqe = get_swqe_ptr(qp, qp->s_head);   in ipath_post_one_send()
    401  memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));   in ipath_post_one_send()
    405  memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));   in ipath_post_one_send()
    408  memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));   in ipath_post_one_send()
    410  memcpy(&wqe->wr, wr, sizeof(wqe->wr));   in ipath_post_one_send()
    412  wqe->length = 0;   in ipath_post_one_send()
    422  ok = ipath_lkey_ok(qp, &wqe->sg_list[j],   in ipath_post_one_send()
    426  wqe->length += length;   in ipath_post_one_send()
    429  wqe->wr.num_sge = j;   in ipath_post_one_send()
    [all …]

D | ipath_verbs.h |
    653  struct ipath_swqe *wqe;   member
    866  int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
    880  void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,

/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | rc.c |
    63   static u32 restart_sge(struct hfi1_sge_state *ss, struct hfi1_swqe *wqe,   in restart_sge() argument
    68   len = delta_psn(psn, wqe->psn) * pmtu;   in restart_sge()
    69   ss->sge = wqe->sg_list[0];   in restart_sge()
    70   ss->sg_list = wqe->sg_list + 1;   in restart_sge()
    71   ss->num_sge = wqe->wr.num_sge;   in restart_sge()
    72   ss->total_len = wqe->length;   in restart_sge()
    74   return wqe->length - len;   in restart_sge()
    265  struct hfi1_swqe *wqe;   in hfi1_make_rc_req() local
    305  wqe = get_swqe_ptr(qp, qp->s_last);   in hfi1_make_rc_req()
    306  hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?   in hfi1_make_rc_req()
    [all …]

D | ruc.c |
    100  static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)   in init_sge() argument
    113  for (i = j = 0; i < wqe->num_sge; i++) {   in init_sge()
    114  if (wqe->sg_list[i].length == 0)   in init_sge()
    118  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))   in init_sge()
    120  qp->r_len += wqe->sg_list[i].length;   in init_sge()
    136  wc.wr_id = wqe->wr_id;   in init_sge()
    163  struct hfi1_rwqe *wqe;   in hfi1_get_rwqe() local
    195  wqe = get_rwqe_ptr(rq, tail);   in hfi1_get_rwqe()
    204  if (!wr_id_only && !init_sge(qp, wqe)) {   in hfi1_get_rwqe()
    208  qp->r_wr_id = wqe->wr_id;   in hfi1_get_rwqe()
    [all …]

D | uc.c |
    67   struct hfi1_swqe *wqe;   in hfi1_make_uc_req() local
    90   wqe = get_swqe_ptr(qp, qp->s_last);   in hfi1_make_uc_req()
    91   hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in hfi1_make_uc_req()
    100  wqe = get_swqe_ptr(qp, qp->s_cur);   in hfi1_make_uc_req()
    115  wqe->psn = qp->s_next_psn;   in hfi1_make_uc_req()
    117  qp->s_sge.sge = wqe->sg_list[0];   in hfi1_make_uc_req()
    118  qp->s_sge.sg_list = wqe->sg_list + 1;   in hfi1_make_uc_req()
    119  qp->s_sge.num_sge = wqe->wr.num_sge;   in hfi1_make_uc_req()
    120  qp->s_sge.total_len = wqe->length;   in hfi1_make_uc_req()
    121  len = wqe->length;   in hfi1_make_uc_req()
    [all …]

D | srq.c |
    74   struct hfi1_rwqe *wqe;   in hfi1_post_srq_receive() local
    96   wqe = get_rwqe_ptr(&srq->rq, wq->head);   in hfi1_post_srq_receive()
    97   wqe->wr_id = wr->wr_id;   in hfi1_post_srq_receive()
    98   wqe->num_sge = wr->num_sge;   in hfi1_post_srq_receive()
    100  wqe->sg_list[i] = wr->sg_list[i];   in hfi1_post_srq_receive()
    299  struct hfi1_rwqe *wqe;   in hfi1_modify_srq() local
    302  wqe = get_rwqe_ptr(&srq->rq, tail);   in hfi1_modify_srq()
    303  p->wr_id = wqe->wr_id;   in hfi1_modify_srq()
    304  p->num_sge = wqe->num_sge;   in hfi1_modify_srq()
    305  for (i = 0; i < wqe->num_sge; i++)   in hfi1_modify_srq()
    [all …]

D | ud.c |
    272  struct hfi1_swqe *wqe;   in hfi1_make_ud_req() local
    296  wqe = get_swqe_ptr(qp, qp->s_last);   in hfi1_make_ud_req()
    297  hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);   in hfi1_make_ud_req()
    304  wqe = get_swqe_ptr(qp, qp->s_cur);   in hfi1_make_ud_req()
    312  ah_attr = &to_iah(wqe->ud_wr.ah)->attr;   in hfi1_make_ud_req()
    332  ud_loopback(qp, wqe);   in hfi1_make_ud_req()
    334  hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);   in hfi1_make_ud_req()
    340  extra_bytes = -wqe->length & 3;   in hfi1_make_ud_req()
    341  nwords = (wqe->length + extra_bytes) >> 2;   in hfi1_make_ud_req()
    345  qp->s_cur_size = wqe->length;   in hfi1_make_ud_req()
    [all …]

D | verbs.c |
    363  struct hfi1_swqe *wqe;   in post_one_send() local
    415  wqe = get_swqe_ptr(qp, qp->s_head);   in post_one_send()
    420  memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));   in post_one_send()
    424  memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));   in post_one_send()
    427  memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));   in post_one_send()
    429  memcpy(&wqe->wr, wr, sizeof(wqe->wr));   in post_one_send()
    431  wqe->length = 0;   in post_one_send()
    442  ok = hfi1_lkey_ok(rkt, pd, &wqe->sg_list[j],   in post_one_send()
    446  wqe->length += length;   in post_one_send()
    449  wqe->wr.num_sge = j;   in post_one_send()
    [all …]

D | qp.c |
    414   struct hfi1_swqe *wqe = get_swqe_ptr(qp, qp->s_last);   in clear_mr_refs() local
    417   for (i = 0; i < wqe->wr.num_sge; i++) {   in clear_mr_refs()
    418   struct hfi1_sge *sge = &wqe->sg_list[i];   in clear_mr_refs()
    425   atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount);   in clear_mr_refs()
    1638  struct hfi1_swqe *wqe;   in qp_iter_print() local
    1643  wqe = get_swqe_ptr(qp, qp->s_last);   in qp_iter_print()
    1652  wqe ? wqe->wr.opcode : 0,   in qp_iter_print()
    1658  wqe ? wqe->ssn : 0,   in qp_iter_print()

D | sdma.h |
    383  struct hfi1_swqe *wqe;   member

D | verbs.h |
    1085  void hfi1_send_complete(struct hfi1_qp *qp, struct hfi1_swqe *wqe,

/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | odp.c |
    311  struct mlx5_ib_pfault *pfault, void *wqe,   in pagefault_data_segments() argument
    324  wqe += sizeof(struct mlx5_wqe_srq_next_seg);   in pagefault_data_segments()
    331  while (wqe < wqe_end) {   in pagefault_data_segments()
    332  struct mlx5_wqe_data_seg *dseg = wqe;   in pagefault_data_segments()
    342  wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,   in pagefault_data_segments()
    345  wqe += sizeof(*dseg);   in pagefault_data_segments()
    385  void **wqe, void **wqe_end, int wqe_length)   in mlx5_ib_mr_initiator_pfault_handler() argument
    388  struct mlx5_wqe_ctrl_seg *ctrl = *wqe;   in mlx5_ib_mr_initiator_pfault_handler()
    389  u16 wqe_index = pfault->mpfault.wqe.wqe_index;   in mlx5_ib_mr_initiator_pfault_handler()
    429  *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;   in mlx5_ib_mr_initiator_pfault_handler()
    [all …]

D | qp.c |
    2061  static u8 calc_sig(void *wqe, int size)   in calc_sig() argument
    2063  u8 *p = wqe;   in calc_sig()
    2073  static u8 wq_sig(void *wqe)   in wq_sig() argument
    2075  return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);   in wq_sig()
    2079  void *wqe, int *sz)   in set_data_inl_seg() argument
    2089  seg = wqe;   in set_data_inl_seg()
    2090  wqe += sizeof(*seg);   in set_data_inl_seg()
    2099  if (unlikely(wqe + len > qend)) {   in set_data_inl_seg()
    2100  copy = qend - wqe;   in set_data_inl_seg()
    2101  memcpy(wqe, addr, copy);   in set_data_inl_seg()
    [all …]

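wq_sig() above derives the WQE size from bits 0..5 of byte 8 (in 16-byte units) and hands the buffer to calc_sig(), whose body is elided in this listing. The sketch below is an assumption, not the elided kernel code: a byte-wise XOR signature of the kind such WQE checksums typically compute.

```c
#include <stddef.h>
#include <stdint.h>

static uint8_t demo_calc_sig(const void *wqe, size_t size)
{
    const uint8_t *p = wqe;
    uint8_t res = 0;

    while (size--)
        res ^= *p++;        /* fold every byte of the WQE into one octet */
    return (uint8_t)~res;   /* inverted, so an all-zero WQE is not signed 0 */
}

static uint8_t demo_wq_sig(const void *wqe)
{
    /* size expression mirrors wq_sig(): (byte 8 & 0x3f) 16-byte units */
    size_t size = (size_t)(((const uint8_t *)wqe)[8] & 0x3f) << 4;

    return demo_calc_sig(wqe, size);
}

int main(void)
{
    uint8_t wqe[64] = { 0 };

    wqe[8] = 2;   /* declared size: 2 * 16 = 32 bytes */
    /* XOR of the signed bytes is just 0x02 here; inverted -> 0xfd */
    return demo_wq_sig(wqe) == 0xfd ? 0 : 1;
}
```
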
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_srq.c |
    90   static inline int *wqe_to_link(void *wqe)   in wqe_to_link() argument
    92   return (int *) (wqe + offsetof(struct mthca_next_seg, imm));   in wqe_to_link()
    151  void *wqe;   in mthca_alloc_srq_buf() local
    178  next = wqe = get_wqe(srq, i);   in mthca_alloc_srq_buf()
    181  *wqe_to_link(wqe) = i + 1;   in mthca_alloc_srq_buf()
    184  *wqe_to_link(wqe) = -1;   in mthca_alloc_srq_buf()
    188  for (scatter = wqe + sizeof (struct mthca_next_seg);   in mthca_alloc_srq_buf()
    189  (void *) scatter < wqe + (1 << srq->wqe_shift);   in mthca_alloc_srq_buf()
    487  void *wqe;   in mthca_tavor_post_srq_recv() local
    496  wqe = get_wqe(srq, ind);   in mthca_tavor_post_srq_recv()
    [all …]

D | mthca_qp.c |
    1607  void *wqe;   in mthca_tavor_post_send() local
    1643  wqe = get_send_wqe(qp, ind);   in mthca_tavor_post_send()
    1645  qp->sq.last = wqe;   in mthca_tavor_post_send()
    1647  ((struct mthca_next_seg *) wqe)->nda_op = 0;   in mthca_tavor_post_send()
    1648  ((struct mthca_next_seg *) wqe)->ee_nds = 0;   in mthca_tavor_post_send()
    1649  ((struct mthca_next_seg *) wqe)->flags =   in mthca_tavor_post_send()
    1657  ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;   in mthca_tavor_post_send()
    1659  wqe += sizeof (struct mthca_next_seg);   in mthca_tavor_post_send()
    1667  set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,   in mthca_tavor_post_send()
    1669  wqe += sizeof (struct mthca_raddr_seg);   in mthca_tavor_post_send()
    [all …]

D | mthca_cq.c |
    126  __be32 wqe;   member
    140  __be32 wqe;   member
    312  mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));   in mthca_cq_clean()
    388  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),   in handle_error_cqe()
    477  cqe->wqe = new_wqe;   in handle_error_cqe()
    511  be32_to_cpu(cqe->wqe));   in mthca_poll_one()
    540  wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)   in mthca_poll_one()
    546  u32 wqe = be32_to_cpu(cqe->wqe);   in mthca_poll_one() local
    548  wqe_index = wqe >> srq->wqe_shift;   in mthca_poll_one()
    550  mthca_free_srq_wqe(srq, wqe);   in mthca_poll_one()
    [all …]

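wqe_to_link() above is a free-list trick: while an SRQ WQE is unused, mthca stores the index of the next free WQE inside the WQE itself (in the imm field of its next segment), with -1 terminating the chain. A standalone sketch of the same idea over plain byte slots; the link offset is hypothetical, and memcpy() is used here to sidestep alignment concerns the kernel avoids by construction.

```c
#include <stdio.h>
#include <string.h>

#define NSLOTS    4
#define SLOT_SIZE 64
#define LINK_OFF  4    /* hypothetical offset of the reused field */

static unsigned char slots[NSLOTS][SLOT_SIZE];

static int get_link(int i)
{
    int v;

    memcpy(&v, slots[i] + LINK_OFF, sizeof(v));
    return v;
}

static void set_link(int i, int v)
{
    memcpy(slots[i] + LINK_OFF, &v, sizeof(v));
}

int main(void)
{
    int i;

    for (i = 0; i < NSLOTS - 1; i++)
        set_link(i, i + 1);      /* chain each free slot to the next */
    set_link(NSLOTS - 1, -1);    /* -1 terminates the free list */

    for (i = 0; i != -1; i = get_link(i))
        printf("free slot %d\n", i);
    return 0;
}
```
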
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci.h |
    97   MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
    109  MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
    114  MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
    119  MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
    125  MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);

D | pci.c |
    428  static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,   in mlxsw_pci_wqe_frag_map() argument
    440  mlxsw_pci_wqe_address_set(wqe, index, mapaddr);   in mlxsw_pci_wqe_frag_map()
    441  mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);   in mlxsw_pci_wqe_frag_map()
    445  static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,   in mlxsw_pci_wqe_frag_unmap() argument
    449  size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);   in mlxsw_pci_wqe_frag_unmap()
    450  dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);   in mlxsw_pci_wqe_frag_unmap()
    461  char *wqe = elem_info->elem;   in mlxsw_pci_rdq_skb_alloc() local
    472  err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,   in mlxsw_pci_rdq_skb_alloc()
    489  char *wqe;   in mlxsw_pci_rdq_skb_free() local
    492  wqe = elem_info->elem;   in mlxsw_pci_rdq_skb_free()
    [all …]

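Each MLXSW_ITEM32() line above declares a WQE field as an (offset, shift, width) triple and expands into generated get/set accessors, which pci.c then calls as mlxsw_pci_wqe_address_set() and friends. A simplified, host-endian sketch of such a generator macro; the real one also handles big-endian conversion and indexed (per-fragment) fields.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 64-bit shift so a width of 32 stays well-defined */
#define DEMO_MASK(width) ((uint32_t)((1ull << (width)) - 1))

#define DEMO_ITEM32(name, byte_off, shift, width)                    \
static inline uint32_t demo_##name##_get(const char *buf)            \
{                                                                    \
    uint32_t w;                                                      \
    memcpy(&w, buf + (byte_off), sizeof(w));                         \
    return (w >> (shift)) & DEMO_MASK(width);                        \
}                                                                    \
static inline void demo_##name##_set(char *buf, uint32_t val)        \
{                                                                    \
    uint32_t w;                                                      \
    memcpy(&w, buf + (byte_off), sizeof(w));                         \
    w &= ~(DEMO_MASK(width) << (shift));                             \
    w |= (val & DEMO_MASK(width)) << (shift);                        \
    memcpy(buf + (byte_off), &w, sizeof(w));                         \
}

/* mirrors "MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1)": 1-bit flag at bit 31 */
DEMO_ITEM32(wqe_c, 0x00, 31, 1)

int main(void)
{
    char wqe[32] = { 0 };

    demo_wqe_c_set(wqe, 1);
    printf("c flag = %u\n", demo_wqe_c_get(wqe));
    return 0;
}
```
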
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rx.c |
    39   struct mlx5e_rx_wqe *wqe, u16 ix)   in mlx5e_alloc_rx_wqe() argument
    61   wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);   in mlx5e_alloc_rx_wqe()
    81   struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);   in mlx5e_post_rx_wqes() local
    83   if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))   in mlx5e_post_rx_wqes()
    86   mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));   in mlx5e_post_rx_wqes()
    228  struct mlx5e_rx_wqe *wqe;   in mlx5e_poll_rx_cq() local
    242  wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);   in mlx5e_poll_rx_cq()
    264  &wqe->next.next_wqe_index);   in mlx5e_poll_rx_cq()

D | en_tx.c |
    46   struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);   in mlx5e_send_nop() local
    48   struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;   in mlx5e_send_nop()
    60   mlx5e_tx_notify_hw(sq, wqe, 0);   in mlx5e_send_nop()
    162  struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);   in mlx5e_sq_xmit() local
    164  struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;   in mlx5e_sq_xmit()
    165  struct mlx5_wqe_eth_seg *eseg = &wqe->eth;   in mlx5e_sq_xmit()
    176  memset(wqe, 0, sizeof(*wqe));   in mlx5e_sq_xmit()
    218  ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;   in mlx5e_sq_xmit()
    286  mlx5e_tx_notify_hw(sq, wqe, bf_sz);   in mlx5e_sq_xmit()

D | qp.c |
    144  pfault.wqe.wqe_index =   in mlx5_eq_pagefault()
    145  be16_to_cpu(pf_eqe->wqe.wqe_index);   in mlx5_eq_pagefault()
    146  pfault.wqe.packet_size =   in mlx5_eq_pagefault()
    147  be16_to_cpu(pf_eqe->wqe.packet_length);   in mlx5_eq_pagefault()
    150  qpn, pfault.wqe.wqe_index);   in mlx5_eq_pagefault()

D | en.h |
    591  struct mlx5e_tx_wqe *wqe, int bf_sz)   in mlx5e_tx_notify_hw() argument
    606  __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);   in mlx5e_tx_notify_hw()
    612  mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);   in mlx5e_tx_notify_hw()

D | en_main.c |
    343  struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);   in mlx5e_create_rq() local
    346  wqe->data.lkey = c->mkey_be;   in mlx5e_create_rq()
    347  wqe->data.byte_count =   in mlx5e_create_rq()

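mlx5e_post_rx_wqes() above treats the RQ as a linked list: after filling the WQE at wq->head, it advances by following the WQE's own next_wqe_index rather than incrementing a counter. A host-endian sketch of that refill loop with the DMA mapping and doorbell stubbed out.

```c
#include <stdint.h>
#include <stdio.h>

#define RQ_SIZE 8

struct demo_rx_wqe {
    uint16_t next_wqe_index;   /* link written when the queue was created */
    uint64_t addr;             /* buffer address (stubbed) */
};

static struct demo_rx_wqe wq[RQ_SIZE];
static uint16_t head;

static int demo_alloc_rx_wqe(struct demo_rx_wqe *wqe, uint16_t ix)
{
    wqe->addr = 0x1000 + ix * 0x100;   /* stand-in for the real DMA mapping */
    return 0;
}

static void demo_post_rx_wqes(int budget)
{
    while (budget--) {
        struct demo_rx_wqe *wqe = &wq[head];

        if (demo_alloc_rx_wqe(wqe, head))
            break;                    /* out of buffers: stop refilling */
        head = wqe->next_wqe_index;   /* follow the link, as the driver does */
    }
}

int main(void)
{
    for (int i = 0; i < RQ_SIZE; i++)
        wq[i].next_wqe_index = (i + 1) % RQ_SIZE;   /* simple ring linkage */
    demo_post_rx_wqes(4);
    printf("head now %u\n", head);
    return 0;
}
```
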
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | qp.c |
    210   __be32 *wqe;   in stamp_send_wqe() local
    225   wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));   in stamp_send_wqe()
    226   *wqe = stamp;   in stamp_send_wqe()
    232   wqe = buf + i;   in stamp_send_wqe()
    233   *wqe = cpu_to_be32(0xffffffff);   in stamp_send_wqe()
    242   void *wqe;   in post_nop_wqe() local
    245   ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));   in post_nop_wqe()
    249   struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;   in post_nop_wqe()
    258   inl = wqe + s;   in post_nop_wqe()
    2144  void *wqe, unsigned *mlx_seg_len)   in build_sriov_qp0_header() argument
    [all …]

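stamp_send_wqe() above overwrites freed send-queue strides with cpu_to_be32(0xffffffff), presumably so that a stale WQE prefetched by the HCA is recognizably invalid. A sketch of that stamping, assuming a 64-byte chunk stride; the real stride and the two code paths are derived from qp->sq.wqe_shift.

```c
#include <stdint.h>
#include <string.h>

#define STAMP_STRIDE 64   /* assumed chunk size; mlx4 derives it per QP */

static void demo_stamp_wqe(void *wqe_buf, int wqe_size)
{
    uint32_t stamp = 0xffffffff;   /* kernel stores this as big-endian */
    int i;

    for (i = 0; i < wqe_size; i += STAMP_STRIDE)
        memcpy((char *)wqe_buf + i, &stamp, sizeof(stamp)); /* first dword */
}

int main(void)
{
    unsigned char wqe[256] = { 0 };

    demo_stamp_wqe(wqe, sizeof(wqe));
    return wqe[0] == 0xff ? 0 : 1;
}
```
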
/linux-4.4.14/drivers/infiniband/hw/nes/ |
D | nes_verbs.c |
    220  struct nes_hw_qp_wqe *wqe;   in nes_bind_mw() local
    240  wqe = &nesqp->hwqp.sq_vbase[head];   in nes_bind_mw()
    242  nes_fill_init_qp_wqe(wqe, nesqp, head);   in nes_bind_mw()
    244  set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);   in nes_bind_mw()
    257  set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);   in nes_bind_mw()
    258  set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX,   in nes_bind_mw()
    260  set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);   in nes_bind_mw()
    261  set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,   in nes_bind_mw()
    263  wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;   in nes_bind_mw()
    265  set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);   in nes_bind_mw()
    [all …]

D | nes.h |
    335  nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head)   in nes_fill_init_qp_wqe() argument
    339  set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX,   in nes_fill_init_qp_wqe()
    341  set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value);   in nes_fill_init_qp_wqe()

D | nes_cm.c |
    777  struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0];   in build_rdma0_msg() local
    781  set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);   in build_rdma0_msg()
    783  wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;   in build_rdma0_msg()
    784  wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;   in build_rdma0_msg()
    789  wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =   in build_rdma0_msg()
    791  wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;   in build_rdma0_msg()
    792  wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;   in build_rdma0_msg()
    793  wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;   in build_rdma0_msg()
    802  wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =   in build_rdma0_msg()
    804  wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1;   in build_rdma0_msg()
    [all …]

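All three nes listings above build WQEs by writing 32-bit words at named indices through set_wqe_32bit_value()/set_wqe_64bit_value(), with 64-bit values split across a LOW/HIGH index pair. A sketch of those helpers, assuming the LOW word sits at the lower index and omitting the kernel's cpu_to_le32() conversion.

```c
#include <stdint.h>
#include <stdio.h>

static void demo_set_wqe_32(uint32_t *words, int index, uint32_t v)
{
    words[index] = v;   /* kernel converts with cpu_to_le32() here */
}

static void demo_set_wqe_64(uint32_t *words, int index, uint64_t v)
{
    demo_set_wqe_32(words, index, (uint32_t)v);               /* LOW word  */
    demo_set_wqe_32(words, index + 1, (uint32_t)(v >> 32));   /* HIGH word */
}

int main(void)
{
    uint32_t wqe_words[32] = { 0 };

    demo_set_wqe_64(wqe_words, 6, 0x1122334455667788ull);
    printf("low=0x%x high=0x%x\n", wqe_words[6], wqe_words[7]);
    return 0;
}
```
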
/linux-4.4.14/drivers/scsi/lpfc/ |
D | lpfc_sli.c |
    96    lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)   in lpfc_sli4_wq_put() argument
    106   temp_wqe = q->qe[q->host_index].wqe;   in lpfc_sli4_wq_put()
    117   bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);   in lpfc_sli4_wq_put()
    119   bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);   in lpfc_sli4_wq_put()
    120   lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);   in lpfc_sli4_wq_put()
    8156  union lpfc_wqe *wqe)   in lpfc_sli4_iocb2wqe() argument
    8185  memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));   in lpfc_sli4_iocb2wqe()
    8188  wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */   in lpfc_sli4_iocb2wqe()
    8189  wqe->generic.wqe_com.word10 = 0;   in lpfc_sli4_iocb2wqe()
    8200  wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);   in lpfc_sli4_iocb2wqe()
    [all …]

D | lpfc_sli4.h |
    123  union lpfc_wqe *wqe;   member

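lpfc_sli4_wq_put() above stamps the queue id into the WQE, copies it into the slot at host_index, and advances the producer index around the ring. A reduced sketch of that put path with invented types and the doorbell write omitted.

```c
#include <stdint.h>
#include <string.h>

#define WQ_DEPTH 64
#define WQE_SIZE 64

struct demo_wq {
    uint8_t  qe[WQ_DEPTH][WQE_SIZE];
    uint32_t host_index;   /* next free slot (producer) */
    uint32_t hba_index;    /* last slot consumed by the adapter */
};

static int demo_wq_put(struct demo_wq *q, const void *wqe)
{
    uint32_t next = (q->host_index + 1) % WQ_DEPTH;

    if (next == q->hba_index)
        return -1;                            /* queue full */
    memcpy(q->qe[q->host_index], wqe, WQE_SIZE);
    q->host_index = next;
    /* real driver: ring the WQ doorbell register here */
    return 0;
}

int main(void)
{
    static struct demo_wq q;
    uint8_t wqe[WQE_SIZE] = { 0 };

    return demo_wq_put(&q, wqe);
}
```
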
/linux-4.4.14/drivers/scsi/bfa/ |
D | bfa_svc.c |
    671   struct bfa_fcxp_wqe_s *wqe;   in bfa_fcxp_put() local
    674   bfa_q_deq(&mod->req_wait_q, &wqe);   in bfa_fcxp_put()
    676   bfa_q_deq(&mod->rsp_wait_q, &wqe);   in bfa_fcxp_put()
    678   if (wqe) {   in bfa_fcxp_put()
    681   bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,   in bfa_fcxp_put()
    682   wqe->nrsp_sgles, wqe->req_sga_cbfn,   in bfa_fcxp_put()
    683   wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,   in bfa_fcxp_put()
    684   wqe->rsp_sglen_cbfn);   in bfa_fcxp_put()
    686   wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);   in bfa_fcxp_put()
    1112  bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,   in bfa_fcxp_req_rsp_alloc_wait() argument
    [all …]

D | bfa_svc.h |
    77   void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
    79   void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
    80   void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
    417  struct bfa_reqq_wait_s wqe; /* request wait queue element */   member
    616  void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
    626  struct bfa_fcxp_wqe_s *wqe);

D | bfa.h |
    97   bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),   in bfa_reqq_winit() argument
    100  wqe->qresume = qresume;   in bfa_reqq_winit()
    101  wqe->cbarg = cbarg;   in bfa_reqq_winit()

D | bfa_core.c |
    699  struct bfa_reqq_wait_s *wqe;   in bfa_reqq_resume() local
    710  wqe = (struct bfa_reqq_wait_s *) qe;   in bfa_reqq_resume()
    711  wqe->qresume(wqe->cbarg);   in bfa_reqq_resume()

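bfa_reqq_winit() and bfa_reqq_resume() above form a deferred-submission pattern: a caller that finds the request queue full parks a wait element carrying a qresume callback, and the driver replays each element when space frees up. A self-contained sketch using a singly linked list as a stand-in for the driver's queue.

```c
#include <stdio.h>

struct demo_wqe {
    struct demo_wqe *next;
    void (*qresume)(void *cbarg);   /* retry hook, as in bfa_reqq_wait_s */
    void *cbarg;
};

static struct demo_wqe *wait_head;

static void demo_winit(struct demo_wqe *wqe,
                       void (*qresume)(void *), void *cbarg)
{
    wqe->qresume = qresume;   /* mirrors bfa_reqq_winit() above */
    wqe->cbarg = cbarg;
}

static void demo_wait(struct demo_wqe *wqe)
{
    wqe->next = wait_head;    /* park until queue space is available */
    wait_head = wqe;
}

static void demo_resume_all(void)
{
    while (wait_head) {
        struct demo_wqe *wqe = wait_head;

        wait_head = wqe->next;
        wqe->qresume(wqe->cbarg);   /* retry the deferred submission */
    }
}

static void my_resume(void *arg)
{
    printf("resumed: %s\n", (char *)arg);
}

int main(void)
{
    struct demo_wqe w;

    demo_winit(&w, my_resume, "pending I/O");
    demo_wait(&w);
    demo_resume_all();
    return 0;
}
```
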
/linux-4.4.14/drivers/scsi/bnx2fc/ |
D | 57xx_hsi_bnx2fc.h |
    639  __le16 wqe;   member
    782  __le16 wqe;   member
    834  __le16 wqe;   member
    898  __le32 wqe;   member
    913  __le16 wqe;   member
    992  __le16 wqe;   member

D | bnx2fc_hwi.c |
    625   static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)   in bnx2fc_process_unsol_compl() argument
    644   BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);   in bnx2fc_process_unsol_compl()
    645   switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {   in bnx2fc_process_unsol_compl()
    647   frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>   in bnx2fc_process_unsol_compl()
    872   void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)   in bnx2fc_process_cq_compl() argument
    887   xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;   in bnx2fc_process_cq_compl()
    997   struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)   in bnx2fc_alloc_work() argument
    1006  work->wqe = wqe;   in bnx2fc_alloc_work()
    1017  u16 wqe;   in bnx2fc_process_new_cqes() local
    1035  while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==   in bnx2fc_process_new_cqes()
    [all …]

D | bnx2fc.h |
    478  u16 wqe;   member
    574  void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);

D | bnx2fc_fcoe.c |
    616   bnx2fc_process_cq_compl(work->tgt, work->wqe);   in bnx2fc_percpu_io_thread()
    2563  bnx2fc_process_cq_compl(work->tgt, work->wqe);   in bnx2fc_percpu_thread_destroy()

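The loop condition in bnx2fc_process_new_cqes() above tests FCOE_CQE_TOGGLE_BIT: the producer flips this bit each time it wraps the completion ring, so an entry is new only while its toggle bit matches the consumer's expected phase. A sketch of that consumption pattern with a stubbed ring; the real CQE layout lives in the HSI header.

```c
#include <stdint.h>
#include <stdio.h>

#define CQ_DEPTH   4
#define TOGGLE_BIT 0x8000u

static uint16_t cq[CQ_DEPTH];
static unsigned cons_idx;
static uint16_t expect_toggle = TOGGLE_BIT;   /* phase of the first pass */

static void demo_process_new_cqes(void)
{
    while ((cq[cons_idx] & TOGGLE_BIT) == expect_toggle) {
        printf("completion 0x%x\n", cq[cons_idx] & ~TOGGLE_BIT);
        if (++cons_idx == CQ_DEPTH) {
            cons_idx = 0;
            expect_toggle ^= TOGGLE_BIT;   /* expected phase flips on wrap */
        }
    }
}

int main(void)
{
    /* producer posts two entries in the first phase */
    cq[0] = TOGGLE_BIT | 0x11;
    cq[1] = TOGGLE_BIT | 0x22;
    demo_process_new_cqes();   /* consumes both, then stops at a stale entry */
    return 0;
}
```
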
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_reqs.c |
    804  struct ehca_wqe *wqe;   in generate_flush_cqes() local
    822  wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);   in generate_flush_cqes()
    823  if (!wqe) {   in generate_flush_cqes()
    829  wc->wr_id = replace_wr_id(wqe->work_request_id,   in generate_flush_cqes()
    833  switch (wqe->optype) {   in generate_flush_cqes()
    845  wqe->optype);   in generate_flush_cqes()
    851  if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {   in generate_flush_cqes()
    852  wc->ex.imm_data = wqe->immediate_data;   in generate_flush_cqes()

D | ehca_qp.c |
    1076  struct ehca_wqe *wqe;   in prepare_sqe_rts() local
    1104  wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);   in prepare_sqe_rts()
    1106  while (wqe->optype != 0xff && wqe->wqef != 0xff) {   in prepare_sqe_rts()
    1108  ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);   in prepare_sqe_rts()
    1109  wqe->nr_of_data_seg = 0; /* suppress data access */   in prepare_sqe_rts()
    1110  wqe->wqef = WQEF_PURGE; /* WQE to be purged */   in prepare_sqe_rts()
    1112  wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);   in prepare_sqe_rts()
    1121  wqe->wqef = 0;   in prepare_sqe_rts()
    1395  struct ehca_wqe *wqe;   in internal_modify_qp() local
    1400  wqe = (struct ehca_wqe *)   in internal_modify_qp()
    [all …]

/linux-4.4.14/include/linux/mlx5/ |
D | qp.h |
    422  } wqe;   member

D | device.h |
    522  } __packed wqe;   member

/linux-4.4.14/drivers/net/ethernet/broadcom/ |
D | cnic_defs.h |
    2942  __le16 wqe;   member
    3085  __le16 wqe;   member