/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | iwch_qp.c |
      42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, in build_rdma_send() argument
      51 wqe->send.rdmaop = T3_SEND_WITH_SE; in build_rdma_send()
      53 wqe->send.rdmaop = T3_SEND; in build_rdma_send()
      54 wqe->send.rem_stag = 0; in build_rdma_send()
      58 wqe->send.rdmaop = T3_SEND_WITH_SE_INV; in build_rdma_send()
      60 wqe->send.rdmaop = T3_SEND_WITH_INV; in build_rdma_send()
      61 wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
      68 wqe->send.reserved[0] = 0; in build_rdma_send()
      69 wqe->send.reserved[1] = 0; in build_rdma_send()
      70 wqe->send.reserved[2] = 0; in build_rdma_send()
      [all …]
|
D | cxio_hal.c |
      140 struct t3_modify_qp_wr *wqe; in cxio_hal_clear_qp_ctx() local
      141 struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); in cxio_hal_clear_qp_ctx()
      146 wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); in cxio_hal_clear_qp_ctx()
      147 memset(wqe, 0, sizeof(*wqe)); in cxio_hal_clear_qp_ctx()
      148 build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, in cxio_hal_clear_qp_ctx()
      151 wqe->flags = cpu_to_be32(MODQP_WRITE_EC); in cxio_hal_clear_qp_ctx()
      153 wqe->sge_cmd = cpu_to_be64(sge_cmd); in cxio_hal_clear_qp_ctx()
      519 struct t3_modify_qp_wr *wqe; in cxio_hal_init_ctrl_qp() local
      522 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL); in cxio_hal_init_ctrl_qp()
      565 wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe)); in cxio_hal_init_ctrl_qp()
      [all …]
|
D | cxio_wr.h |
      416 static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe) in fw_riwrh_opcode() argument
      418 return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags)); in fw_riwrh_opcode()
      427 static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op, in build_fw_riwrh() argument
      431 wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) | in build_fw_riwrh()
      435 wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) | in build_fw_riwrh()
      439 ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit); in build_fw_riwrh()
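For orientation, a minimal user-space sketch of the shift-and-mask packing that build_fw_riwrh() performs above. The bit positions and field widths here are invented (the real T3 layout lives in the V_FW_RIWR_* macros), and cpu_to_be32() is modeled with htonl():

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define V_OP(x)  ((uint32_t)(x) << 28)  /* opcode in the top nibble (assumed) */
    #define V_GEN(x) ((uint32_t)(x) << 31)  /* generation bit on top (assumed)    */

    struct riwrh_sketch {
        uint32_t op_seop_flags;             /* big-endian on the wire */
        uint32_t gen_tid_len;
    };

    static void build_hdr(struct riwrh_sketch *wqe, unsigned op,
                          unsigned genbit, unsigned tid, unsigned len)
    {
        wqe->op_seop_flags = htonl(V_OP(op));
        /* genbit shares a word with the tid and length fields */
        wqe->gen_tid_len = htonl(V_GEN(genbit) | (tid << 8) | (len & 0xffu));
    }

    int main(void)
    {
        struct riwrh_sketch h;
        build_hdr(&h, 5, 1, 42, 16);
        printf("%08x %08x\n", ntohl(h.op_seop_flags), ntohl(h.gen_tid_len));
        return 0;
    }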
|
D | cxio_dbg.c |
      111 void cxio_dump_wqe(union t3_wr *wqe) in cxio_dump_wqe() argument
      113 __be64 *data = (__be64 *)wqe; in cxio_dump_wqe()
|
D | cxio_hal.h | 205 void cxio_dump_wqe(union t3_wr *wqe);
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | qp.c |
      434 static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, in build_rdma_send() argument
      446 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
      449 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
      451 wqe->send.stag_inv = 0; in build_rdma_send()
      455 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
      458 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
      460 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
      466 wqe->send.r3 = 0; in build_rdma_send()
      467 wqe->send.r4 = 0; in build_rdma_send()
      472 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
      [all …]
|
D | t4.h |
      108 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, in init_wr_hdr() argument
      111 wqe->send.opcode = (u8)opcode; in init_wr_hdr()
      112 wqe->send.flags = flags; in init_wr_hdr()
      113 wqe->send.wrid = wrid; in init_wr_hdr()
      114 wqe->send.r1[0] = 0; in init_wr_hdr()
      115 wqe->send.r1[1] = 0; in init_wr_hdr()
      116 wqe->send.r1[2] = 0; in init_wr_hdr()
      117 wqe->send.len16 = len16; in init_wr_hdr()
      455 union t4_wr *wqe) in t4_ring_sq_db() argument
      461 if (inc == 1 && wqe) { in t4_ring_sq_db()
      [all …]
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_rc.c |
      43 static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, in restart_sge() argument
      48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu; in restart_sge()
      49 ss->sge = wqe->sg_list[0]; in restart_sge()
      50 ss->sg_list = wqe->sg_list + 1; in restart_sge()
      51 ss->num_sge = wqe->wr.num_sge; in restart_sge()
      52 ss->total_len = wqe->length; in restart_sge()
      54 return wqe->length - len; in restart_sge()
      236 struct qib_swqe *wqe; in qib_make_rc_req() local
      273 wqe = get_swqe_ptr(qp, qp->s_last); in qib_make_rc_req()
      274 qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in qib_make_rc_req()
      [all …]
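The restart_sge() arithmetic above is worth a standalone look: given the PSN at which an RC request must be resent, it computes how many payload bytes were already covered and how many remain. A self-contained sketch (example lengths invented; IB PSNs are 24 bits wide, hence the wrap-tolerant mask):

    #include <stdint.h>
    #include <stdio.h>

    #define QIB_PSN_MASK 0xFFFFFF   /* 24-bit PSNs wrap, so mask the difference */

    static uint32_t bytes_remaining(uint32_t restart_psn, uint32_t first_psn,
                                    uint32_t wqe_length, uint32_t pmtu)
    {
        /* packets already sent times bytes per packet */
        uint32_t done = ((restart_psn - first_psn) & QIB_PSN_MASK) * pmtu;
        return wqe_length - done;
    }

    int main(void)
    {
        /* 3 packets of a 10000-byte send already went out at pmtu 2048 */
        printf("%u\n", bytes_remaining(103, 100, 10000, 2048)); /* 3856 */
        return 0;
    }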
|
D | qib_ruc.c |
      81 static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) in qib_init_sge() argument
      94 for (i = j = 0; i < wqe->num_sge; i++) { in qib_init_sge()
      95 if (wqe->sg_list[i].length == 0) in qib_init_sge()
      99 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) in qib_init_sge()
      101 qp->r_len += wqe->sg_list[i].length; in qib_init_sge()
      117 wc.wr_id = wqe->wr_id; in qib_init_sge()
      144 struct qib_rwqe *wqe; in qib_get_rwqe() local
      176 wqe = get_rwqe_ptr(rq, tail); in qib_get_rwqe()
      185 if (!wr_id_only && !qib_init_sge(qp, wqe)) { in qib_get_rwqe()
      189 qp->r_wr_id = wqe->wr_id; in qib_get_rwqe()
      [all …]
|
D | qib_uc.c |
      49 struct qib_swqe *wqe; in qib_make_uc_req() local
      70 wqe = get_swqe_ptr(qp, qp->s_last); in qib_make_uc_req()
      71 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_uc_req()
      84 wqe = get_swqe_ptr(qp, qp->s_cur); in qib_make_uc_req()
      97 wqe->psn = qp->s_next_psn; in qib_make_uc_req()
      99 qp->s_sge.sge = wqe->sg_list[0]; in qib_make_uc_req()
      100 qp->s_sge.sg_list = wqe->sg_list + 1; in qib_make_uc_req()
      101 qp->s_sge.num_sge = wqe->wr.num_sge; in qib_make_uc_req()
      102 qp->s_sge.total_len = wqe->length; in qib_make_uc_req()
      103 len = wqe->length; in qib_make_uc_req()
      [all …]
|
D | qib_ud.c |
      241 struct qib_swqe *wqe; in qib_make_ud_req() local
      264 wqe = get_swqe_ptr(qp, qp->s_last); in qib_make_ud_req()
      265 qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in qib_make_ud_req()
      272 wqe = get_swqe_ptr(qp, qp->s_cur); in qib_make_ud_req()
      280 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; in qib_make_ud_req()
      303 qib_ud_loopback(qp, wqe); in qib_make_ud_req()
      305 qib_send_complete(qp, wqe, IB_WC_SUCCESS); in qib_make_ud_req()
      311 extra_bytes = -wqe->length & 3; in qib_make_ud_req()
      312 nwords = (wqe->length + extra_bytes) >> 2; in qib_make_ud_req()
      316 qp->s_cur_size = wqe->length; in qib_make_ud_req()
      [all …]
|
D | qib_srq.c |
      57 struct qib_rwqe *wqe; in qib_post_srq_receive() local
      79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive()
      80 wqe->wr_id = wr->wr_id; in qib_post_srq_receive()
      81 wqe->num_sge = wr->num_sge; in qib_post_srq_receive()
      83 wqe->sg_list[i] = wr->sg_list[i]; in qib_post_srq_receive()
      282 struct qib_rwqe *wqe; in qib_modify_srq() local
      285 wqe = get_rwqe_ptr(&srq->rq, tail); in qib_modify_srq()
      286 p->wr_id = wqe->wr_id; in qib_modify_srq()
      287 p->num_sge = wqe->num_sge; in qib_modify_srq()
      288 for (i = 0; i < wqe->num_sge; i++) in qib_modify_srq()
      [all …]
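qib_post_srq_receive() above copies each receive request into the ring slot at wq->head. A sketch of posting one receive with invented structures and a simplified full-ring check:

    #include <stdint.h>
    #include <stdio.h>

    #define SRQ_SIZE 8
    #define MAX_SGE  4

    struct rwqe { uint64_t wr_id; int num_sge; uint64_t sg_addr[MAX_SGE]; };

    struct rq {
        struct rwqe ring[SRQ_SIZE];
        unsigned head;                      /* next slot to post into   */
        unsigned tail;                      /* next slot to be consumed */
    };

    static int post_recv(struct rq *rq, uint64_t wr_id,
                         const uint64_t *sg, int num_sge)
    {
        unsigned next = (rq->head + 1) % SRQ_SIZE;
        if (next == rq->tail)
            return -1;                      /* ring full                */
        struct rwqe *wqe = &rq->ring[rq->head];
        wqe->wr_id = wr_id;                 /* copy the caller's request */
        wqe->num_sge = num_sge;
        for (int i = 0; i < num_sge; i++)
            wqe->sg_addr[i] = sg[i];
        rq->head = next;                    /* publish the entry        */
        return 0;
    }

    int main(void)
    {
        static struct rq rq;
        uint64_t sg[2] = { 0x1000, 0x2000 };
        int rc = post_recv(&rq, 77, sg, 2);
        printf("post rc=%d, head now %u\n", rc, rq.head);
        return 0;
    }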
|
D | qib_verbs.c |
      340 struct qib_swqe *wqe; in qib_post_one_send() local
      399 wqe = get_swqe_ptr(qp, qp->s_head); in qib_post_one_send()
      400 wqe->wr = *wr; in qib_post_one_send()
      401 wqe->length = 0; in qib_post_one_send()
      412 ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], in qib_post_one_send()
      416 wqe->length += length; in qib_post_one_send()
      419 wqe->wr.num_sge = j; in qib_post_one_send()
      423 if (wqe->length > 0x80000000U) in qib_post_one_send()
      425 } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + in qib_post_one_send()
      430 wqe->ssn = qp->s_ssn++; in qib_post_one_send()
      [all …]
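qib_post_one_send() above copies the caller's work request into a send WQE, validates each scatter/gather entry, accumulates the total length, and rejects requests over 2 GiB (the 0x80000000U check). A sketch of that validation loop; the sge type and the lkey check are invented stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

    static int lkey_ok(const struct sge *s) { return s->lkey != 0; } /* stand-in */

    static int total_send_length(const struct sge *sgl, int num_sge,
                                 uint32_t *total)
    {
        uint32_t len = 0;
        for (int i = 0; i < num_sge; i++) {
            if (sgl[i].length == 0)
                continue;                   /* zero-length SGEs are skipped */
            if (!lkey_ok(&sgl[i]))
                return -1;                  /* bad local key: reject the WR */
            len += sgl[i].length;
        }
        if (len > 0x80000000U)
            return -1;                      /* IB messages are capped at 2 GiB */
        *total = len;
        return 0;
    }

    int main(void)
    {
        struct sge sgl[2] = { { 0x1000, 4096, 7 }, { 0x9000, 512, 9 } };
        uint32_t total;
        if (total_send_length(sgl, 2, &total) == 0)
            printf("WQE payload = %u bytes\n", total);   /* 4608 */
        return 0;
    }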
|
D | qib_qp.c |
      429 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last); in clear_mr_refs() local
      432 for (i = 0; i < wqe->wr.num_sge; i++) { in clear_mr_refs()
      433 struct qib_sge *sge = &wqe->sg_list[i]; in clear_mr_refs()
      440 atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount); in clear_mr_refs()
      1367 struct qib_swqe *wqe; in qib_qp_iter_print() local
      1370 wqe = get_swqe_ptr(qp, qp->s_last); in qib_qp_iter_print()
      1377 wqe->wr.opcode, in qib_qp_iter_print()
      1383 wqe->ssn, in qib_qp_iter_print()
|
D | qib_verbs.h | 1102 void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
|
D | qib.h | 262 struct qib_swqe *wqe; member
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_ruc.c |
      123 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, in ipath_init_sge() argument
      130 for (i = j = 0; i < wqe->num_sge; i++) { in ipath_init_sge()
      131 if (wqe->sg_list[i].length == 0) in ipath_init_sge()
      135 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) in ipath_init_sge()
      137 *lengthp += wqe->sg_list[i].length; in ipath_init_sge()
      146 wc.wr_id = wqe->wr_id; in ipath_init_sge()
      172 struct ipath_rwqe *wqe; in ipath_get_rwqe() local
      205 wqe = get_rwqe_ptr(rq, tail); in ipath_get_rwqe()
      211 } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge)); in ipath_get_rwqe()
      212 qp->r_wr_id = wqe->wr_id; in ipath_get_rwqe()
      [all …]
|
D | ipath_rc.c |
      42 static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, in restart_sge() argument
      47 len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu; in restart_sge()
      48 ss->sge = wqe->sg_list[0]; in restart_sge()
      49 ss->sg_list = wqe->sg_list + 1; in restart_sge()
      50 ss->num_sge = wqe->wr.num_sge; in restart_sge()
      52 return wqe->length - len; in restart_sge()
      62 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) in ipath_init_restart() argument
      66 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, in ipath_init_restart()
      218 struct ipath_swqe *wqe; in ipath_make_rc_req() local
      256 wqe = get_swqe_ptr(qp, qp->s_last); in ipath_make_rc_req()
      [all …]
|
D | ipath_uc.c |
      49 struct ipath_swqe *wqe; in ipath_make_uc_req() local
      70 wqe = get_swqe_ptr(qp, qp->s_last); in ipath_make_uc_req()
      71 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in ipath_make_uc_req()
      84 wqe = get_swqe_ptr(qp, qp->s_cur); in ipath_make_uc_req()
      97 qp->s_psn = wqe->psn = qp->s_next_psn; in ipath_make_uc_req()
      98 qp->s_sge.sge = wqe->sg_list[0]; in ipath_make_uc_req()
      99 qp->s_sge.sg_list = wqe->sg_list + 1; in ipath_make_uc_req()
      100 qp->s_sge.num_sge = wqe->wr.num_sge; in ipath_make_uc_req()
      101 qp->s_len = len = wqe->length; in ipath_make_uc_req()
      102 switch (wqe->wr.opcode) { in ipath_make_uc_req()
      [all …]
|
D | ipath_ud.c |
      61 struct ipath_rwqe *wqe; in ipath_ud_loopback() local
      133 wqe = get_rwqe_ptr(rq, tail); in ipath_ud_loopback()
      135 if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { in ipath_ud_loopback()
      149 wc.wr_id = wqe->wr_id; in ipath_ud_loopback()
      246 struct ipath_swqe *wqe; in ipath_make_ud_req() local
      269 wqe = get_swqe_ptr(qp, qp->s_last); in ipath_make_ud_req()
      270 ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); in ipath_make_ud_req()
      277 wqe = get_swqe_ptr(qp, qp->s_cur); in ipath_make_ud_req()
      283 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; in ipath_make_ud_req()
      306 ipath_ud_loopback(qp, wqe); in ipath_make_ud_req()
      [all …]
|
D | ipath_srq.c |
      57 struct ipath_rwqe *wqe; in ipath_post_srq_receive() local
      79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive()
      80 wqe->wr_id = wr->wr_id; in ipath_post_srq_receive()
      81 wqe->num_sge = wr->num_sge; in ipath_post_srq_receive()
      83 wqe->sg_list[i] = wr->sg_list[i]; in ipath_post_srq_receive()
      286 struct ipath_rwqe *wqe; in ipath_modify_srq() local
      289 wqe = get_rwqe_ptr(&srq->rq, tail); in ipath_modify_srq()
      290 p->wr_id = wqe->wr_id; in ipath_modify_srq()
      291 p->num_sge = wqe->num_sge; in ipath_modify_srq()
      292 for (i = 0; i < wqe->num_sge; i++) in ipath_modify_srq()
      [all …]
|
D | ipath_verbs.c |
      338 struct ipath_swqe *wqe; in ipath_post_one_send() local
      397 wqe = get_swqe_ptr(qp, qp->s_head); in ipath_post_one_send()
      398 wqe->wr = *wr; in ipath_post_one_send()
      399 wqe->length = 0; in ipath_post_one_send()
      409 ok = ipath_lkey_ok(qp, &wqe->sg_list[j], in ipath_post_one_send()
      413 wqe->length += length; in ipath_post_one_send()
      416 wqe->wr.num_sge = j; in ipath_post_one_send()
      420 if (wqe->length > 0x80000000U) in ipath_post_one_send()
      422 } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu) in ipath_post_one_send()
      424 wqe->ssn = qp->s_ssn++; in ipath_post_one_send()
      [all …]
|
D | ipath_verbs.h |
      647 struct ipath_swqe *wqe; member
      857 int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
      871 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | odp.c |
      318 struct mlx5_ib_pfault *pfault, void *wqe, in pagefault_data_segments() argument
      331 wqe += sizeof(struct mlx5_wqe_srq_next_seg); in pagefault_data_segments()
      338 while (wqe < wqe_end) { in pagefault_data_segments()
      339 struct mlx5_wqe_data_seg *dseg = wqe; in pagefault_data_segments()
      349 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt, in pagefault_data_segments()
      352 wqe += sizeof(*dseg); in pagefault_data_segments()
      392 void **wqe, void **wqe_end, int wqe_length) in mlx5_ib_mr_initiator_pfault_handler() argument
      395 struct mlx5_wqe_ctrl_seg *ctrl = *wqe; in mlx5_ib_mr_initiator_pfault_handler()
      396 u16 wqe_index = pfault->mpfault.wqe.wqe_index; in mlx5_ib_mr_initiator_pfault_handler()
      436 *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS; in mlx5_ib_mr_initiator_pfault_handler()
      [all …]
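pagefault_data_segments() above walks the faulted WQE from wqe to wqe_end: an inline segment is skipped by its 16-byte-aligned header-plus-payload size, a pointer segment by sizeof(*dseg). A self-contained sketch of that walk; the segment layout and the inline flag bit are invented stand-ins for the mlx5 structures:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN16(x)  (((x) + 15u) & ~15u)
    #define INLINE_FLAG 0x80000000u

    struct data_seg { uint32_t byte_count; uint64_t addr; uint32_t lkey; };

    static void walk(char *wqe, char *wqe_end)
    {
        while (wqe < wqe_end) {
            struct data_seg *dseg = (struct data_seg *)wqe;
            uint32_t bcnt = dseg->byte_count & ~INLINE_FLAG;

            if (dseg->byte_count & INLINE_FLAG) {
                printf("inline seg, %u payload bytes\n", bcnt);
                wqe += ALIGN16(sizeof(uint32_t) + bcnt);  /* header + payload */
            } else {
                printf("pointer seg, %u bytes at 0x%llx\n",
                       bcnt, (unsigned long long)dseg->addr);
                wqe += sizeof(*dseg);
            }
        }
    }

    int main(void)
    {
        static uint64_t buf64[8];              /* keeps the buffer aligned */
        char *buf = (char *)buf64;
        struct data_seg *d = (struct data_seg *)buf;
        uint32_t *inl;

        d->byte_count = 256;                   /* pointer segment          */
        d->addr = 0x2000;
        d->lkey = 5;
        inl = (uint32_t *)(buf + sizeof(*d));
        *inl = INLINE_FLAG | 12;               /* inline: 12 payload bytes */

        walk(buf, buf + sizeof(*d) + ALIGN16(sizeof(uint32_t) + 12));
        return 0;
    }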
|
D | qp.c |
      2076 static u8 calc_sig(void *wqe, int size) in calc_sig() argument
      2078 u8 *p = wqe; in calc_sig()
      2088 static u8 wq_sig(void *wqe) in wq_sig() argument
      2090 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); in wq_sig()
      2094 void *wqe, int *sz) in set_data_inl_seg() argument
      2104 seg = wqe; in set_data_inl_seg()
      2105 wqe += sizeof(*seg); in set_data_inl_seg()
      2114 if (unlikely(wqe + len > qend)) { in set_data_inl_seg()
      2115 copy = qend - wqe; in set_data_inl_seg()
      2116 memcpy(wqe, addr, copy); in set_data_inl_seg()
      [all …]
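A sketch of the XOR signature shown in calc_sig()/wq_sig() above: calc_sig() XORs every byte of the WQE and complements the result, so a WQE with the signature byte stored inside it XORs to the constant 0xff. The size extraction mimics wq_sig(): byte 8 holds the descriptor count in 16-byte units in its low 6 bits, hence the shift by 4. What the hardware does with the signature is an assumption here; only the arithmetic follows the snippet:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint8_t calc_sig(const void *wqe, int size)
    {
        const uint8_t *p = wqe;
        uint8_t res = 0;
        for (int i = 0; i < size; i++)
            res ^= p[i];
        return ~res;                   /* complement: XOR with sig gives 0xff */
    }

    int main(void)
    {
        uint8_t wqe[64];
        memset(wqe, 0xab, sizeof(wqe));
        wqe[8] = 4;                           /* 4 * 16 = 64 bytes           */
        int size = (wqe[8] & 0x3f) << 4;      /* size extraction, as wq_sig() */
        printf("sig over %d bytes = 0x%02x\n", size, calc_sig(wqe, size));
        return 0;
    }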
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_srq.c |
      90 static inline int *wqe_to_link(void *wqe) in wqe_to_link() argument
      92 return (int *) (wqe + offsetof(struct mthca_next_seg, imm)); in wqe_to_link()
      151 void *wqe; in mthca_alloc_srq_buf() local
      178 next = wqe = get_wqe(srq, i); in mthca_alloc_srq_buf()
      181 *wqe_to_link(wqe) = i + 1; in mthca_alloc_srq_buf()
      184 *wqe_to_link(wqe) = -1; in mthca_alloc_srq_buf()
      188 for (scatter = wqe + sizeof (struct mthca_next_seg); in mthca_alloc_srq_buf()
      189 (void *) scatter < wqe + (1 << srq->wqe_shift); in mthca_alloc_srq_buf()
      487 void *wqe; in mthca_tavor_post_srq_recv() local
      496 wqe = get_wqe(srq, ind); in mthca_tavor_post_srq_recv()
      [all …]
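mthca_alloc_srq_buf() above threads a free list through the receive queue itself: while an entry is not posted, bytes the hardware ignores (the immediate-data field, per wqe_to_link()) hold the index of the next free entry, and -1 terminates the chain. A self-contained sketch of that pattern with an invented offset and entry size:

    #include <stdio.h>
    #include <stddef.h>

    #define WQE_SHIFT 6        /* 64-byte entries, cf. 1 << srq->wqe_shift */
    #define NUM_WQE   8

    static int buf[(NUM_WQE << WQE_SHIFT) / sizeof(int)];

    static int *wqe_to_link(void *wqe)
    {
        return (int *)((char *)wqe + 4);   /* "don't care" bytes while free */
    }

    static void *get_wqe(int i)
    {
        return (char *)buf + ((size_t)i << WQE_SHIFT);
    }

    int main(void)
    {
        int i;

        for (i = 0; i < NUM_WQE; i++)      /* chain each entry to the next */
            *wqe_to_link(get_wqe(i)) = (i < NUM_WQE - 1) ? i + 1 : -1;

        for (i = 0; i != -1; i = *wqe_to_link(get_wqe(i)))   /* walk it */
            printf("free wqe %d\n", i);
        return 0;
    }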
|
D | mthca_qp.c |
      1607 void *wqe; in mthca_tavor_post_send() local
      1643 wqe = get_send_wqe(qp, ind); in mthca_tavor_post_send()
      1645 qp->sq.last = wqe; in mthca_tavor_post_send()
      1647 ((struct mthca_next_seg *) wqe)->nda_op = 0; in mthca_tavor_post_send()
      1648 ((struct mthca_next_seg *) wqe)->ee_nds = 0; in mthca_tavor_post_send()
      1649 ((struct mthca_next_seg *) wqe)->flags = in mthca_tavor_post_send()
      1657 ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data; in mthca_tavor_post_send()
      1659 wqe += sizeof (struct mthca_next_seg); in mthca_tavor_post_send()
      1667 set_raddr_seg(wqe, wr->wr.atomic.remote_addr, in mthca_tavor_post_send()
      1669 wqe += sizeof (struct mthca_raddr_seg); in mthca_tavor_post_send()
      [all …]
|
D | mthca_cq.c |
      126 __be32 wqe; member
      140 __be32 wqe; member
      312 mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); in mthca_cq_clean()
      388 be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe), in handle_error_cqe()
      477 cqe->wqe = new_wqe; in handle_error_cqe()
      511 be32_to_cpu(cqe->wqe)); in mthca_poll_one()
      540 wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset) in mthca_poll_one()
      546 u32 wqe = be32_to_cpu(cqe->wqe); in mthca_poll_one() local
      548 wqe_index = wqe >> srq->wqe_shift; in mthca_poll_one()
      550 mthca_free_srq_wqe(srq, wqe); in mthca_poll_one()
      [all …]
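In the mthca_poll_one() hits above, the CQE reports the completed WQE as a byte offset; recovering the entry index is a subtract-and-shift. A sketch with invented geometry (the final mask against the queue size is an assumption based on the ellipsized line 540):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wqe_shift = 6;            /* 64-byte send WQEs             */
        uint32_t send_wqe_offset = 0x400;  /* SQ starts 1 KiB into the buf  */
        uint32_t sq_max = 256;             /* entries, power of two         */
        uint32_t cqe_wqe = 0x4c0;          /* offset reported by the CQE    */

        uint32_t wqe_index = ((cqe_wqe - send_wqe_offset) >> wqe_shift)
                             & (sq_max - 1);
        printf("completed send WQE index = %u\n", wqe_index);   /* 3 */
        return 0;
    }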
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | qp.c |
      209 __be32 *wqe; in stamp_send_wqe() local
      224 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); in stamp_send_wqe()
      225 *wqe = stamp; in stamp_send_wqe()
      231 wqe = buf + i; in stamp_send_wqe()
      232 *wqe = cpu_to_be32(0xffffffff); in stamp_send_wqe()
      241 void *wqe; in post_nop_wqe() local
      244 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in post_nop_wqe()
      248 struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; in post_nop_wqe()
      257 inl = wqe + s; in post_nop_wqe()
      2035 void *wqe, unsigned *mlx_seg_len) in build_sriov_qp0_header() argument
      [all …]
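stamp_send_wqe() above writes a known pattern (0xffffffff per line 232) into a released send WQE so stale memory is never mistaken for a valid posted request. A sketch of stamping one word per fixed stride across a slot; the slot size and stride are invented:

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_SHIFT    6              /* 64-byte WQE slots              */
    #define STAMP_STRIDE 16             /* stamp one word every 16 bytes  */

    static void stamp_wqe(void *slot)
    {
        char *p = slot;
        for (int i = 0; i < (1 << WQE_SHIFT); i += STAMP_STRIDE)
            *(uint32_t *)(p + i) = 0xffffffffu;   /* "invalid" pattern */
    }

    int main(void)
    {
        static uint32_t slot[(1 << WQE_SHIFT) / 4];
        stamp_wqe(slot);
        printf("first word after stamping: 0x%08x\n", slot[0]);
        return 0;
    }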
|
/linux-4.1.27/drivers/infiniband/hw/nes/ |
D | nes_verbs.c |
      219 struct nes_hw_qp_wqe *wqe; in nes_bind_mw() local
      239 wqe = &nesqp->hwqp.sq_vbase[head]; in nes_bind_mw()
      241 nes_fill_init_qp_wqe(wqe, nesqp, head); in nes_bind_mw()
      243 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp); in nes_bind_mw()
      256 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc); in nes_bind_mw()
      257 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, in nes_bind_mw()
      259 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey); in nes_bind_mw()
      260 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX, in nes_bind_mw()
      262 wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0; in nes_bind_mw()
      264 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp); in nes_bind_mw()
      [all …]
|
D | nes.h |
      335 nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head) in nes_fill_init_qp_wqe() argument
      339 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX, in nes_fill_init_qp_wqe()
      341 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value); in nes_fill_init_qp_wqe()
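The nes driver above fills WQEs through set_wqe_32bit_value()/set_wqe_64bit_value() helpers that address the entry as an array of 32-bit words, with 64-bit values split across adjacent LOW/HIGH indices. A sketch of how such a helper can split a value; the word indices and the low-then-high ordering are assumptions suggested by the *_LOW_IDX/*_HIGH_IDX names:

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_VA_LOW_IDX  6          /* invented word indices */
    #define WQE_VA_HIGH_IDX 7

    static void set_wqe_64bit_value(uint32_t *wqe_words, int low_idx, uint64_t v)
    {
        wqe_words[low_idx]     = (uint32_t)v;          /* low 32 bits  */
        wqe_words[low_idx + 1] = (uint32_t)(v >> 32);  /* high 32 bits */
    }

    int main(void)
    {
        uint32_t wqe_words[16] = {0};
        set_wqe_64bit_value(wqe_words, WQE_VA_LOW_IDX, 0x123456789abcdef0ULL);
        printf("low=0x%08x high=0x%08x\n",
               wqe_words[WQE_VA_LOW_IDX], wqe_words[WQE_VA_HIGH_IDX]);
        return 0;
    }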
|
D | nes_cm.c |
      777 struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0]; in build_rdma0_msg() local
      781 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp); in build_rdma0_msg()
      783 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; in build_rdma0_msg()
      784 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; in build_rdma0_msg()
      789 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = in build_rdma0_msg()
      791 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; in build_rdma0_msg()
      792 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; in build_rdma0_msg()
      793 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; in build_rdma0_msg()
      802 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = in build_rdma0_msg()
      804 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX] = 1; in build_rdma0_msg()
      [all …]
|
/linux-4.1.27/drivers/scsi/lpfc/ |
D | lpfc_sli.c |
      96 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) in lpfc_sli4_wq_put() argument
      106 temp_wqe = q->qe[q->host_index].wqe; in lpfc_sli4_wq_put()
      117 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); in lpfc_sli4_wq_put()
      119 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); in lpfc_sli4_wq_put()
      120 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); in lpfc_sli4_wq_put()
      8186 union lpfc_wqe *wqe) in lpfc_sli4_iocb2wqe() argument
      8215 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); in lpfc_sli4_iocb2wqe()
      8218 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ in lpfc_sli4_iocb2wqe()
      8219 wqe->generic.wqe_com.word10 = 0; in lpfc_sli4_iocb2wqe()
      8230 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); in lpfc_sli4_iocb2wqe()
      [all …]
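lpfc_sli4_wq_put() above copies a caller-built WQE into the ring slot at the queue's host index before the doorbell is rung. A sketch of the copy-into-ring step; the queue geometry is invented and the doorbell write is elided:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE 64
    #define Q_DEPTH    16

    struct wq {
        uint8_t  qe[Q_DEPTH][ENTRY_SIZE];   /* the hardware-visible ring   */
        unsigned host_index;                /* next slot owned by software */
    };

    static int wq_put(struct wq *q, const void *wqe)
    {
        unsigned idx = q->host_index;

        memcpy(q->qe[idx], wqe, ENTRY_SIZE);     /* publish the entry   */
        q->host_index = (idx + 1) % Q_DEPTH;     /* advance host index  */
        /* a real driver would write idx to a doorbell register here */
        return (int)idx;
    }

    int main(void)
    {
        static struct wq q;
        uint8_t wqe[ENTRY_SIZE] = { 0x42 };
        printf("posted at slot %d\n", wq_put(&q, wqe));
        return 0;
    }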
|
D | lpfc_sli4.h | 123 union lpfc_wqe *wqe; member
|
/linux-4.1.27/drivers/scsi/bfa/ |
D | bfa_svc.c |
      671 struct bfa_fcxp_wqe_s *wqe; in bfa_fcxp_put() local
      674 bfa_q_deq(&mod->req_wait_q, &wqe); in bfa_fcxp_put()
      676 bfa_q_deq(&mod->rsp_wait_q, &wqe); in bfa_fcxp_put()
      678 if (wqe) { in bfa_fcxp_put()
      681 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles, in bfa_fcxp_put()
      682 wqe->nrsp_sgles, wqe->req_sga_cbfn, in bfa_fcxp_put()
      683 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn, in bfa_fcxp_put()
      684 wqe->rsp_sglen_cbfn); in bfa_fcxp_put()
      686 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp); in bfa_fcxp_put()
      1112 bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, in bfa_fcxp_req_rsp_alloc_wait() argument
      [all …]
|
D | bfa_svc.h |
      77 void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
      79 void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
      80 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
      417 struct bfa_reqq_wait_s wqe; /* request wait queue element */ member
      616 void bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
      626 struct bfa_fcxp_wqe_s *wqe);
|
D | bfa.h |
      97 bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), in bfa_reqq_winit() argument
      100 wqe->qresume = qresume; in bfa_reqq_winit()
      101 wqe->cbarg = cbarg; in bfa_reqq_winit()
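In the bfa driver, "wqe" is a wait queue element, not a hardware work queue entry: bfa_reqq_winit() here and bfa_reqq_resume() below record a resume callback for a caller that could not get a resource, and the queue invokes it when space frees up. A minimal sketch of that pattern; the list handling is invented, only the winit/resume shape follows the snippets:

    #include <stdio.h>

    struct reqq_wait {
        void (*qresume)(void *cbarg);   /* called when the queue drains */
        void *cbarg;
        struct reqq_wait *next;
    };

    static struct reqq_wait *wait_head;

    static void reqq_winit(struct reqq_wait *wqe,
                           void (*qresume)(void *), void *cbarg)
    {
        wqe->qresume = qresume;
        wqe->cbarg = cbarg;
    }

    static void reqq_wait_add(struct reqq_wait *wqe)
    {
        wqe->next = wait_head;
        wait_head = wqe;
    }

    static void reqq_resume(void)
    {
        while (wait_head) {
            struct reqq_wait *wqe = wait_head;
            wait_head = wqe->next;
            wqe->qresume(wqe->cbarg);   /* retry the blocked operation */
        }
    }

    static void retry_io(void *arg) { printf("resumed: %s\n", (char *)arg); }

    int main(void)
    {
        struct reqq_wait w;
        reqq_winit(&w, retry_io, "deferred request");
        reqq_wait_add(&w);
        reqq_resume();                  /* queue has room again */
        return 0;
    }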
|
D | bfa_core.c |
      699 struct bfa_reqq_wait_s *wqe; in bfa_reqq_resume() local
      710 wqe = (struct bfa_reqq_wait_s *) qe; in bfa_reqq_resume()
      711 wqe->qresume(wqe->cbarg); in bfa_reqq_resume()
|
/linux-4.1.27/drivers/scsi/bnx2fc/ |
D | 57xx_hsi_bnx2fc.h |
      639 __le16 wqe; member
      782 __le16 wqe; member
      834 __le16 wqe; member
      898 __le32 wqe; member
      913 __le16 wqe; member
      992 __le16 wqe; member
|
D | bnx2fc_hwi.c |
      625 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) in bnx2fc_process_unsol_compl() argument
      644 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); in bnx2fc_process_unsol_compl()
      645 switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { in bnx2fc_process_unsol_compl()
      647 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> in bnx2fc_process_unsol_compl()
      872 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) in bnx2fc_process_cq_compl() argument
      887 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; in bnx2fc_process_cq_compl()
      997 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) in bnx2fc_alloc_work() argument
      1006 work->wqe = wqe; in bnx2fc_alloc_work()
      1017 u16 wqe; in bnx2fc_process_new_cqes() local
      1035 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == in bnx2fc_process_new_cqes()
      [all …]
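bnx2fc_process_new_cqes() above detects fresh CQEs with a toggle bit: the producer flips the bit each trip around the ring, so the consumer can tell new entries from stale ones without a shared index. A sketch with invented ring size and bit position:

    #include <stdint.h>
    #include <stdio.h>

    #define CQE_TOGGLE_BIT 0x8000u
    #define CQE_ID_MASK    0x7fffu
    #define RING_SIZE      4

    static uint16_t ring[RING_SIZE];
    static unsigned prod;

    static void produce(uint16_t id)
    {
        /* even laps publish with the toggle set, odd laps with it clear */
        uint16_t toggle = ((prod / RING_SIZE) & 1) ? 0 : CQE_TOGGLE_BIT;
        ring[prod % RING_SIZE] = id | toggle;
        prod++;
    }

    static unsigned consume(unsigned cons, uint16_t *expect)
    {
        while ((ring[cons % RING_SIZE] & CQE_TOGGLE_BIT) == *expect) {
            printf("completion id %u\n", ring[cons % RING_SIZE] & CQE_ID_MASK);
            cons++;
            if (cons % RING_SIZE == 0)
                *expect ^= CQE_TOGGLE_BIT;   /* wrapped: expectation flips */
        }
        return cons;
    }

    int main(void)
    {
        unsigned cons = 0;
        uint16_t expect = CQE_TOGGLE_BIT;

        for (uint16_t id = 1; id <= 3; id++)
            produce(id);
        cons = consume(cons, &expect);       /* ids 1..3 */

        for (uint16_t id = 4; id <= 6; id++)
            produce(id);
        consume(cons, &expect);              /* ids 4..6, across the wrap */
        return 0;
    }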
|
D | bnx2fc.h |
      479 u16 wqe; member
      575 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
|
D | bnx2fc_fcoe.c |
      616 bnx2fc_process_cq_compl(work->tgt, work->wqe); in bnx2fc_percpu_io_thread()
      2493 bnx2fc_process_cq_compl(work->tgt, work->wqe); in bnx2fc_percpu_thread_destroy()
|
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/ |
D | qp.c |
      144 pfault.wqe.wqe_index = in mlx5_eq_pagefault()
      145 be16_to_cpu(pf_eqe->wqe.wqe_index); in mlx5_eq_pagefault()
      146 pfault.wqe.packet_size = in mlx5_eq_pagefault()
      147 be16_to_cpu(pf_eqe->wqe.packet_length); in mlx5_eq_pagefault()
      150 qpn, pfault.wqe.wqe_index); in mlx5_eq_pagefault()
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_reqs.c |
      803 struct ehca_wqe *wqe; in generate_flush_cqes() local
      821 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); in generate_flush_cqes()
      822 if (!wqe) { in generate_flush_cqes()
      828 wc->wr_id = replace_wr_id(wqe->work_request_id, in generate_flush_cqes()
      832 switch (wqe->optype) { in generate_flush_cqes()
      844 wqe->optype); in generate_flush_cqes()
      850 if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) { in generate_flush_cqes()
      851 wc->ex.imm_data = wqe->immediate_data; in generate_flush_cqes()
|
D | ehca_qp.c |
      1076 struct ehca_wqe *wqe; in prepare_sqe_rts() local
      1104 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); in prepare_sqe_rts()
      1106 while (wqe->optype != 0xff && wqe->wqef != 0xff) { in prepare_sqe_rts()
      1108 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); in prepare_sqe_rts()
      1109 wqe->nr_of_data_seg = 0; /* suppress data access */ in prepare_sqe_rts()
      1110 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ in prepare_sqe_rts()
      1112 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); in prepare_sqe_rts()
      1121 wqe->wqef = 0; in prepare_sqe_rts()
      1395 struct ehca_wqe *wqe; in internal_modify_qp() local
      1400 wqe = (struct ehca_wqe *) in internal_modify_qp()
      [all …]
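prepare_sqe_rts() above walks the send queue and marks in-flight WQEs with a purge flag and zero data segments so they are skipped on the way back to RTS, stopping when it hits the 0xff "never written" sentinel. A sketch of that walk; the struct layout and queue depth are invented, the field names, sentinel, and purge steps follow the snippet:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define WQEF_PURGE 0x01
    #define Q_DEPTH    4

    struct ehca_wqe_sketch {
        uint8_t optype;          /* 0xff means the slot was never written */
        uint8_t wqef;
        uint8_t nr_of_data_seg;
    };

    int main(void)
    {
        struct ehca_wqe_sketch sq[Q_DEPTH];
        memset(sq, 0xff, sizeof(sq));        /* unwritten slots            */
        sq[0] = (struct ehca_wqe_sketch){ .optype = 1, .nr_of_data_seg = 2 };
        sq[1] = (struct ehca_wqe_sketch){ .optype = 2, .nr_of_data_seg = 1 };

        for (int i = 0; i < Q_DEPTH &&
             sq[i].optype != 0xff && sq[i].wqef != 0xff; i++) {
            sq[i].nr_of_data_seg = 0;        /* suppress data access       */
            sq[i].wqef = WQEF_PURGE;         /* mark WQE to be purged      */
            printf("purged wqe %d\n", i);
        }
        return 0;
    }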
|
/linux-4.1.27/include/linux/mlx5/ |
D | qp.h | 397 } wqe; member
|
D | device.h | 495 } __packed wqe; member
|
/linux-4.1.27/drivers/net/ethernet/broadcom/ |
D | cnic_defs.h |
      2942 __le16 wqe; member
      3085 __le16 wqe; member
|