Searched refs:cqe (Results 1 – 62 of 62) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_cq.c
174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument
176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw()
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument
186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw()
191 __be32 *cqe = cqe_ptr; in dump_cqe() local
193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe()
195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe()
196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe()
197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe()
[all …]
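
Note: the mthca snippets above show the owner-bit convention in full: software may consume a CQE only while MTHCA_CQ_ENTRY_OWNER_HW is clear (cqe_sw), hands the slot back by setting the bit again (set_cqe_hw), and masks its consumer index with ibcq.cqe, which therefore holds entries - 1. A minimal user-space model with hypothetical stand-in types, not the mthca headers:

    #include <stddef.h>
    #include <stdint.h>

    #define OWNER_HW 0x80   /* models MTHCA_CQ_ENTRY_OWNER_HW */

    struct cqe_model {
        uint8_t owner;
        /* ... rest of the entry ... */
    };

    /* mask is entries - 1, matching how ibcq.cqe is used above */
    static struct cqe_model *next_sw_cqe(struct cqe_model *ring,
                                         uint32_t mask, uint32_t cons_index)
    {
        struct cqe_model *cqe = &ring[cons_index & mask];

        return (cqe->owner & OWNER_HW) ? NULL : cqe; /* HW still owns it */
    }

    static void set_cqe_hw_model(struct cqe_model *cqe)
    {
        cqe->owner = OWNER_HW; /* return the slot, as set_cqe_hw() does */
    }
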
mthca_provider.c
758 cq->resize_buf->cqe = entries - 1; in mthca_alloc_resize_buf()
781 if (entries == ibcq->cqe + 1) { in mthca_resize_cq()
804 cq->resize_buf->cqe); in mthca_resize_cq()
821 tcqe = cq->ibcq.cqe; in mthca_resize_cq()
823 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_resize_cq()
826 tcqe = cq->resize_buf->cqe; in mthca_resize_cq()
835 ibcq->cqe = entries - 1; in mthca_resize_cq()
mthca_provider.h
193 int cqe; member
mthca_dev.h
510 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_ev.c
52 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); in post_qp_event()
56 __func__, CQE_STATUS(rsp_msg->cqe), in post_qp_event()
57 CQE_QPID(rsp_msg->cqe)); in post_qp_event()
66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); in post_qp_event()
73 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), in post_qp_event()
74 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), in post_qp_event()
75 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); in post_qp_event()
118 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); in iwch_ev_dispatch()
122 cqid, CQE_QPID(rsp_msg->cqe), in iwch_ev_dispatch()
123 CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe), in iwch_ev_dispatch()
[all …]
iwch_cq.c
48 struct t3_cqe cqe, *rd_cqe; in iwch_poll_cq_one() local
67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, in iwch_poll_cq_one()
83 wc->vendor_err = CQE_STATUS(cqe); in iwch_poll_cq_one()
88 CQE_QPID(cqe), CQE_TYPE(cqe), in iwch_poll_cq_one()
89 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe), in iwch_poll_cq_one()
90 CQE_WRID_LOW(cqe), (unsigned long long) cookie); in iwch_poll_cq_one()
92 if (CQE_TYPE(cqe) == 0) { in iwch_poll_cq_one()
93 if (!CQE_STATUS(cqe)) in iwch_poll_cq_one()
94 wc->byte_len = CQE_LEN(cqe); in iwch_poll_cq_one()
98 if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV || in iwch_poll_cq_one()
[all …]
cxio_hal.c
75 struct t3_cqe *cqe; in cxio_hal_cq_op() local
109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); in cxio_hal_cq_op()
110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { in cxio_hal_cq_op()
352 struct t3_cqe cqe; in insert_recv_cqe() local
356 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
357 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | in insert_recv_cqe()
364 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe; in insert_recv_cqe()
389 struct t3_cqe cqe; in insert_sq_cqe() local
393 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
394 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | in insert_sq_cqe()
[all …]
cxio_wr.h
675 struct t3_cqe cqe; member
728 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \ argument
729 CQE_GENBIT(*cqe))
769 struct t3_cqe *cqe; in cxio_next_hw_cqe() local
771 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2)); in cxio_next_hw_cqe()
772 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe)) in cxio_next_hw_cqe()
773 return cqe; in cxio_next_hw_cqe()
779 struct t3_cqe *cqe; in cxio_next_sw_cqe() local
782 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2)); in cxio_next_sw_cqe()
783 return cqe; in cxio_next_sw_cqe()
[all …]
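
Note: CQ_VLD_ENTRY above validates an entry by comparing a generation bit derived from the free-running read pointer (Q_GENBIT) with the one the hardware stamped into the CQE (CQE_GENBIT); the expected bit flips each time the pointer crosses the 2^size_log2 ring boundary, so leftovers from the previous lap fail the test. A sketch of that check; the exact polarity of Q_GENBIT is an assumption, since its definition is not shown above:

    #include <stdint.h>

    static int q_genbit(uint32_t ptr, unsigned int size_log2)
    {
        /* bit size_log2 of the pointer flips once per ring traversal */
        return !((ptr >> size_log2) & 1);
    }

    static int cq_vld_entry(uint32_t rptr, unsigned int size_log2,
                            int cqe_genbit)
    {
        return q_genbit(rptr, size_log2) == cqe_genbit;
    }
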
cxio_hal.h
146 struct t3_cqe cqe; /* flits 2-3 */ member
195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
iwch_provider.c
191 chp->ibcq.cqe = 1 << chp->cq.size_log2; in iwch_create_cq()
245 static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) in iwch_resize_cq() argument
252 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe); in iwch_resize_cq()
255 if (cqe <= cq->cqe) in iwch_resize_cq()
259 cqe = roundup_pow_of_two(cqe+1); in iwch_resize_cq()
260 newcq.size_log2 = ilog2(cqe); in iwch_resize_cq()
263 if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) { in iwch_resize_cq()
293 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1; in iwch_resize_cq()
iwch_qp.c
620 status = CQE_STATUS(rsp_msg->cqe); in build_term_codes()
621 opcode = CQE_OPCODE(rsp_msg->cqe); in build_term_codes()
622 rqtype = RQ_TYPE(rsp_msg->cqe); in build_term_codes()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
cq.c
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
81 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
132 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument
134 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf()
139 u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument
144 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
186 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
262 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
293 cq->resize_buf->cqe = entries - 1; in mlx4_alloc_resize_buf()
[all …]
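
Note: get_sw_cqe() above copes with two CQE strides: when buf.entry_size is 64 the meaningful 32-byte layout, including the owner byte, sits in the second half of the slot (cqe + 1), and an entry belongs to software when its owner bit matches the wrap-count parity of the consumer index. An illustrative user-space model (names are mine):

    #include <stddef.h>
    #include <stdint.h>

    #define OWNER_BIT 0x80   /* models MLX4_CQE_OWNER_MASK */

    struct cqe32 {
        uint8_t data[31];
        uint8_t owner_sr_opcode;  /* owner bit in the last byte */
    };

    static struct cqe32 *sw_cqe(uint8_t *buf, int entry_size,
                                uint32_t n, uint32_t mask /* entries - 1 */)
    {
        uint8_t *slot = buf + (size_t)(n & mask) * entry_size;
        struct cqe32 *cqe = (struct cqe32 *)
                (entry_size == 64 ? slot + 32 : slot);
        int wrap_parity = !!(n & (mask + 1));  /* flips once per lap */

        /* software owns the entry when owner bit == wrap parity */
        return (!!(cqe->owner_sr_opcode & OWNER_BIT) ^ wrap_parity)
                ? NULL : cqe;
    }
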
mlx4_ib.h
100 int cqe; member
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
cq.c
187 struct t4_cqe cqe; in insert_recv_cqe() local
191 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe()
192 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_recv_cqe()
197 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_recv_cqe()
198 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
220 struct t4_cqe cqe; in insert_sq_cqe() local
224 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe()
225 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_sq_cqe()
230 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe()
231 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_sq_cqe()
[all …]
t4.h
262 struct t4_cqe cqe; member
615 static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) in t4_valid_cqe() argument
617 return (CQE_GENBIT(cqe) == cq->gen); in t4_valid_cqe()
620 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_hw_cqe() argument
639 *cqe = &cq->queue[cq->cidx]; in t4_next_hw_cqe()
659 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_cqe() argument
666 *cqe = &cq->sw_queue[cq->sw_cidx]; in t4_next_cqe()
668 ret = t4_next_hw_cqe(cq, cqe); in t4_next_cqe()
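
Note: t4_next_cqe() above drains the software queue before touching the hardware ring, so the SWFLUSH completions inserted by insert_recv_cqe()/insert_sq_cqe() in cq.c are reported in order ahead of new hardware events. A compilable model under assumed field layouts (the gen-bit position here stands in for t4_valid_cqe()):

    #include <errno.h>
    #include <stdint.h>

    struct cqe_m { uint32_t bits_type_ts_hi; };

    struct cq_m {
        struct cqe_m *queue, *sw_queue;
        unsigned int cidx, sw_cidx, sw_in_use;
        int gen, error;
    };

    static int next_hw_cqe_m(struct cq_m *cq, struct cqe_m **cqe)
    {
        if ((int)(cq->queue[cq->cidx].bits_type_ts_hi >> 31) != cq->gen)
            return -ENODATA;           /* next slot not written yet */
        *cqe = &cq->queue[cq->cidx];
        return 0;
    }

    static int next_cqe_m(struct cq_m *cq, struct cqe_m **cqe)
    {
        if (cq->error)
            return -ENODATA;
        if (cq->sw_in_use) {           /* software completions first */
            *cqe = &cq->sw_queue[cq->sw_cidx];
            return 0;
        }
        return next_hw_cqe_m(cq, cqe); /* then the hardware ring */
    }
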
device.c
117 void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) in c4iw_log_wr_stats() argument
130 le.cqe_sge_ts = CQE_TS(cqe); in c4iw_log_wr_stats()
131 if (SQ_TYPE(cqe)) { in c4iw_log_wr_stats()
133 le.opcode = CQE_OPCODE(cqe); in c4iw_log_wr_stats()
136 le.wr_id = CQE_WRID_SQ_IDX(cqe); in c4iw_log_wr_stats()
142 le.wr_id = CQE_WRID_MSN(cqe); in c4iw_log_wr_stats()
iw_cxgb4.h
999 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
1035 extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
/linux-4.1.27/drivers/infiniband/hw/mlx5/
cq.c
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
91 return cqe; in get_sw_cqe()
120 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
124 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
138 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
170 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
184 be32_to_cpu(cqe->srqn)); in handle_responder()
190 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
[all …]
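
Note: two details above deserve spelling out: with 128-byte CQEs the 64-byte layout sits in the second half of the slot (cqe + 64), and handle_good_req() switches on the top byte of the big-endian sop_drop_qpn word. A sketch of unpacking that word; treating the low 24 bits as the QP number is an assumption based on the field name:

    #include <stdint.h>
    #include <arpa/inet.h>  /* ntohl() stands in for be32_to_cpu() */

    static void split_sop_drop_qpn(uint32_t sop_drop_qpn_be,
                                   uint8_t *opcode, uint32_t *qpn)
    {
        uint32_t v = ntohl(sop_drop_qpn_be);

        *opcode = v >> 24;        /* switched on in handle_good_req() */
        *qpn    = v & 0xffffff;   /* assumed: QPN in the low 24 bits */
    }
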
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_cq.c
63 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter()
64 head = cq->ibcq.cqe; in ipath_cq_enter()
147 if (tail > (u32) cq->ibcq.cqe) in ipath_poll_cq()
148 tail = (u32) cq->ibcq.cqe; in ipath_poll_cq()
154 if (tail >= cq->ibcq.cqe) in ipath_poll_cq()
283 cq->ibcq.cqe = entries; in ipath_create_cq()
371 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in ipath_resize_cq() argument
380 if (cqe < 1 || cqe > ib_ipath_max_cqes) { in ipath_resize_cq()
390 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); in ipath_resize_cq()
392 sz += sizeof(struct ib_wc) * (cqe + 1); in ipath_resize_cq()
[all …]
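
Note: the ipath ring above holds ibcq.cqe + 1 slots; head and tail run from 0 through ibcq.cqe and then wrap to 0, which is the clamp-and-reset pattern in ipath_cq_enter() and ipath_poll_cq() (the qib driver below repeats the same scheme). The advance step in isolation:

    /* last_valid corresponds to cq->ibcq.cqe in the snippets above */
    static unsigned int advance_index(unsigned int idx,
                                      unsigned int last_valid)
    {
        return (idx >= last_valid) ? 0 : idx + 1;
    }
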
ipath_verbs.h
818 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_cq.c
66 if (head >= (unsigned) cq->ibcq.cqe) { in qib_cq_enter()
67 head = cq->ibcq.cqe; in qib_cq_enter()
154 if (tail > (u32) cq->ibcq.cqe) in qib_poll_cq()
155 tail = (u32) cq->ibcq.cqe; in qib_poll_cq()
161 if (tail >= cq->ibcq.cqe) in qib_poll_cq()
299 cq->ibcq.cqe = entries; in qib_create_cq()
387 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in qib_resize_cq() argument
396 if (cqe < 1 || cqe > ib_qib_max_cqes) { in qib_resize_cq()
406 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); in qib_resize_cq()
408 sz += sizeof(struct ib_wc) * (cqe + 1); in qib_resize_cq()
[all …]
qib_verbs.h
1020 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_reqs.c
629 struct ehca_cqe *cqe; in ehca_poll_cq_one() local
636 cqe = (struct ehca_cqe *) in ehca_poll_cq_one()
638 if (!cqe) { in ehca_poll_cq_one()
650 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) { in ehca_poll_cq_one()
655 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number); in ehca_poll_cq_one()
659 my_cq->cq_number, cqe->local_qp_number); in ehca_poll_cq_one()
660 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x", in ehca_poll_cq_one()
661 my_cq->cq_number, cqe->local_qp_number); in ehca_poll_cq_one()
672 cqe->local_qp_number, cqe->remote_qp_number); in ehca_poll_cq_one()
674 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", in ehca_poll_cq_one()
[all …]
ehca_cq.c
116 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, in ehca_create_cq() argument
134 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) in ehca_create_cq()
183 param.nr_cqe = cqe + additional_cqe; in ehca_create_cq()
261 my_cq->ib_cq.cqe = my_cq->nr_of_entries = in ehca_create_cq()
369 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) in ehca_resize_cq() argument
ipz_pt_fn.h
144 struct ehca_cqe *cqe = ipz_qeit_get(queue); in ipz_qeit_is_valid() local
145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1)); in ipz_qeit_is_valid()
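
Note: ipz_qeit_is_valid() above is the toggle-state variant of the wrap marker: bit 7 of the CQE flags byte must equal the queue's toggle state, which flips on each wrap. Modelled directly from the two lines shown:

    #include <stdint.h>

    struct ipz_queue_m { unsigned int toggle_state; };

    static int qeit_is_valid(uint8_t cqe_flags, const struct ipz_queue_m *q)
    {
        return (cqe_flags >> 7) == (q->toggle_state & 1);
    }
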
ehca_iverbs.h
129 struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
135 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1063 ibcq->cqe = new_cnt; in ocrdma_resize_cq()
1074 struct ocrdma_cqe *cqe = NULL; in ocrdma_flush_cq() local
1076 cqe = cq->va; in ocrdma_flush_cq()
1084 if (is_cqe_valid(cq, cqe)) in ocrdma_flush_cq()
1086 cqe++; in ocrdma_flush_cq()
1639 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local
1658 cqe = cq->va + cur_getp; in ocrdma_discard_cqes()
1663 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes()
1669 if (is_cqe_for_sq(cqe)) { in ocrdma_discard_cqes()
1673 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> in ocrdma_discard_cqes()
[all …]
ocrdma.h
479 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) in is_cqe_valid() argument
482 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; in is_cqe_valid()
486 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) in is_cqe_for_sq() argument
488 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_for_sq()
492 static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) in is_cqe_invalidated() argument
494 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_invalidated()
498 static inline int is_cqe_imm(struct ocrdma_cqe *cqe) in is_cqe_imm() argument
500 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_imm()
504 static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) in is_cqe_wr_imm() argument
506 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_wr_imm()
ocrdma_hw.c
106 struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *) in ocrdma_get_mcqe() local
109 if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK)) in ocrdma_get_mcqe()
111 return cqe; in ocrdma_get_mcqe()
655 struct ocrdma_ae_mcqe *cqe) in ocrdma_dispatch_ibevent() argument
664 int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >> in ocrdma_dispatch_ibevent()
667 if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) in ocrdma_dispatch_ibevent()
668 qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK]; in ocrdma_dispatch_ibevent()
669 if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) in ocrdma_dispatch_ibevent()
670 cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK]; in ocrdma_dispatch_ibevent()
759 struct ocrdma_ae_mcqe *cqe) in ocrdma_process_grp5_aync() argument
[all …]
ocrdma_verbs.h
61 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
en_rx.c
725 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, in check_csum() argument
732 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); in check_csum()
734 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && in check_csum()
740 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) in check_csum()
743 else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) in check_csum()
754 struct mlx4_cqe *cqe; in mlx4_en_process_rx_cq() local
778 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; in mlx4_en_process_rx_cq()
781 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, in mlx4_en_process_rx_cq()
793 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in mlx4_en_process_rx_cq()
796 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, in mlx4_en_process_rx_cq()
[all …]
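
Note: the receive loop above (and the transmit loop below) polls with XNOR(owner bit, cons_index & size): because size is a power of two, cons_index & size gives the wrap parity, and an entry is software-owned while the two agree; the added 'factor' is the same 64-byte-stride offset the mlx4 IB driver handles in get_sw_cqe(). A model of the ownership test (the XNOR definition is assumed from its use):

    #include <stdint.h>

    #define OWNER_MASK 0x80            /* models MLX4_CQE_OWNER_MASK */
    #define XNOR(x, y) (!(x) == !(y))  /* assumed driver macro */

    static int cqe_is_sw_owned(uint8_t owner_sr_opcode,
                               uint32_t cons_index,
                               uint32_t size /* ring entries, pow. of 2 */)
    {
        return XNOR(owner_sr_opcode & OWNER_MASK, cons_index & size);
    }
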
en_tx.c
391 struct mlx4_cqe *cqe; in mlx4_en_process_tx_cq() local
415 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; in mlx4_en_process_tx_cq()
422 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, in mlx4_en_process_tx_cq()
430 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in mlx4_en_process_tx_cq()
432 struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; in mlx4_en_process_tx_cq()
440 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; in mlx4_en_process_tx_cq()
446 timestamp = mlx4_en_get_cqe_ts(cqe); in mlx4_en_process_tx_cq()
465 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; in mlx4_en_process_tx_cq()
en_clock.c
50 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe) in mlx4_en_get_cqe_ts() argument
53 struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe; in mlx4_en_get_cqe_ts()
mlx4_en.h
844 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
en_netdev.c
1572 struct mlx4_cqe *cqe = NULL; in mlx4_en_start_port() local
1574 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + in mlx4_en_start_port()
1576 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; in mlx4_en_start_port()
/linux-4.1.27/drivers/scsi/bnx2i/
bnx2i.h
506 struct cqe { struct
650 struct cqe *cq_virt;
654 struct cqe *cq_prod_qe;
655 struct cqe *cq_cons_qe;
656 struct cqe *cq_first_qe;
657 struct cqe *cq_last_qe;
774 struct cqe cqe; member
881 struct cqe *cqe);
bnx2i_hwi.c
1355 struct cqe *cqe) in bnx2i_process_scsi_cmd_resp() argument
1365 resp_cqe = (struct bnx2i_cmd_response *)cqe; in bnx2i_process_scsi_cmd_resp()
1400 resp_cqe = (struct bnx2i_cmd_response *)cqe; in bnx2i_process_scsi_cmd_resp()
1452 struct cqe *cqe) in bnx2i_process_login_resp() argument
1461 login = (struct bnx2i_login_response *) cqe; in bnx2i_process_login_resp()
1520 struct cqe *cqe) in bnx2i_process_text_resp() argument
1529 text = (struct bnx2i_text_response *) cqe; in bnx2i_process_text_resp()
1581 struct cqe *cqe) in bnx2i_process_tmf_resp() argument
1588 tmf_cqe = (struct bnx2i_tmf_response *)cqe; in bnx2i_process_tmf_resp()
1620 struct cqe *cqe) in bnx2i_process_logout_resp() argument
[all …]
bnx2i_init.c
455 work->bnx2i_conn, &work->cqe); in bnx2i_percpu_thread_destroy()
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
ehea_main.c
543 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) in ehea_check_cqe() argument
545 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; in ehea_check_cqe()
546 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) in ehea_check_cqe()
548 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && in ehea_check_cqe()
549 (cqe->header_length == 0)) in ehea_check_cqe()
555 struct sk_buff *skb, struct ehea_cqe *cqe, in ehea_fill_skb() argument
558 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ in ehea_fill_skb()
565 if (cqe->status & EHEA_CQE_BLIND_CKSUM) { in ehea_fill_skb()
567 skb->csum = csum_unfold(~cqe->inet_checksum_value); in ehea_fill_skb()
576 struct ehea_cqe *cqe) in get_skb_by_index() argument
[all …]
ehea_qmr.h
378 struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_cmn.c
354 struct eth_end_agg_rx_cqe *cqe) in bnx2x_update_sge_prod() argument
367 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); in bnx2x_update_sge_prod()
370 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod()
375 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod()
409 const struct eth_fast_path_rx_cqe *cqe, in bnx2x_get_rxhash() argument
414 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { in bnx2x_get_rxhash()
417 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; in bnx2x_get_rxhash()
422 return le32_to_cpu(cqe->rss_hash_result); in bnx2x_get_rxhash()
430 struct eth_fast_path_rx_cqe *cqe) in bnx2x_tpa_start() argument
473 le16_to_cpu(cqe->pars_flags.flags); in bnx2x_tpa_start()
[all …]
bnx2x_cmn.h
776 union eth_rx_cqe *cqe; in bnx2x_has_rx_work() local
780 cqe = &fp->rx_comp_ring[cons]; in bnx2x_has_rx_work()
781 cqe_fp = &cqe->fast_path_cqe; in bnx2x_has_rx_work()
bnx2x_ethtool.c
2400 union eth_rx_cqe *cqe; in bnx2x_run_loopback() local
2552 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; in bnx2x_run_loopback()
2553 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; in bnx2x_run_loopback()
2558 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len); in bnx2x_run_loopback()
2566 data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset; in bnx2x_run_loopback()
bnx2x_sp.h
434 union event_ring_elem *cqe,
bnx2x.h
909 #define BNX2X_RX_SUM_FIX(cqe) \ argument
910 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
bnx2x_sp.c
1432 union event_ring_elem *cqe, in bnx2x_complete_vlan_mac() argument
1452 if (cqe->message.error) in bnx2x_complete_vlan_mac()
bnx2x_main.c
1127 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; in bnx2x_panic_dump() local
1130 i, j, cqe[0], cqe[1], cqe[2], cqe[3]); in bnx2x_panic_dump()
/linux-4.1.27/drivers/block/
nvme-core.c
271 struct nvme_completion *cqe) in special_completion() argument
278 cqe->command_id, le16_to_cpup(&cqe->sq_id)); in special_completion()
284 cqe->command_id, le16_to_cpup(&cqe->sq_id)); in special_completion()
303 struct nvme_completion *cqe) in async_req_completion() argument
305 u32 result = le32_to_cpup(&cqe->result); in async_req_completion()
306 u16 status = le16_to_cpup(&cqe->status) >> 1; in async_req_completion()
316 struct nvme_completion *cqe) in abort_completion() argument
320 u16 status = le16_to_cpup(&cqe->status) >> 1; in abort_completion()
321 u32 result = le32_to_cpup(&cqe->result); in abort_completion()
330 struct nvme_completion *cqe) in async_completion() argument
[all …]
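
Note: every handler above decodes the same two completion words: result is read as a le32, and the 16-bit status is shifted right by one because its low bit is the phase tag that marks valid entries as the queue wraps, not part of the status code. A minimal host-endian sketch (the struct is simplified, not the full nvme_completion layout):

    #include <stdint.h>

    struct nvme_cqe_m {
        uint32_t result;     /* command-specific result */
        uint16_t command_id;
        uint16_t status;     /* bit 0: phase tag; bits 15:1: status code */
    };

    static void decode_completion(const struct nvme_cqe_m *cqe,
                                  uint16_t *status, int *phase)
    {
        *phase  = cqe->status & 1;   /* flips once per CQ wrap */
        *status = cqe->status >> 1;  /* 0 means success */
    }
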
/linux-4.1.27/include/uapi/rdma/
ib_user_verbs.h
349 __u32 cqe; member
358 __u32 cqe; member
364 __u32 cqe; member
369 __u32 cqe; member
/linux-4.1.27/include/rdma/
ib_verbs.h
1224 int cqe; member
1564 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
1571 int (*resize_cq)(struct ib_cq *cq, int cqe,
2023 void *cq_context, int cqe, int comp_vector);
2032 int ib_resize_cq(struct ib_cq *cq, int cqe);
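
Note: the two declarations above are the consumer-facing entry points; verbs.c further down shows ib_create_cq() forwarding to the provider's create_cq() and ib_resize_cq() to resize_cq(). A hedged usage fragment against the 4.1-era signatures (my_* names are illustrative, error handling abbreviated):

    #include <rdma/ib_verbs.h>

    static void my_comp_handler(struct ib_cq *cq, void *ctx)
    {
        /* typically: re-arm with ib_req_notify_cq() and poll */
    }

    /* in setup code, given a valid struct ib_device *device: */
    struct ib_cq *cq = ib_create_cq(device, my_comp_handler,
                                    NULL /* async event handler */,
                                    my_context,
                                    256 /* requested CQEs */,
                                    0 /* comp_vector */);
    if (IS_ERR(cq))
        return PTR_ERR(cq);
    /* providers may round the count up; cq->cqe reports the usable
     * depth, and ib_resize_cq() can grow it later where supported */
    err = ib_resize_cq(cq, 1024);
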
/linux-4.1.27/drivers/scsi/lpfc/
lpfc_sli.h
45 } cqe; member
lpfc_sli.c
364 struct lpfc_cqe *cqe; in lpfc_sli4_cq_get() local
372 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) in lpfc_sli4_cq_get()
379 cqe = q->qe[q->hba_index].cqe; in lpfc_sli4_cq_get()
392 return cqe; in lpfc_sli4_cq_get()
422 temp_qe = q->qe[q->host_index].cqe; in lpfc_sli4_cq_release()
3495 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { in lpfc_sli_handle_slow_ring_event_s4()
6715 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { in lpfc_sli4_mbox_completions_pending()
6716 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; in lpfc_sli4_mbox_completions_pending()
11422 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); in lpfc_sli4_fcp_xri_abort_event_proc()
11451 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); in lpfc_sli4_els_xri_abort_event_proc()
[all …]
lpfc_sli4.h
116 struct lpfc_cqe *cqe; member
lpfc_init.c
853 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { in lpfc_sli4_free_sp_events()
4616 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { in lpfc_sli4_async_event_proc()
4619 &cq_event->cqe.acqe_link); in lpfc_sli4_async_event_proc()
4622 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); in lpfc_sli4_async_event_proc()
4626 &cq_event->cqe.acqe_dcbx); in lpfc_sli4_async_event_proc()
4630 &cq_event->cqe.acqe_grp5); in lpfc_sli4_async_event_proc()
4633 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); in lpfc_sli4_async_event_proc()
4636 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); in lpfc_sli4_async_event_proc()
4642 &cq_event->cqe.mcqe_cmpl)); in lpfc_sli4_async_event_proc()
7985 struct lpfc_cq_event *cqe; in lpfc_sli4_cq_event_release_all() local
[all …]
/linux-4.1.27/drivers/infiniband/core/
verbs.c
1015 void *cq_context, int cqe, int comp_vector) in ib_create_cq() argument
1019 cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); in ib_create_cq()
1050 int ib_resize_cq(struct ib_cq *cq, int cqe) in ib_resize_cq() argument
1053 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; in ib_resize_cq()
uverbs_cmd.c
1379 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, in ib_uverbs_create_cq()
1401 resp.cqe = cq->cqe; in ib_uverbs_create_cq()
1455 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); in ib_uverbs_resize_cq()
1459 resp.cqe = cq->cqe; in ib_uverbs_resize_cq()
1462 &resp, sizeof resp.cqe)) in ib_uverbs_resize_cq()
/linux-4.1.27/drivers/atm/
firestream.c
597 struct FS_QENTRY *cqe; in submit_qentry() local
609 cqe = bus_to_virt (wp); in submit_qentry()
610 if (qe != cqe) { in submit_qentry()
611 fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe); in submit_qentry()
/linux-4.1.27/drivers/infiniband/hw/nes/
nes_verbs.c
1571 nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; in nes_create_cq()
3641 struct nes_hw_cqe cqe; in nes_poll_cq() local
3669 cqe = nescq->hw_cq.cq_vbase[head]; in nes_poll_cq()
3670 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); in nes_poll_cq()
3674 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | in nes_poll_cq()
3680 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { in nes_poll_cq()
3683 err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]); in nes_poll_cq()
3698 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { in nes_poll_cq()
3746 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); in nes_poll_cq()
3780 wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); in nes_poll_cq()
/linux-4.1.27/drivers/scsi/bnx2fc/
bnx2fc_hwi.c
1014 struct fcoe_cqe *cqe; in bnx2fc_process_new_cqes() local
1033 cqe = &cq[cq_cons]; in bnx2fc_process_new_cqes()
1035 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == in bnx2fc_process_new_cqes()
1068 cqe++; in bnx2fc_process_new_cqes()
1074 cqe = cq; in bnx2fc_process_new_cqes()
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_cq.c
300 cq->ibcq.cqe = entries - 1; in c2_init_cq()
/linux-4.1.27/drivers/net/ethernet/broadcom/
cnic.c
2908 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) in cnic_l2_completion() local
2924 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; in cnic_l2_completion()
2925 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; in cnic_l2_completion()
2927 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); in cnic_l2_completion()
/linux-4.1.27/drivers/infiniband/ulp/srpt/
Dib_srpt.c2073 __func__, ch->cq->cqe, qp_init->cap.max_send_sge, in srpt_create_ch_ib()