| /linux-4.4.14/drivers/infiniband/hw/mthca/ |
| D | mthca_cq.c | 174 static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe) in cqe_sw() argument 176 return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; in cqe_sw() 181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw() 184 static inline void set_cqe_hw(struct mthca_cqe *cqe) in set_cqe_hw() argument 186 cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; in set_cqe_hw() 191 __be32 *cqe = cqe_ptr; in dump_cqe() local 193 (void) cqe; /* avoid warning if mthca_dbg compiled away... */ in dump_cqe() 195 be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]), in dump_cqe() 196 be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]), in dump_cqe() 197 be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7])); in dump_cqe() [all …]
|
| D | mthca_provider.c | 653 int entries = attr->cqe; in mthca_create_cq() 766 cq->resize_buf->cqe = entries - 1; in mthca_alloc_resize_buf() 789 if (entries == ibcq->cqe + 1) { in mthca_resize_cq() 812 cq->resize_buf->cqe); in mthca_resize_cq() 829 tcqe = cq->ibcq.cqe; in mthca_resize_cq() 831 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_resize_cq() 834 tcqe = cq->resize_buf->cqe; in mthca_resize_cq() 843 ibcq->cqe = entries - 1; in mthca_resize_cq()
|
| D | mthca_provider.h | 193 int cqe; member
|
| D | mthca_dev.h | 510 void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
|
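The mthca fragments above show the basic completion-queue handshake: cqe_sw() treats an entry as software-owned only while the hardware-owner bit in cqe->owner is clear, next_cqe_sw() indexes the ring with cons_index & ibcq.cqe (size minus one), and set_cqe_hw() hands the slot back to the device. A minimal userspace sketch of that pattern follows; the struct layout, constants and ring size are simplified stand-ins, not the driver's real definitions.

```c
#include <stdint.h>
#include <stdio.h>

#define CQ_ENTRY_OWNER_HW 0x80          /* stand-in for MTHCA_CQ_ENTRY_OWNER_HW */
#define CQ_SIZE           8             /* ring size, power of two */

struct cqe {
        uint8_t owner;                  /* top bit set: hardware owns the entry */
        uint8_t payload[31];
};

struct cq {
        struct cqe ring[CQ_SIZE];
        uint32_t cons_index;            /* free-running consumer counter */
};

/* An entry is usable by software only when the hardware-owner bit is clear. */
static struct cqe *cqe_sw(struct cqe *cqe)
{
        return (cqe->owner & CQ_ENTRY_OWNER_HW) ? NULL : cqe;
}

/* Ring size is a power of two, so masking with (size - 1) wraps the index. */
static struct cqe *next_cqe_sw(struct cq *cq)
{
        return cqe_sw(&cq->ring[cq->cons_index & (CQ_SIZE - 1)]);
}

/* After consuming an entry, return the slot to the hardware. */
static void set_cqe_hw(struct cqe *cqe)
{
        cqe->owner = CQ_ENTRY_OWNER_HW;
}

int main(void)
{
        struct cq cq = { .cons_index = 0 };
        struct cqe *cqe;

        for (int i = 0; i < CQ_SIZE; i++)
                cq.ring[i].owner = CQ_ENTRY_OWNER_HW;  /* hardware owns all slots */

        cq.ring[0].owner = 0;                          /* pretend HW completed one */

        while ((cqe = next_cqe_sw(&cq)) != NULL) {
                printf("completion at index %u\n", cq.cons_index & (CQ_SIZE - 1));
                set_cqe_hw(cqe);
                cq.cons_index++;
        }
        return 0;
}
```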
| /linux-4.4.14/drivers/infiniband/hw/mlx4/ |
| D | cq.c | 80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local 81 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe() 84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe() 132 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument 134 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf() 139 u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument 144 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem() 175 int entries = attr->cqe; in mlx4_ib_create_cq() 193 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq() 271 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq() [all …]
|
| D | mlx4_ib.h | 113 int cqe; member
|
| D | mad.c | 1831 cq_attr.cqe = cq_size; in create_pv_resources()
|
| D | main.c | 1201 cq_attr.cqe = 1; in mlx4_ib_alloc_xrcd()
|
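In the mlx4 IB variant above, the owner bit alone is not enough: get_sw_cqe() XORs it with the lap parity of the consumer counter (n & (ibcq.cqe + 1), where ibcq.cqe is ring size minus one), so the bit's meaning flips on every wrap and entries left over from the previous lap are not mistaken for fresh completions. A simplified illustration with invented field names:

```c
#include <stdbool.h>
#include <stdint.h>

#define OWNER_BIT   0x80        /* stand-in for MLX4_CQE_OWNER_MASK */
#define RING_SIZE   64          /* power of two; ibcq.cqe would be RING_SIZE - 1 */

struct cqe {
        uint8_t owner_sr_opcode;
        uint8_t rest[31];
};

/*
 * The entry at consumer counter n is software-owned when its owner bit equals
 * the parity of n / RING_SIZE.  Hardware writes the opposite polarity on each
 * lap, so a slot not yet rewritten on the current lap fails the test.
 */
static struct cqe *get_sw_cqe(struct cqe *ring, uint32_t n)
{
        struct cqe *cqe = &ring[n & (RING_SIZE - 1)];
        bool hw_owner_bit = !!(cqe->owner_sr_opcode & OWNER_BIT);
        bool lap_parity   = !!(n & RING_SIZE);

        return (hw_owner_bit ^ lap_parity) ? NULL : cqe;
}
```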
| /linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
| D | iwch_ev.c | 52 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); in post_qp_event() 56 __func__, CQE_STATUS(rsp_msg->cqe), in post_qp_event() 57 CQE_QPID(rsp_msg->cqe)); in post_qp_event() 66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); in post_qp_event() 73 CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe), in post_qp_event() 74 CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe), in post_qp_event() 75 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe)); in post_qp_event() 118 qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); in iwch_ev_dispatch() 122 cqid, CQE_QPID(rsp_msg->cqe), in iwch_ev_dispatch() 123 CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe), in iwch_ev_dispatch() [all …]
|
| D | iwch_cq.c | 48 struct t3_cqe cqe, *rd_cqe; in iwch_poll_cq_one() local 67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, in iwch_poll_cq_one() 83 wc->vendor_err = CQE_STATUS(cqe); in iwch_poll_cq_one() 88 CQE_QPID(cqe), CQE_TYPE(cqe), in iwch_poll_cq_one() 89 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe), in iwch_poll_cq_one() 90 CQE_WRID_LOW(cqe), (unsigned long long) cookie); in iwch_poll_cq_one() 92 if (CQE_TYPE(cqe) == 0) { in iwch_poll_cq_one() 93 if (!CQE_STATUS(cqe)) in iwch_poll_cq_one() 94 wc->byte_len = CQE_LEN(cqe); in iwch_poll_cq_one() 98 if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV || in iwch_poll_cq_one() [all …]
|
| D | cxio_hal.c | 75 struct t3_cqe *cqe; in cxio_hal_cq_op() local 109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); in cxio_hal_cq_op() 110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { in cxio_hal_cq_op() 352 struct t3_cqe cqe; in insert_recv_cqe() local 356 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe() 357 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | in insert_recv_cqe() 364 *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe; in insert_recv_cqe() 389 struct t3_cqe cqe; in insert_sq_cqe() local 393 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe() 394 cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) | in insert_sq_cqe() [all …]
|
| D | cxio_wr.h | 675 struct t3_cqe cqe; member 728 #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \ argument 729 CQE_GENBIT(*cqe)) 769 struct t3_cqe *cqe; in cxio_next_hw_cqe() local 771 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2)); in cxio_next_hw_cqe() 772 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe)) in cxio_next_hw_cqe() 773 return cqe; in cxio_next_hw_cqe() 779 struct t3_cqe *cqe; in cxio_next_sw_cqe() local 782 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2)); in cxio_next_sw_cqe() 783 return cqe; in cxio_next_sw_cqe() [all …]
|
| D | iwch_provider.c | 150 int entries = attr->cqe; in iwch_create_cq() 200 chp->ibcq.cqe = 1 << chp->cq.size_log2; in iwch_create_cq() 254 static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) in iwch_resize_cq() argument 261 PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe); in iwch_resize_cq() 264 if (cqe <= cq->cqe) in iwch_resize_cq() 268 cqe = roundup_pow_of_two(cqe+1); in iwch_resize_cq() 269 newcq.size_log2 = ilog2(cqe); in iwch_resize_cq() 272 if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) { in iwch_resize_cq() 302 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1; in iwch_resize_cq()
|
| D | cxio_hal.h | 146 struct t3_cqe cqe; /* flits 2-3 */ member 195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
|
| D | iwch_qp.c | 621 status = CQE_STATUS(rsp_msg->cqe); in build_term_codes() 622 opcode = CQE_OPCODE(rsp_msg->cqe); in build_term_codes() 623 rqtype = RQ_TYPE(rsp_msg->cqe); in build_term_codes()
|
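The cxgb3 fragments (insert_recv_cqe, insert_sq_cqe, cxio_next_sw_cqe) show how outstanding work requests are completed when a QP is flushed: the driver fabricates CQEs carrying a software-flush status and appends them to a driver-maintained software queue that the poll path drains alongside the hardware queue. A rough sketch of that idea, with simplified types and a made-up status code:

```c
#include <stdint.h>
#include <string.h>

#define SW_CQ_DEPTH   64              /* power of two */
#define ERR_SWFLUSH   0x05            /* stand-in for TPT_ERR_SWFLUSH */

struct sw_cqe {
        uint32_t status;              /* completion status */
        uint64_t wr_id;               /* work request being flushed */
};

struct sw_cq {
        struct sw_cqe queue[SW_CQ_DEPTH];
        uint32_t wptr;                /* free-running write pointer */
        uint32_t rptr;                /* free-running read pointer */
};

/* Complete one outstanding receive WR in software with a "flushed" status. */
static void insert_flush_cqe(struct sw_cq *cq, uint64_t wr_id)
{
        struct sw_cqe cqe;

        memset(&cqe, 0, sizeof(cqe));
        cqe.status = ERR_SWFLUSH;
        cqe.wr_id = wr_id;
        cq->queue[cq->wptr++ & (SW_CQ_DEPTH - 1)] = cqe;
}

/* Poll drains software-generated completions before touching hardware ones. */
static struct sw_cqe *next_sw_cqe(struct sw_cq *cq)
{
        if (cq->rptr == cq->wptr)
                return NULL;
        return &cq->queue[cq->rptr & (SW_CQ_DEPTH - 1)];
}
```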
| /linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
| D | en_rx.c | 97 static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) in mlx5e_lro_update_hdr() argument 104 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); in mlx5e_lro_update_hdr() 108 u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN; in mlx5e_lro_update_hdr() 122 if (get_cqe_lro_tcppsh(cqe)) in mlx5e_lro_update_hdr() 127 tcp->ack_seq = cqe->lro_ack_seq_num; in mlx5e_lro_update_hdr() 128 tcp->window = cqe->lro_tcp_win; in mlx5e_lro_update_hdr() 132 ipv4->ttl = cqe->lro_min_ttl; in mlx5e_lro_update_hdr() 138 ipv6->hop_limit = cqe->lro_min_ttl; in mlx5e_lro_update_hdr() 144 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, in mlx5e_skb_set_hash() argument 147 u8 cht = cqe->rss_hash_type; in mlx5e_skb_set_hash() [all …]
|
| D | en_txrx.c | 39 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_get_cqe() local 40 int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK; in mlx5e_get_cqe() 49 return cqe; in mlx5e_get_cqe()
|
| D | en_tx.c | 342 struct mlx5_cqe64 *cqe; in mlx5e_poll_tx_cq() local 346 cqe = mlx5e_get_cqe(cq); in mlx5e_poll_tx_cq() 347 if (!cqe) in mlx5e_poll_tx_cq() 352 wqe_counter = be16_to_cpu(cqe->wqe_counter); in mlx5e_poll_tx_cq()
|
| D | en_main.c | 778 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); in mlx5e_create_cq() local 780 cqe->op_own = 0xf1; in mlx5e_create_cq()
|
| /linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
| D | cq.c | 185 struct t4_cqe cqe; in insert_recv_cqe() local 189 memset(&cqe, 0, sizeof(cqe)); in insert_recv_cqe() 190 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_recv_cqe() 195 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_recv_cqe() 196 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe() 218 struct t4_cqe cqe; in insert_sq_cqe() local 222 memset(&cqe, 0, sizeof(cqe)); in insert_sq_cqe() 223 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in insert_sq_cqe() 228 CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; in insert_sq_cqe() 229 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in insert_sq_cqe() [all …]
|
| D | t4.h | 263 struct t4_cqe cqe; member 632 static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) in t4_valid_cqe() argument 634 return (CQE_GENBIT(cqe) == cq->gen); in t4_valid_cqe() 637 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_hw_cqe() argument 656 *cqe = &cq->queue[cq->cidx]; in t4_next_hw_cqe() 676 static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) in t4_next_cqe() argument 683 *cqe = &cq->sw_queue[cq->sw_cidx]; in t4_next_cqe() 685 ret = t4_next_hw_cqe(cq, cqe); in t4_next_cqe()
|
| D | device.c | 117 void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) in c4iw_log_wr_stats() argument 130 le.cqe_sge_ts = CQE_TS(cqe); in c4iw_log_wr_stats() 131 if (SQ_TYPE(cqe)) { in c4iw_log_wr_stats() 133 le.opcode = CQE_OPCODE(cqe); in c4iw_log_wr_stats() 136 le.wr_id = CQE_WRID_SQ_IDX(cqe); in c4iw_log_wr_stats() 142 le.wr_id = CQE_WRID_MSN(cqe); in c4iw_log_wr_stats()
|
| D | iw_cxgb4.h | 988 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); 1027 extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
|
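cxgb4's t4_next_cqe() above first drains the driver's software queue and only then reads the hardware ring, where t4_valid_cqe() accepts an entry only if the generation bit written by the hardware matches the CQ's current generation; the generation flips on each wrap, the same disambiguation idea as the mlx4 owner-bit parity. A condensed sketch with invented field names and bit positions:

```c
#include <stdint.h>

#define CQ_DEPTH 128                          /* power of two */

struct t4ish_cqe {
        uint64_t bits_type_ts;                /* generation bit kept in the top bit here */
        uint64_t wr_id;
};

struct t4ish_cq {
        struct t4ish_cqe queue[CQ_DEPTH];     /* hardware-written ring */
        struct t4ish_cqe sw_queue[CQ_DEPTH];  /* software-generated (flush) ring */
        uint32_t cidx, sw_cidx, sw_pidx;
        uint8_t  gen;                         /* expected generation, flips on each wrap */
};

static int cqe_genbit(const struct t4ish_cqe *cqe)
{
        return (int)(cqe->bits_type_ts >> 63);
}

/* A hardware entry is valid only when its generation matches the CQ's. */
static int valid_cqe(struct t4ish_cq *cq, struct t4ish_cqe *cqe)
{
        return cqe_genbit(cqe) == cq->gen;
}

static int next_hw_cqe(struct t4ish_cq *cq, struct t4ish_cqe **cqe)
{
        if (!valid_cqe(cq, &cq->queue[cq->cidx]))
                return -1;                    /* nothing new from hardware */
        *cqe = &cq->queue[cq->cidx];
        return 0;
}

/* Software completions (e.g. flushed WRs) are consumed before hardware ones. */
static int next_cqe(struct t4ish_cq *cq, struct t4ish_cqe **cqe)
{
        if (cq->sw_cidx != cq->sw_pidx) {
                *cqe = &cq->sw_queue[cq->sw_cidx];
                return 0;
        }
        return next_hw_cqe(cq, cqe);
}
```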
| /linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
| D | pci.h | 130 MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); 138 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); 143 MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16); 150 MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); 155 MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8); 161 MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); 166 MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1); 172 MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1); 177 MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5); 182 MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
|
| D | pci.c | 644 char *cqe) in mlxsw_pci_cqe_sdq_handle() argument 669 char *cqe) in mlxsw_pci_cqe_rdq_handle() argument 690 if (mlxsw_pci_cqe_lag_get(cqe)) in mlxsw_pci_cqe_rdq_handle() 693 rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe); in mlxsw_pci_cqe_rdq_handle() 694 rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); in mlxsw_pci_cqe_rdq_handle() 696 byte_count = mlxsw_pci_cqe_byte_count_get(cqe); in mlxsw_pci_cqe_rdq_handle() 697 if (mlxsw_pci_cqe_crc_get(cqe)) in mlxsw_pci_cqe_rdq_handle() 726 char *cqe; in mlxsw_pci_cq_tasklet() local 730 while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { in mlxsw_pci_cq_tasklet() 731 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); in mlxsw_pci_cq_tasklet() [all …]
|
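The mlxsw entries are field accessors generated by the MLXSW_ITEM32() macro: each invocation names a byte offset, bit shift and bit width inside the CQE and expands into a helper such as mlxsw_pci_cqe_byte_count_get(). A userspace approximation of what those generated getters do (the real macro also emits setters and handles other item sizes):

```c
#include <arpa/inet.h>   /* ntohl() as a stand-in for be32_to_cpu() */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Generic accessor: read the big-endian 32-bit word at byte offset 'off'
 * inside the CQE buffer and extract 'width' bits starting at bit 'shift'.
 */
static uint32_t cqe_bits(const char *cqe, size_t off, unsigned shift, unsigned width)
{
        uint32_t word;

        memcpy(&word, cqe + off, sizeof(word));
        word = ntohl(word);
        return (word >> shift) & ((width == 32) ? 0xffffffffu : ((1u << width) - 1));
}

/* Wrappers mirroring a few of the fields listed above (offsets from pci.h). */
static uint32_t cqe_lag_get(const char *cqe)         { return cqe_bits(cqe, 0x00, 23, 1);  }
static uint32_t cqe_system_port_get(const char *cqe) { return cqe_bits(cqe, 0x00, 0, 16);  }
static uint32_t cqe_wqe_counter_get(const char *cqe) { return cqe_bits(cqe, 0x04, 16, 16); }
static uint32_t cqe_byte_count_get(const char *cqe)  { return cqe_bits(cqe, 0x04, 0, 14);  }
static uint32_t cqe_trap_id_get(const char *cqe)     { return cqe_bits(cqe, 0x08, 0, 8);   }
static uint32_t cqe_owner_get(const char *cqe)       { return cqe_bits(cqe, 0x0C, 0, 1);   }
```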
| /linux-4.4.14/drivers/infiniband/hw/mlx5/ |
| D | cq.c | 85 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local 88 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe() 91 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe() 92 return cqe; in get_sw_cqe() 121 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument 125 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req() 139 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req() 171 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument 185 be32_to_cpu(cqe->srqn)); in handle_responder() 191 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder() [all …]
|
| D | main.c | 1099 cq_attr.cqe = 128; in create_umr_res() 1189 struct ib_cq_init_attr cq_attr = {.cqe = 1}; in create_dev_resources()
|
| /linux-4.4.14/drivers/staging/rdma/ipath/ |
| D | ipath_cq.c | 63 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter() 64 head = cq->ibcq.cqe; in ipath_cq_enter() 147 if (tail > (u32) cq->ibcq.cqe) in ipath_poll_cq() 148 tail = (u32) cq->ibcq.cqe; in ipath_poll_cq() 154 if (tail >= cq->ibcq.cqe) in ipath_poll_cq() 205 int entries = attr->cqe; in ipath_create_cq() 288 cq->ibcq.cqe = entries; in ipath_create_cq() 376 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in ipath_resize_cq() argument 385 if (cqe < 1 || cqe > ib_ipath_max_cqes) { in ipath_resize_cq() 395 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); in ipath_resize_cq() [all …]
|
| D | ipath_verbs.h | 827 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
|
| /linux-4.4.14/drivers/infiniband/hw/qib/ |
| D | qib_cq.c | 66 if (head >= (unsigned) cq->ibcq.cqe) { in qib_cq_enter() 67 head = cq->ibcq.cqe; in qib_cq_enter() 154 if (tail > (u32) cq->ibcq.cqe) in qib_poll_cq() 155 tail = (u32) cq->ibcq.cqe; in qib_poll_cq() 161 if (tail >= cq->ibcq.cqe) in qib_poll_cq() 220 int entries = attr->cqe; in qib_create_cq() 304 cq->ibcq.cqe = entries; in qib_create_cq() 392 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in qib_resize_cq() argument 401 if (cqe < 1 || cqe > ib_qib_max_cqes) { in qib_resize_cq() 411 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); in qib_resize_cq() [all …]
|
| D | qib_verbs.h | 1031 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
|
| /linux-4.4.14/drivers/staging/rdma/hfi1/ |
| D | cq.c | 82 if (head >= (unsigned) cq->ibcq.cqe) { in hfi1_cq_enter() 83 head = cq->ibcq.cqe; in hfi1_cq_enter() 170 if (tail > (u32) cq->ibcq.cqe) in hfi1_poll_cq() 171 tail = (u32) cq->ibcq.cqe; in hfi1_poll_cq() 177 if (tail >= cq->ibcq.cqe) in hfi1_poll_cq() 242 unsigned int entries = attr->cqe; in hfi1_create_cq() 317 cq->ibcq.cqe = entries; in hfi1_create_cq() 405 int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) in hfi1_resize_cq() argument 414 if (cqe < 1 || cqe > hfi1_max_cqes) { in hfi1_resize_cq() 424 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); in hfi1_resize_cq() [all …]
|
| D | verbs.h | 1011 int hfi1_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
|
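ipath, qib and hfi1 (the three groups above) implement the CQ entirely in software: the driver writes work-completion entries into an array of ibcq.cqe + 1 slots and advances head/tail indices with explicit wraparound, the spare slot keeping a full queue distinguishable from an empty one (which is also why the create/resize paths size the buffer as cqe + 1). A stripped-down model, with a stand-in completion struct:

```c
#include <stdint.h>

#define CQ_ENTRIES 16                      /* corresponds to ibcq.cqe */

struct wc {                                /* stand-in for struct ib_uverbs_wc */
        uint64_t wr_id;
        uint32_t status;
};

struct soft_cq {
        struct wc queue[CQ_ENTRIES + 1];   /* one spare slot: full != empty */
        uint32_t head;                     /* producer index */
        uint32_t tail;                     /* consumer index */
};

/* Producer side (what the driver's cq_enter does); returns -1 if the CQ is full. */
static int cq_enter(struct soft_cq *cq, const struct wc *entry)
{
        uint32_t next = (cq->head >= CQ_ENTRIES) ? 0 : cq->head + 1;

        if (next == cq->tail)
                return -1;                 /* overflow: real drivers raise an error event */
        cq->queue[cq->head] = *entry;
        cq->head = next;
        return 0;
}

/* Consumer side (what poll_cq does); returns number of entries copied (0 or 1). */
static int cq_poll_one(struct soft_cq *cq, struct wc *entry)
{
        if (cq->tail == cq->head)
                return 0;
        *entry = cq->queue[cq->tail];
        cq->tail = (cq->tail >= CQ_ENTRIES) ? 0 : cq->tail + 1;
        return 1;
}
```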
| /linux-4.4.14/drivers/staging/rdma/ehca/ |
| D | ehca_reqs.c | 630 struct ehca_cqe *cqe; in ehca_poll_cq_one() local 637 cqe = (struct ehca_cqe *) in ehca_poll_cq_one() 639 if (!cqe) { in ehca_poll_cq_one() 651 if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) { in ehca_poll_cq_one() 656 qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number); in ehca_poll_cq_one() 660 my_cq->cq_number, cqe->local_qp_number); in ehca_poll_cq_one() 661 ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x", in ehca_poll_cq_one() 662 my_cq->cq_number, cqe->local_qp_number); in ehca_poll_cq_one() 673 cqe->local_qp_number, cqe->remote_qp_number); in ehca_poll_cq_one() 675 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", in ehca_poll_cq_one() [all …]
|
| D | ehca_cq.c | 121 int cqe = attr->cqe; in ehca_create_cq() local 139 if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) in ehca_create_cq() 189 param.nr_cqe = cqe + additional_cqe; in ehca_create_cq() 267 my_cq->ib_cq.cqe = my_cq->nr_of_entries = in ehca_create_cq() 375 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) in ehca_resize_cq() argument
|
| D | ipz_pt_fn.h | 144 struct ehca_cqe *cqe = ipz_qeit_get(queue); in ipz_qeit_is_valid() local 145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1)); in ipz_qeit_is_valid()
|
| D | ehca_iverbs.h | 140 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
|
| D | ehca_main.c | 565 cq_attr.cqe = 10; in ehca_create_aqp1()
|
| /linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
| D | en_rx.c | 719 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, in check_csum() argument 726 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); in check_csum() 728 if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && in check_csum() 734 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) in check_csum() 737 else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) in check_csum() 748 struct mlx4_cqe *cqe; in mlx4_en_process_rx_cq() local 772 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; in mlx4_en_process_rx_cq() 775 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, in mlx4_en_process_rx_cq() 787 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in mlx4_en_process_rx_cq() 790 ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, in mlx4_en_process_rx_cq() [all …]
|
| D | en_tx.c | 391 struct mlx4_cqe *cqe; in mlx4_en_process_tx_cq() local 414 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; in mlx4_en_process_tx_cq() 421 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, in mlx4_en_process_tx_cq() 429 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in mlx4_en_process_tx_cq() 431 struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe; in mlx4_en_process_tx_cq() 439 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; in mlx4_en_process_tx_cq() 448 timestamp = mlx4_en_get_cqe_ts(cqe); in mlx4_en_process_tx_cq() 467 cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; in mlx4_en_process_tx_cq()
|
| D | en_clock.c | 50 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe) in mlx4_en_get_cqe_ts() argument 53 struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe; in mlx4_en_get_cqe_ts()
|
| D | mlx4_en.h | 848 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
|
| D | en_netdev.c | 1580 struct mlx4_cqe *cqe = NULL; in mlx4_en_start_port() local 1582 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) + in mlx4_en_start_port() 1584 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK; in mlx4_en_start_port()
|
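Before a ring is activated, mlx4_en_start_port() (en_netdev.c above) walks the CQ buffer and sets every owner_sr_opcode to the hardware-owned polarity so the XNOR ownership test in the RX/TX poll loops cannot match stale memory; the "+ factor" in the same fragments skips to the 32-byte report inside a 64-byte CQE stride. A sketch of the priming step and the poll test together, using a simplified layout and sizes:

```c
#include <stdint.h>

#define OWNER_MASK 0x80
#define RING_SIZE  256                 /* power of two */
#define XNOR(a, b) (!(a) == !(b))      /* same idiom the driver uses */

struct mlx4ish_cqe {
        uint8_t payload[31];
        uint8_t owner_sr_opcode;       /* owner bit shares a byte with the opcode */
};

/* Prime every entry as hardware-owned before the CQ is handed to the device. */
static void prime_cq(struct mlx4ish_cqe *ring)
{
        for (int i = 0; i < RING_SIZE; i++)
                ring[i].owner_sr_opcode = OWNER_MASK;
}

/* Poll: consume while the owner bit agrees with the consumer-index lap parity. */
static int poll_cq(struct mlx4ish_cqe *ring, uint32_t *cons_index)
{
        int polled = 0;
        struct mlx4ish_cqe *cqe = &ring[*cons_index & (RING_SIZE - 1)];

        while (XNOR(cqe->owner_sr_opcode & OWNER_MASK, *cons_index & RING_SIZE)) {
                /* ... process the completion ... */
                polled++;
                (*cons_index)++;
                cqe = &ring[*cons_index & (RING_SIZE - 1)];
        }
        return polled;
}
```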
| /linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
| D | ocrdma_verbs.c | 1073 int entries = attr->cqe; in ocrdma_create_cq() 1134 ibcq->cqe = new_cnt; in ocrdma_resize_cq() 1145 struct ocrdma_cqe *cqe = NULL; in ocrdma_flush_cq() local 1147 cqe = cq->va; in ocrdma_flush_cq() 1155 if (is_cqe_valid(cq, cqe)) in ocrdma_flush_cq() 1157 cqe++; in ocrdma_flush_cq() 1710 struct ocrdma_cqe *cqe; in ocrdma_discard_cqes() local 1729 cqe = cq->va + cur_getp; in ocrdma_discard_cqes() 1734 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes() 1740 if (is_cqe_for_sq(cqe)) { in ocrdma_discard_cqes() [all …]
|
| D | ocrdma.h | 499 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) in is_cqe_valid() argument 502 cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; in is_cqe_valid() 506 static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) in is_cqe_for_sq() argument 508 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_for_sq() 512 static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) in is_cqe_invalidated() argument 514 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_invalidated() 518 static inline int is_cqe_imm(struct ocrdma_cqe *cqe) in is_cqe_imm() argument 520 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_imm() 524 static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) in is_cqe_wr_imm() argument 526 return (le32_to_cpu(cqe->flags_status_srcqpn) & in is_cqe_wr_imm()
|
| D | ocrdma_hw.c | 122 struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *) in ocrdma_get_mcqe() local 125 if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK)) in ocrdma_get_mcqe() 127 return cqe; in ocrdma_get_mcqe() 673 struct ocrdma_ae_mcqe *cqe) in ocrdma_dispatch_ibevent() argument 682 int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >> in ocrdma_dispatch_ibevent() 684 u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK; in ocrdma_dispatch_ibevent() 685 u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK; in ocrdma_dispatch_ibevent() 692 if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) { in ocrdma_dispatch_ibevent() 702 if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) { in ocrdma_dispatch_ibevent() 799 struct ocrdma_ae_mcqe *cqe) in ocrdma_process_grp5_aync() argument [all …]
|
| D | ocrdma_verbs.h | 93 int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
|
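ocrdma packs several predicates into a single little-endian word, flags_status_srcqpn: helpers such as is_cqe_valid() and is_cqe_for_sq() convert it with le32_to_cpu() and test individual bits, with the valid bit compared against the CQ's current phase so its meaning flips on wraparound. A small stand-alone equivalent with invented bit positions (the real masks are the OCRDMA_CQE_* constants):

```c
#include <endian.h>
#include <stdint.h>

/* Invented bit positions; the driver's masks are OCRDMA_CQE_VALID and friends. */
#define CQE_VALID     (1u << 31)
#define CQE_QTYPE_SQ  (1u << 30)

struct ocrdmaish_cqe {
        uint32_t flags_status_srcqpn;   /* written little-endian by the device */
};

struct ocrdmaish_cq {
        uint32_t phase;                 /* toggles between 0 and CQE_VALID on wrap */
};

/* Valid only when the CQE's valid bit matches the CQ's current phase. */
static int is_cqe_valid(const struct ocrdmaish_cq *cq,
                        const struct ocrdmaish_cqe *cqe)
{
        return (le32toh(cqe->flags_status_srcqpn) & CQE_VALID) == cq->phase;
}

/* The same word also says whether the completion belongs to the send queue. */
static int is_cqe_for_sq(const struct ocrdmaish_cqe *cqe)
{
        return !!(le32toh(cqe->flags_status_srcqpn) & CQE_QTYPE_SQ);
}
```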
| /linux-4.4.14/drivers/scsi/bnx2i/ |
| D | bnx2i.h | 506 struct cqe { struct 650 struct cqe *cq_virt; 654 struct cqe *cq_prod_qe; 655 struct cqe *cq_cons_qe; 656 struct cqe *cq_first_qe; 657 struct cqe *cq_last_qe; 774 struct cqe cqe; member 881 struct cqe *cqe);
|
| D | bnx2i_hwi.c | 1355 struct cqe *cqe) in bnx2i_process_scsi_cmd_resp() argument 1365 resp_cqe = (struct bnx2i_cmd_response *)cqe; in bnx2i_process_scsi_cmd_resp() 1400 resp_cqe = (struct bnx2i_cmd_response *)cqe; in bnx2i_process_scsi_cmd_resp() 1452 struct cqe *cqe) in bnx2i_process_login_resp() argument 1461 login = (struct bnx2i_login_response *) cqe; in bnx2i_process_login_resp() 1520 struct cqe *cqe) in bnx2i_process_text_resp() argument 1529 text = (struct bnx2i_text_response *) cqe; in bnx2i_process_text_resp() 1581 struct cqe *cqe) in bnx2i_process_tmf_resp() argument 1588 tmf_cqe = (struct bnx2i_tmf_response *)cqe; in bnx2i_process_tmf_resp() 1620 struct cqe *cqe) in bnx2i_process_logout_resp() argument [all …]
|
| D | bnx2i_init.c | 455 work->bnx2i_conn, &work->cqe); in bnx2i_percpu_thread_destroy()
|
| /linux-4.4.14/drivers/net/ethernet/ibm/ehea/ |
| D | ehea_main.c | 543 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) in ehea_check_cqe() argument 545 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5; in ehea_check_cqe() 546 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0) in ehea_check_cqe() 548 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) && in ehea_check_cqe() 549 (cqe->header_length == 0)) in ehea_check_cqe() 555 struct sk_buff *skb, struct ehea_cqe *cqe, in ehea_fill_skb() argument 558 int length = cqe->num_bytes_transfered - 4; /*remove CRC */ in ehea_fill_skb() 565 if (cqe->status & EHEA_CQE_BLIND_CKSUM) { in ehea_fill_skb() 567 skb->csum = csum_unfold(~cqe->inet_checksum_value); in ehea_fill_skb() 576 struct ehea_cqe *cqe) in get_skb_by_index() argument [all …]
|
| D | ehea_qmr.h | 378 struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
|
| /linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/ |
| D | bnx2x_cmn.c | 356 struct eth_end_agg_rx_cqe *cqe) in bnx2x_update_sge_prod() argument 369 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i]))); in bnx2x_update_sge_prod() 372 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod() 377 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1])); in bnx2x_update_sge_prod() 411 const struct eth_fast_path_rx_cqe *cqe, in bnx2x_get_rxhash() argument 416 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) { in bnx2x_get_rxhash() 419 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE; in bnx2x_get_rxhash() 424 return le32_to_cpu(cqe->rss_hash_result); in bnx2x_get_rxhash() 432 struct eth_fast_path_rx_cqe *cqe) in bnx2x_tpa_start() argument 475 le16_to_cpu(cqe->pars_flags.flags); in bnx2x_tpa_start() [all …]
|
| D | bnx2x_cmn.h | 786 union eth_rx_cqe *cqe; in bnx2x_has_rx_work() local 790 cqe = &fp->rx_comp_ring[cons]; in bnx2x_has_rx_work() 791 cqe_fp = &cqe->fast_path_cqe; in bnx2x_has_rx_work()
|
| D | bnx2x_ethtool.c | 2445 union eth_rx_cqe *cqe; in bnx2x_run_loopback() local 2597 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)]; in bnx2x_run_loopback() 2598 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; in bnx2x_run_loopback() 2603 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len); in bnx2x_run_loopback() 2611 data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset; in bnx2x_run_loopback()
|
| D | bnx2x_sp.h | 436 union event_ring_elem *cqe,
|
| D | bnx2x.h | 920 #define BNX2X_RX_SUM_FIX(cqe) \ argument 921 BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
|
| D | bnx2x_sp.c | 1617 union event_ring_elem *cqe, in bnx2x_complete_vlan_mac() argument 1637 if (cqe->message.error) in bnx2x_complete_vlan_mac()
|
| D | bnx2x_main.c | 1132 u32 *cqe = (u32 *)&fp->rx_comp_ring[j]; in bnx2x_panic_dump() local 1135 i, j, cqe[0], cqe[1], cqe[2], cqe[3]); in bnx2x_panic_dump()
|
| /linux-4.4.14/drivers/nvme/host/ |
| D | pci.c | 282 struct nvme_completion *cqe) in special_completion() argument 289 cqe->command_id, le16_to_cpup(&cqe->sq_id)); in special_completion() 295 cqe->command_id, le16_to_cpup(&cqe->sq_id)); in special_completion() 314 struct nvme_completion *cqe) in async_req_completion() argument 316 u32 result = le32_to_cpup(&cqe->result); in async_req_completion() 317 u16 status = le16_to_cpup(&cqe->status) >> 1; in async_req_completion() 334 struct nvme_completion *cqe) in abort_completion() argument 338 u16 status = le16_to_cpup(&cqe->status) >> 1; in abort_completion() 339 u32 result = le32_to_cpup(&cqe->result); in abort_completion() 348 struct nvme_completion *cqe) in async_completion() argument [all …]
|
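In the NVMe driver the completion handlers above read cqe->result and cqe->status with le*_to_cpup() and shift the status right by one: bit 0 of the NVMe completion status word is the phase tag, which flips each time the queue wraps, so it is stripped before the status code is interpreted. A sketch of that 16-byte layout and the poll test (field names simplified):

```c
#include <endian.h>
#include <stdint.h>

/* 16-byte NVMe-style completion queue entry (simplified field names). */
struct nvme_cqe {
        uint32_t result;        /* command-specific result, little-endian */
        uint32_t reserved;
        uint16_t sq_head;
        uint16_t sq_id;
        uint16_t command_id;
        uint16_t status;        /* bit 0 = phase tag, bits 15:1 = status code */
};

/* An entry is new only when its phase tag matches the queue's current phase. */
static int cqe_pending(const struct nvme_cqe *cqe, uint16_t phase)
{
        return (le16toh(cqe->status) & 1) == phase;
}

static void handle_cqe(const struct nvme_cqe *cqe)
{
        uint16_t status = le16toh(cqe->status) >> 1;   /* drop the phase bit */
        uint32_t result = le32toh(cqe->result);

        (void)status;
        (void)result;
        /* ... look up the request by command_id and complete it ... */
}
```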
| /linux-4.4.14/include/uapi/rdma/ |
| D | ib_user_verbs.h | 353 __u32 cqe; member 362 __u32 cqe; member 372 __u32 cqe; member 384 __u32 cqe; member 389 __u32 cqe; member
|
| /linux-4.4.14/include/linux/mlx5/ |
| D | device.h | 614 static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) in get_cqe_lro_tcppsh() argument 616 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; in get_cqe_lro_tcppsh() 619 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) in get_cqe_l4_hdr_type() argument 621 return (cqe->l4_hdr_type_etc >> 4) & 0x7; in get_cqe_l4_hdr_type() 624 static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) in cqe_has_vlan() argument 626 return !!(cqe->l4_hdr_type_etc & 0x1); in cqe_has_vlan()
|
| /linux-4.4.14/drivers/net/ethernet/qlogic/qed/ |
| D | qed_spq.c | 381 struct eth_slow_path_rx_cqe *cqe, in qed_cqe_completion() argument 388 return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL); in qed_cqe_completion() 392 struct eth_slow_path_rx_cqe *cqe) in qed_eth_cqe_completion() argument 396 rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); in qed_eth_cqe_completion() 400 cqe->ramrod_cmd_id); in qed_eth_cqe_completion()
|
| D | qed_sp.h | 45 struct eth_slow_path_rx_cqe *cqe);
|
| D | qed_l2.c | 1665 struct eth_slow_path_rx_cqe *cqe) in qed_fp_cqe_completion() argument 1668 cqe); in qed_fp_cqe_completion()
|
| /linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
| D | ipoib_verbs.c | 177 cq_attr.cqe = size; in ipoib_transport_dev_init() 185 cq_attr.cqe = ipoib_sendq_size; in ipoib_transport_dev_init()
|
| /linux-4.4.14/include/linux/qed/ |
| D | qed_eth_if.h | 156 struct eth_slow_path_rx_cqe *cqe);
|
| /linux-4.4.14/drivers/scsi/lpfc/ |
| D | lpfc_sli.h | 45 } cqe; member
|
| D | lpfc_sli.c | 364 struct lpfc_cqe *cqe; in lpfc_sli4_cq_get() local 372 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) in lpfc_sli4_cq_get() 379 cqe = q->qe[q->hba_index].cqe; in lpfc_sli4_cq_get() 392 return cqe; in lpfc_sli4_cq_get() 422 temp_qe = q->qe[q->host_index].cqe; in lpfc_sli4_cq_release() 3495 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { in lpfc_sli_handle_slow_ring_event_s4() 6715 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { in lpfc_sli4_mbox_completions_pending() 6716 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; in lpfc_sli4_mbox_completions_pending() 11404 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); in lpfc_sli4_fcp_xri_abort_event_proc() 11433 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); in lpfc_sli4_els_xri_abort_event_proc() [all …]
|
| D | lpfc_sli4.h | 116 struct lpfc_cqe *cqe; member
|
| D | lpfc_init.c | 855 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { in lpfc_sli4_free_sp_events() 4636 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { in lpfc_sli4_async_event_proc() 4639 &cq_event->cqe.acqe_link); in lpfc_sli4_async_event_proc() 4642 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip); in lpfc_sli4_async_event_proc() 4646 &cq_event->cqe.acqe_dcbx); in lpfc_sli4_async_event_proc() 4650 &cq_event->cqe.acqe_grp5); in lpfc_sli4_async_event_proc() 4653 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc); in lpfc_sli4_async_event_proc() 4656 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli); in lpfc_sli4_async_event_proc() 4662 &cq_event->cqe.mcqe_cmpl)); in lpfc_sli4_async_event_proc() 8006 struct lpfc_cq_event *cqe; in lpfc_sli4_cq_event_release_all() local [all …]
|
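lpfc queues deferred completions as lpfc_cq_event objects whose cqe member is a union of the different completion formats, and a worker such as lpfc_sli4_async_event_proc() above switches on the event code to pick the right decoder. A generic sketch of that union-plus-type-code dispatch, with invented event types:

```c
#include <stdint.h>
#include <stdio.h>

/* Invented event codes standing in for the driver's trailer/event codes. */
enum cq_event_code { EV_LINK = 1, EV_FC = 2, EV_SLI = 3 };

struct link_evt { uint32_t speed; };
struct fc_evt   { uint32_t topology; };
struct sli_evt  { uint32_t subtype; };

/* One deferred event; the union mirrors lpfc_cq_event's 'cqe' member. */
struct cq_event {
        enum cq_event_code code;
        union {
                struct link_evt link;
                struct fc_evt   fc;
                struct sli_evt  sli;
        } cqe;
};

static void process_async_event(const struct cq_event *ev)
{
        switch (ev->code) {
        case EV_LINK:
                printf("link event, speed %u\n", ev->cqe.link.speed);
                break;
        case EV_FC:
                printf("fc event, topology %u\n", ev->cqe.fc.topology);
                break;
        case EV_SLI:
                printf("sli event, subtype %u\n", ev->cqe.sli.subtype);
                break;
        default:
                printf("unexpected event code %d\n", ev->code);
                break;
        }
}
```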
| /linux-4.4.14/net/rds/ |
| D | iw_cm.c | 202 cq_attr.cqe = send_size; in rds_iw_init_qp_attrs() 213 cq_attr.cqe = recv_size; in rds_iw_init_qp_attrs()
|
| D | ib_cm.c | 375 cq_attr.cqe = ic->i_send_ring.w_nr + 1; in rds_ib_setup_qp() 387 cq_attr.cqe = ic->i_recv_ring.w_nr; in rds_ib_setup_qp()
|
| /linux-4.4.14/drivers/atm/ |
| D | firestream.c | 597 struct FS_QENTRY *cqe; in submit_qentry() local 609 cqe = bus_to_virt (wp); in submit_qentry() 610 if (qe != cqe) { in submit_qentry() 611 fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe); in submit_qentry()
|
| /linux-4.4.14/drivers/infiniband/hw/nes/ |
| D | nes_verbs.c | 1515 int entries = attr->cqe; in nes_create_cq() 1556 nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; in nes_create_cq() 3617 struct nes_hw_cqe cqe; in nes_poll_cq() local 3645 cqe = nescq->hw_cq.cq_vbase[head]; in nes_poll_cq() 3646 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]); in nes_poll_cq() 3650 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) | in nes_poll_cq() 3656 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) { in nes_poll_cq() 3659 err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]); in nes_poll_cq() 3674 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) { in nes_poll_cq() 3722 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]); in nes_poll_cq() [all …]
|
| /linux-4.4.14/drivers/net/ethernet/qlogic/qede/ |
| D | qede_main.c | 858 union eth_rx_cqe *cqe; in qede_rx_int() local 865 cqe = (union eth_rx_cqe *) in qede_rx_int() 867 cqe_type = cqe->fast_path_regular.type; in qede_rx_int() 872 (struct eth_slow_path_rx_cqe *)cqe); in qede_rx_int() 881 fp_cqe = &cqe->fast_path_regular; in qede_rx_int() 898 le16_to_cpu(cqe->fast_path_regular.pars_flags.flags); in qede_rx_int()
|
| /linux-4.4.14/drivers/infiniband/core/ |
| D | uverbs_cmd.c | 1392 attr.cqe = cmd->cqe; in create_cq() 1419 resp.base.cqe = cq->cqe; in create_cq() 1491 cmd_ex.cqe = cmd.cqe; in ib_uverbs_create_cq() 1576 ret = cq->device->resize_cq(cq, cmd.cqe, &udata); in ib_uverbs_resize_cq() 1580 resp.cqe = cq->cqe; in ib_uverbs_resize_cq() 1583 &resp, sizeof resp.cqe)) in ib_uverbs_resize_cq()
|
| D | verbs.c | 1193 int ib_resize_cq(struct ib_cq *cq, int cqe) in ib_resize_cq() argument 1196 cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS; in ib_resize_cq()
|
| D | mad.c | 3182 cq_attr.cqe = cq_size; in ib_mad_port_open()
|
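The core ib_resize_cq() above is a guarded indirect call: the verbs layer forwards the request to the provider's resize_cq hook when one exists and returns -ENOSYS otherwise, and the uverbs path reports back ibcq->cqe rather than the requested count because providers may round the size up. A minimal model of that optional-operation dispatch (names invented):

```c
#include <errno.h>

struct cq;

/* Per-provider operations; resize_cq may be left NULL. */
struct cq_device_ops {
        int (*resize_cq)(struct cq *cq, int cqe);
};

struct cq {
        const struct cq_device_ops *ops;
        int cqe;                       /* actual capacity, possibly rounded up */
};

/* Core-layer wrapper: forward to the provider or report "not implemented". */
static int core_resize_cq(struct cq *cq, int cqe)
{
        return cq->ops->resize_cq ? cq->ops->resize_cq(cq, cqe) : -ENOSYS;
}

/* Example provider: rounds the requested count up to a power of two. */
static int demo_resize_cq(struct cq *cq, int cqe)
{
        int n = 1;

        while (n < cqe + 1)
                n <<= 1;
        cq->cqe = n - 1;               /* several providers expose size - 1 here */
        return 0;
}
```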
| /linux-4.4.14/drivers/scsi/bnx2fc/ |
| D | bnx2fc_hwi.c | 1014 struct fcoe_cqe *cqe; in bnx2fc_process_new_cqes() local 1033 cqe = &cq[cq_cons]; in bnx2fc_process_new_cqes() 1035 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == in bnx2fc_process_new_cqes() 1068 cqe++; in bnx2fc_process_new_cqes() 1074 cqe = cq; in bnx2fc_process_new_cqes()
|
| /linux-4.4.14/include/rdma/ |
| D | ib_verbs.h | 190 unsigned int cqe; member 1316 int cqe; member 1716 int (*resize_cq)(struct ib_cq *cq, int cqe, 2483 int ib_resize_cq(struct ib_cq *cq, int cqe);
|
| /linux-4.4.14/drivers/staging/rdma/amso1100/ |
| D | c2_cq.c | 300 cq->ibcq.cqe = entries - 1; in c2_init_cq()
|
| D | c2_provider.c | 297 int entries = attr->cqe; in c2_create_cq()
|
| /linux-4.4.14/net/sunrpc/xprtrdma/ |
| D | verbs.c | 627 cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1; in rpcrdma_ep_create() 644 cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1; in rpcrdma_ep_create()
|
| D | svc_rdma_transport.c | 942 cq_attr.cqe = newxprt->sc_sq_depth; in svc_rdma_accept() 952 cq_attr.cqe = newxprt->sc_max_requests; in svc_rdma_accept()
|
| /linux-4.4.14/net/9p/ |
| D | trans_rdma.c | 698 cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1; in rdma_create_trans()
|
| /linux-4.4.14/drivers/infiniband/ulp/iser/ |
| D | iser_verbs.c | 117 cq_attr.cqe = max_cqe; in iser_create_device_ib_res()
|
| /linux-4.4.14/drivers/net/ethernet/broadcom/ |
| D | cnic.c | 2919 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) in cnic_l2_completion() local 2935 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; in cnic_l2_completion() 2936 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; in cnic_l2_completion() 2938 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); in cnic_l2_completion()
|
| /linux-4.4.14/drivers/infiniband/ulp/srpt/ |
| D | ib_srpt.c | 2032 cq_attr.cqe = ch->rq_size + srp_sq_size; in srpt_create_ch_ib() 2070 __func__, ch->cq->cqe, qp_init->cap.max_send_sge, in srpt_create_ch_ib()
|
| /linux-4.4.14/drivers/infiniband/ulp/srp/ |
| D | ib_srp.c | 500 cq_attr.cqe = target->queue_size + 1; in srp_create_ch_ib() 509 cq_attr.cqe = m * target->queue_size; in srp_create_ch_ib()
|
| /linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
| D | o2iblnd.c | 747 cq_attr.cqe = IBLND_CQ_ENTRIES(version); in kiblnd_create_conn()
|
| /linux-4.4.14/drivers/infiniband/ulp/isert/ |
| D | ib_isert.c | 320 cq_attr.cqe = max_cqe; in isert_alloc_comps()
|