Searched refs:ibcq (Results 1 - 36 of 36) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_cq.c
63 if (head >= (unsigned) cq->ibcq.cqe) { ipath_cq_enter()
64 head = cq->ibcq.cqe; ipath_cq_enter()
70 if (cq->ibcq.event_handler) { ipath_cq_enter()
73 ev.device = cq->ibcq.device; ipath_cq_enter()
74 ev.element.cq = &cq->ibcq; ipath_cq_enter()
76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); ipath_cq_enter()
115 to_idev(cq->ibcq.device)->n_wqe_errs++; ipath_cq_enter()
120 * @ibcq: the completion queue to poll
129 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) ipath_poll_cq() argument
131 struct ipath_cq *cq = to_icq(ibcq); ipath_poll_cq()
147 if (tail > (u32) cq->ibcq.cqe) ipath_poll_cq()
148 tail = (u32) cq->ibcq.cqe; ipath_poll_cq()
154 if (tail >= cq->ibcq.cqe) ipath_poll_cq()
181 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); send_complete()
279 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. ipath_create_cq()
283 cq->ibcq.cqe = entries; ipath_create_cq()
292 ret = &cq->ibcq; ipath_create_cq()
308 * @ibcq: the completion queue to destroy.
314 int ipath_destroy_cq(struct ib_cq *ibcq) ipath_destroy_cq() argument
316 struct ipath_ibdev *dev = to_idev(ibcq->device); ipath_destroy_cq()
317 struct ipath_cq *cq = to_icq(ibcq); ipath_destroy_cq()
334 * @ibcq: the completion queue
342 int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) ipath_req_notify_cq() argument
344 struct ipath_cq *cq = to_icq(ibcq); ipath_req_notify_cq()
367 * @ibcq: the completion queue
371 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) ipath_resize_cq() argument
373 struct ipath_cq *cq = to_icq(ibcq); ipath_resize_cq()
415 if (head > (u32) cq->ibcq.cqe) ipath_resize_cq()
416 head = (u32) cq->ibcq.cqe; ipath_resize_cq()
418 if (tail > (u32) cq->ibcq.cqe) ipath_resize_cq()
419 tail = (u32) cq->ibcq.cqe; ipath_resize_cq()
421 n = cq->ibcq.cqe + 1 + head - tail; ipath_resize_cq()
433 if (tail == (u32) cq->ibcq.cqe) ipath_resize_cq()
438 cq->ibcq.cqe = cqe; ipath_resize_cq()
447 struct ipath_ibdev *dev = to_idev(ibcq->device); ipath_resize_cq()
ipath_verbs.h
205 /* these are actually size ibcq.cqe + 1 */
215 struct ib_cq ibcq; member in struct:ipath_cq
670 static inline struct ipath_cq *to_icq(struct ib_cq *ibcq) to_icq() argument
672 return container_of(ibcq, struct ipath_cq, ibcq); to_icq()
808 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
814 int ipath_destroy_cq(struct ib_cq *ibcq);
816 int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
818 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
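
All of these hits trace back to one idiom: each provider embeds the core struct ib_cq inside its own CQ structure and recovers the outer object with container_of(), as in to_icq() above. A minimal sketch of the pattern, assuming a hypothetical foo_cq provider struct (names illustrative, not from any driver in these results):

    #include <linux/kernel.h>      /* container_of() */
    #include <linux/types.h>       /* u32 */
    #include <rdma/ib_verbs.h>     /* struct ib_cq */

    /* Hypothetical provider CQ: ib_cq is embedded, not pointed to, so the
     * core object and the driver-private state share one allocation. */
    struct foo_cq {
        struct ib_cq ibcq;         /* must stay embedded for container_of() */
        u32 head;                  /* driver-private ring indices */
        u32 tail;
    };

    /* Recover the provider object from the core's ib_cq pointer; same
     * shape as to_icq(), to_mcq(), to_c4iw_cq() and to_nescq() below. */
    static inline struct foo_cq *to_foo_cq(struct ib_cq *ibcq)
    {
        return container_of(ibcq, struct foo_cq, ibcq);
    }
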
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_cq.c
66 if (head >= (unsigned) cq->ibcq.cqe) { qib_cq_enter()
67 head = cq->ibcq.cqe; qib_cq_enter()
73 if (cq->ibcq.event_handler) { qib_cq_enter()
76 ev.device = cq->ibcq.device; qib_cq_enter()
77 ev.element.cq = &cq->ibcq; qib_cq_enter()
79 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); qib_cq_enter()
127 * @ibcq: the completion queue to poll
136 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) qib_poll_cq() argument
138 struct qib_cq *cq = to_icq(ibcq); qib_poll_cq()
154 if (tail > (u32) cq->ibcq.cqe) qib_poll_cq()
155 tail = (u32) cq->ibcq.cqe; qib_poll_cq()
161 if (tail >= cq->ibcq.cqe) qib_poll_cq()
195 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); send_complete()
294 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. qib_create_cq()
299 cq->ibcq.cqe = entries; qib_create_cq()
308 ret = &cq->ibcq; qib_create_cq()
324 * @ibcq: the completion queue to destroy.
330 int qib_destroy_cq(struct ib_cq *ibcq) qib_destroy_cq() argument
332 struct qib_ibdev *dev = to_idev(ibcq->device); qib_destroy_cq()
333 struct qib_cq *cq = to_icq(ibcq); qib_destroy_cq()
350 * @ibcq: the completion queue
358 int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) qib_req_notify_cq() argument
360 struct qib_cq *cq = to_icq(ibcq); qib_req_notify_cq()
383 * @ibcq: the completion queue
387 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) qib_resize_cq() argument
389 struct qib_cq *cq = to_icq(ibcq); qib_resize_cq()
431 if (head > (u32) cq->ibcq.cqe) qib_resize_cq()
432 head = (u32) cq->ibcq.cqe; qib_resize_cq()
434 if (tail > (u32) cq->ibcq.cqe) qib_resize_cq()
435 tail = (u32) cq->ibcq.cqe; qib_resize_cq()
437 n = cq->ibcq.cqe + 1 + head - tail; qib_resize_cq()
449 if (tail == (u32) cq->ibcq.cqe) qib_resize_cq()
454 cq->ibcq.cqe = cqe; qib_resize_cq()
463 struct qib_ibdev *dev = to_idev(ibcq->device); qib_resize_cq()
qib_verbs.h
260 /* these are actually size ibcq.cqe + 1 */
270 struct ib_cq ibcq; member in struct:qib_cq
823 static inline struct qib_cq *to_icq(struct ib_cq *ibcq) to_icq() argument
825 return container_of(ibcq, struct qib_cq, ibcq); to_icq()
1010 int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
1016 int qib_destroy_cq(struct ib_cq *ibcq);
1018 int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
1020 int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
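
ipath and qib allocate ibcq.cqe + 1 ring slots (per the "these are actually size ibcq.cqe + 1" comments above), so a full ring stays distinguishable from an empty one. The occupancy check in ipath_resize_cq() and qib_resize_cq() then reduces to the sketch below; line 421/437 above is the wrapped branch.

    #include <linux/types.h>

    /* Occupancy of a CQ ring holding cqe + 1 slots; head and tail are
     * indices in [0, cqe]. Sketch of the resize-path math above. */
    static u32 cq_ring_occupancy(u32 head, u32 tail, u32 cqe)
    {
        if (head < tail)                  /* producer wrapped past the end */
            return cqe + 1 + head - tail;
        return head - tail;               /* linear case; 0 means empty */
    }
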
/linux-4.1.27/drivers/infiniband/hw/mlx4/
cq.c
44 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; mlx4_ib_cq_comp() local
45 ibcq->comp_handler(ibcq, ibcq->cq_context); mlx4_ib_cq_comp()
51 struct ib_cq *ibcq; mlx4_ib_cq_event() local
59 ibcq = &to_mibcq(cq)->ibcq; mlx4_ib_cq_event()
60 if (ibcq->event_handler) { mlx4_ib_cq_event()
61 event.device = ibcq->device; mlx4_ib_cq_event()
63 event.element.cq = ibcq; mlx4_ib_cq_event()
64 ibcq->event_handler(&event, ibcq->cq_context); mlx4_ib_cq_event()
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); get_sw_cqe()
84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; get_sw_cqe()
186 cq->ibcq.cqe = entries - 1; mlx4_ib_create_cq()
250 return &cq->ibcq; mlx4_ib_create_cq()
262 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); mlx4_ib_create_cq()
346 cqe = get_cqe(cq, i & cq->ibcq.cqe); mlx4_ib_cq_resize_copy_cqes()
352 memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size); mlx4_ib_cq_resize_copy_cqes()
357 cqe = get_cqe(cq, ++i & cq->ibcq.cqe); mlx4_ib_cq_resize_copy_cqes()
363 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mlx4_ib_resize_cq() argument
365 struct mlx4_ib_dev *dev = to_mdev(ibcq->device); mlx4_ib_resize_cq()
366 struct mlx4_ib_cq *cq = to_mcq(ibcq); mlx4_ib_resize_cq()
378 if (entries == ibcq->cqe + 1) { mlx4_ib_resize_cq()
388 if (ibcq->uobject) { mlx4_ib_resize_cq()
412 if (ibcq->uobject) { mlx4_ib_resize_cq()
414 cq->ibcq.cqe = cq->resize_buf->cqe; mlx4_ib_resize_cq()
429 tmp_cqe = cq->ibcq.cqe; mlx4_ib_resize_cq()
431 cq->ibcq.cqe = cq->resize_buf->cqe; mlx4_ib_resize_cq()
446 if (!ibcq->uobject) mlx4_ib_resize_cq()
692 struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device); mlx4_ib_poll_one()
694 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); mlx4_ib_poll_one()
696 cq->ibcq.cqe = cq->resize_buf->cqe; mlx4_ib_poll_one()
712 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, mlx4_ib_poll_one()
730 msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, mlx4_ib_poll_one()
846 if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { mlx4_ib_poll_one()
882 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) mlx4_ib_poll_cq() argument
884 struct mlx4_ib_cq *cq = to_mcq(ibcq); mlx4_ib_poll_cq()
889 struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); mlx4_ib_poll_cq()
914 int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) mlx4_ib_arm_cq() argument
916 mlx4_cq_arm(&to_mcq(ibcq)->mcq, mlx4_ib_arm_cq()
919 to_mdev(ibcq->device)->uar_map, mlx4_ib_arm_cq()
920 MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock)); mlx4_ib_arm_cq()
941 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) __mlx4_ib_cq_clean()
949 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); __mlx4_ib_cq_clean()
957 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); __mlx4_ib_cq_clean()
mlx4_ib.h
104 struct ib_cq ibcq; member in struct:mlx4_ib_cq
578 static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) to_mcq() argument
580 return container_of(ibcq, struct mlx4_ib_cq, ibcq); to_mcq()
670 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
675 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
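
get_sw_cqe() above depends on mlx4 storing entries - 1 in ibcq.cqe for a power-of-two ring (see mlx4_ib_create_cq() line 186): the stored value doubles as the index mask in n & cq->ibcq.cqe, while n & (cq->ibcq.cqe + 1) isolates the consumer's lap-parity bit. Hardware flips each CQE's owner bit once per pass around the ring, so an entry is software-owned exactly when the two parities agree. A sketch of the test with the owner bit already extracted:

    #include <linux/types.h>

    /* True when the CQE at consumer index n belongs to software; cqe is
     * ibcq.cqe, i.e. ring size minus one (ring size a power of two). */
    static bool cqe_is_sw_owned(u8 owner_bit, u32 n, u32 cqe)
    {
        bool hw_parity = !!owner_bit;         /* parity hardware wrote */
        bool sw_parity = !!(n & (cqe + 1));   /* consumer's current lap */

        return hw_parity == sw_parity;        /* mismatch: still hardware's */
    }
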
/linux-4.1.27/drivers/infiniband/hw/mlx5/
cq.c
41 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; mlx5_ib_cq_comp() local
43 ibcq->comp_handler(ibcq, ibcq->cq_context); mlx5_ib_cq_comp()
49 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); mlx5_ib_cq_event()
50 struct ib_cq *ibcq = &cq->ibcq; mlx5_ib_cq_event() local
59 if (ibcq->event_handler) { mlx5_ib_cq_event()
62 event.element.cq = ibcq; mlx5_ib_cq_event()
63 ibcq->event_handler(&event, ibcq->cq_context); mlx5_ib_cq_event()
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { get_sw_cqe()
406 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); mlx5_poll_one()
546 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) mlx5_ib_poll_cq() argument
548 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_poll_cq()
573 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) mlx5_ib_arm_cq() argument
575 struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; mlx5_ib_arm_cq()
578 mlx5_cq_arm(&to_mcq(ibcq)->mcq, mlx5_ib_arm_cq()
583 to_mcq(ibcq)->mcq.cons_index); mlx5_ib_arm_cq()
764 cq->ibcq.cqe = entries - 1; mlx5_ib_create_cq()
811 return &cq->ibcq; mlx5_ib_create_cq()
873 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) __mlx5_ib_cq_clean()
880 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); __mlx5_ib_cq_clean()
887 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); __mlx5_ib_cq_clean()
1013 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); copy_resize_cqes()
1067 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mlx5_ib_resize_cq() argument
1069 struct mlx5_ib_dev *dev = to_mdev(ibcq->device); mlx5_ib_resize_cq()
1070 struct mlx5_ib_cq *cq = to_mcq(ibcq); mlx5_ib_resize_cq()
1091 if (entries == ibcq->cqe + 1) mlx5_ib_resize_cq()
1138 cq->ibcq.cqe = entries - 1; mlx5_ib_resize_cq()
1157 cq->ibcq.cqe = entries - 1; mlx5_ib_resize_cq()
1180 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) mlx5_ib_get_cqe_size() argument
1184 if (!ibcq) mlx5_ib_get_cqe_size()
1187 cq = to_mcq(ibcq); mlx5_ib_get_cqe_size()
mlx5_ib.h
268 struct ib_cq ibcq; member in struct:mlx5_ib_cq
466 static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) to_mcq() argument
468 return container_of(ibcq, struct mlx5_ib_cq, ibcq); to_mcq()
563 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
564 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
566 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
609 int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
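
mlx5_ib_arm_cq() (like mlx4_ib_arm_cq() above) first decodes the verbs notify flags into a doorbell request type: IB_CQ_SOLICITED arms the CQ for solicited completions only, IB_CQ_NEXT_COMP for any completion. A sketch of that decode, with placeholder opcodes standing in for the real MLX5_CQ_DB_* encodings:

    #include <rdma/ib_verbs.h>

    enum fake_db_req {            /* illustrative values, not the HW encoding */
        DB_REQ_NOTIFY_SOLICITED,
        DB_REQ_NOTIFY_ANY,
    };

    static enum fake_db_req decode_arm_flags(enum ib_cq_notify_flags flags)
    {
        /* Mask off IB_CQ_REPORT_MISSED_EVENTS before comparing. */
        return (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
               DB_REQ_NOTIFY_SOLICITED : DB_REQ_NOTIFY_ANY;
    }
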
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_cq.c
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); next_cqe_sw()
235 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); mthca_cq_completion()
259 event.element.cq = &cq->ibcq; mthca_cq_event()
260 if (cq->ibcq.event_handler) mthca_cq_event()
261 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); mthca_cq_event()
295 cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe)); mthca_cq_clean()
297 if (prod_index == cq->cons_index + cq->ibcq.cqe) mthca_cq_clean()
309 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); mthca_cq_clean()
315 memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), mthca_cq_clean()
321 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); mthca_cq_clean()
340 if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) && mthca_cq_resize_copy_cqes()
341 cq->ibcq.cqe < cq->resize_buf->cqe) { mthca_cq_resize_copy_cqes()
342 cq->cons_index &= cq->ibcq.cqe; mthca_cq_resize_copy_cqes()
343 if (cqe_sw(get_cqe(cq, cq->ibcq.cqe))) mthca_cq_resize_copy_cqes()
344 cq->cons_index -= cq->ibcq.cqe + 1; mthca_cq_resize_copy_cqes()
347 for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i) mthca_cq_resize_copy_cqes()
350 get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE); mthca_cq_resize_copy_cqes()
662 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, mthca_poll_cq() argument
665 struct mthca_dev *dev = to_mdev(ibcq->device); mthca_poll_cq()
666 struct mthca_cq *cq = to_mcq(ibcq); mthca_poll_cq()
706 cq->cons_index &= cq->ibcq.cqe; mthca_poll_cq()
714 tcqe = cq->ibcq.cqe; mthca_poll_cq()
716 cq->ibcq.cqe = cq->resize_buf->cqe; mthca_poll_cq()
744 int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) mthca_arbel_arm_cq() argument
746 struct mthca_cq *cq = to_mcq(ibcq); mthca_arbel_arm_cq()
770 to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL, mthca_arbel_arm_cq()
771 MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock)); mthca_arbel_arm_cq()
784 cq->ibcq.cqe = nent - 1; mthca_init_cq()
873 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); mthca_init_cq()
947 mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); mthca_free_cq()
mthca_provider.h
202 struct ib_cq ibcq; member in struct:mthca_cq
324 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) to_mcq() argument
326 return container_of(ibcq, struct mthca_cq, ibcq); to_mcq()
mthca_provider.c
704 return &cq->ibcq; mthca_create_cq()
767 static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) mthca_resize_cq() argument
769 struct mthca_dev *dev = to_mdev(ibcq->device); mthca_resize_cq()
770 struct mthca_cq *cq = to_mcq(ibcq); mthca_resize_cq()
781 if (entries == ibcq->cqe + 1) { mthca_resize_cq()
821 tcqe = cq->ibcq.cqe; mthca_resize_cq()
823 cq->ibcq.cqe = cq->resize_buf->cqe; mthca_resize_cq()
835 ibcq->cqe = entries - 1; mthca_resize_cq()
mthca_dev.h
494 int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_ev.c
89 event.device = chp->ibcq.device; post_qp_event()
91 event.element.cq = &chp->ibcq; post_qp_event()
99 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); post_qp_event()
178 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); iwch_ev_dispatch()
iwch_provider.h
102 struct ib_cq ibcq; member in struct:iwch_cq
112 static inline struct iwch_cq *to_iwch_cq(struct ib_cq *ibcq) to_iwch_cq() argument
114 return container_of(ibcq, struct iwch_cq, ibcq); to_iwch_cq()
334 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
iwch_qp.c
828 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); __flush_qp()
842 (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); __flush_qp()
864 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); flush_qp()
869 (*schp->ibcq.comp_handler)(&schp->ibcq, flush_qp()
870 schp->ibcq.cq_context); flush_qp()
iwch_cq.c
195 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) iwch_poll_cq() argument
203 chp = to_iwch_cq(ibcq); iwch_poll_cq()
iwch_provider.c
191 chp->ibcq.cqe = 1 << chp->cq.size_log2; iwch_create_cq()
207 iwch_destroy_cq(&chp->ibcq); iwch_create_cq()
234 iwch_destroy_cq(&chp->ibcq); iwch_create_cq()
242 return &chp->ibcq; iwch_create_cq()
293 chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1; iwch_resize_cq()
313 static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) iwch_arm_cq() argument
322 chp = to_iwch_cq(ibcq); iwch_arm_cq()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
ev.c
105 event.device = chp->ibcq.device; post_qp_event()
107 event.element.cq = &chp->ibcq; post_qp_event()
114 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); post_qp_event()
235 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); c4iw_ev_handler()
qp.c
1143 (*rchp->ibcq.comp_handler)(&rchp->ibcq, __flush_qp()
1144 rchp->ibcq.cq_context); __flush_qp()
1150 (*rchp->ibcq.comp_handler)(&rchp->ibcq, __flush_qp()
1151 rchp->ibcq.cq_context); __flush_qp()
1156 (*schp->ibcq.comp_handler)(&schp->ibcq, __flush_qp()
1157 schp->ibcq.cq_context); __flush_qp()
1175 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); flush_qp()
1180 (*schp->ibcq.comp_handler)(&schp->ibcq, flush_qp()
1181 schp->ibcq.cq_context); flush_qp()
cq.c
828 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) c4iw_poll_cq() argument
835 chp = to_c4iw_cq(ibcq); c4iw_poll_cq()
936 chp->ibcq.cqe = entries - 2; c4iw_create_cq()
981 return &chp->ibcq; c4iw_create_cq()
1001 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) c4iw_arm_cq() argument
1007 chp = to_c4iw_cq(ibcq); c4iw_arm_cq()
iw_cxgb4.h
425 struct ib_cq ibcq; member in struct:c4iw_cq
434 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) to_c4iw_cq() argument
436 return container_of(ibcq, struct c4iw_cq, ibcq); to_c4iw_cq()
957 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1000 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
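
Every flush path in the cxgb3 and cxgb4 hits ends in the same upcall: the provider notifies the consumer through the comp_handler pointer stored in the ib_cq itself, passing back the opaque cq_context registered at create time. As a sketch:

    #include <rdma/ib_verbs.h>

    /* Completion upcall idiom from the flush paths above. The NULL check
     * matters for polled-only CQs, which register no handler; ocrdma and
     * nes guard the call the same way. */
    static void notify_cq_consumer(struct ib_cq *ibcq)
    {
        if (ibcq->comp_handler)
            (*ibcq->comp_handler)(ibcq, ibcq->cq_context);
    }
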
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_cq.c
78 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); c2_cq_event()
201 int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) c2_poll_cq() argument
203 struct c2_dev *c2dev = to_c2dev(ibcq->device); c2_poll_cq()
204 struct c2_cq *cq = to_c2cq(ibcq); c2_poll_cq()
222 int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) c2_arm_cq() argument
229 cq = to_c2cq(ibcq); c2_arm_cq()
300 cq->ibcq.cqe = entries - 1; c2_init_cq()
c2_provider.h
92 struct ib_cq ibcq; member in struct:c2_cq
153 static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq) to_c2cq() argument
155 return container_of(ibcq, struct c2_cq, ibcq); to_c2cq()
c2_ae.c
310 ib_event.element.cq = &cq->ibcq; c2_ae_event()
313 if (cq->ibcq.event_handler) c2_ae_event()
314 cq->ibcq.event_handler(&ib_event, c2_ae_event()
315 cq->ibcq.cq_context); c2_ae_event()
c2.h
517 extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
518 extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
c2_provider.c
309 return &cq->ibcq; c2_create_cq()
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma.h
297 struct ib_cq ibcq; member in struct:ocrdma_cq
454 static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) get_ocrdma_cq() argument
456 return container_of(ibcq, struct ocrdma_cq, ibcq); get_ocrdma_cq()
ocrdma_verbs.c
1045 return &cq->ibcq; ocrdma_create_cq()
1053 int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt, ocrdma_resize_cq() argument
1057 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); ocrdma_resize_cq()
1063 ibcq->cqe = new_cnt; ocrdma_resize_cq()
1073 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); ocrdma_flush_cq()
1093 int ocrdma_destroy_cq(struct ib_cq *ibcq) ocrdma_destroy_cq() argument
1095 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); ocrdma_destroy_cq()
1097 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); ocrdma_destroy_cq()
2822 struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); ocrdma_poll_hwcq()
2908 int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) ocrdma_poll_cq() argument
2911 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); ocrdma_poll_cq()
2912 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); ocrdma_poll_cq()
2944 int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags) ocrdma_arm_cq() argument
2946 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); ocrdma_arm_cq()
2947 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); ocrdma_arm_cq()
ocrdma_hw.c
678 ib_evt.element.cq = &cq->ibcq; ocrdma_dispatch_ibevent()
684 ib_evt.element.cq = &cq->ibcq; ocrdma_dispatch_ibevent()
744 if (cq->ibcq.event_handler) ocrdma_dispatch_ibevent()
745 cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context); ocrdma_dispatch_ibevent()
893 if (bcq && bcq->ibcq.comp_handler) { ocrdma_qp_buddy_cq_handler()
895 (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context); ocrdma_qp_buddy_cq_handler()
912 if (cq->ibcq.comp_handler) { ocrdma_qp_cq_handler()
914 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); ocrdma_qp_cq_handler()
/linux-4.1.27/drivers/infiniband/hw/usnic/
usnic_ib_verbs.h
67 int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
usnic_ib_verbs.c
746 int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries, usnic_ib_poll_cq() argument
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_main.c
534 struct ib_cq *ibcq; ehca_create_aqp1() local
544 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0); ehca_create_aqp1()
545 if (IS_ERR(ibcq)) { ehca_create_aqp1()
547 return PTR_ERR(ibcq); ehca_create_aqp1()
549 sport->ibcq_aqp1 = ibcq; ehca_create_aqp1()
558 qp_init_attr.send_cq = ibcq; ehca_create_aqp1()
559 qp_init_attr.recv_cq = ibcq; ehca_create_aqp1()
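
The ehca hit is the only consumer-side call in these results: on 4.1, ib_create_cq() takes (device, comp_handler, event_handler, cq_context, cqe, comp_vector), and one CQ may back both queues of a QP. A trimmed sketch of that usage (the entry count and NULL context are arbitrary choices here, not ehca's):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static int setup_qp_cqs(struct ib_device *dev, struct ib_qp_init_attr *attr)
    {
        /* NULL handlers: this CQ will only ever be polled. */
        struct ib_cq *cq = ib_create_cq(dev, NULL, NULL, NULL, 16, 0);

        if (IS_ERR(cq))
            return PTR_ERR(cq);

        attr->send_cq = cq;   /* one CQ shared by send and receive, */
        attr->recv_cq = cq;   /* as ehca_create_aqp1() does above */
        return 0;
    }
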
/linux-4.1.27/drivers/infiniband/hw/nes/
nes.h
505 static inline struct nes_cq *to_nescq(struct ib_cq *ibcq) to_nescq() argument
507 return container_of(ibcq, struct nes_cq, ibcq); to_nescq()
nes_verbs.h
112 struct ib_cq ibcq; member in struct:nes_cq
nes_hw.c
3734 if (nescq->ibcq.event_handler) { nes_process_iwarp_aeqe()
3735 ibevent.device = nescq->ibcq.device; nes_process_iwarp_aeqe()
3737 ibevent.element.cq = &nescq->ibcq; nes_process_iwarp_aeqe()
3738 nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context); nes_process_iwarp_aeqe()
3763 if (nescq->ibcq.comp_handler) nes_iwarp_ce_handler()
3764 nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context); nes_iwarp_ce_handler()
nes_verbs.c
1571 nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1; nes_create_cq()
1779 return &nescq->ibcq; nes_create_cq()
3632 static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) nes_poll_cq() argument
3637 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); nes_poll_cq()
3639 struct nes_cq *nescq = to_nescq(ibcq); nes_poll_cq()
3807 static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) nes_req_notify_cq() argument
3809 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device); nes_req_notify_cq()
3811 struct nes_cq *nescq = to_nescq(ibcq); nes_req_notify_cq()
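
nes_process_iwarp_aeqe() above is the asynchronous counterpart of the completion upcall: the provider fills a struct ib_event naming the CQ and hands it to the consumer's event_handler. A sketch reporting a CQ error (one plausible event type; mthca, mlx4 and ocrdma dispatch their CQ errors through the same fields):

    #include <rdma/ib_verbs.h>

    static void report_cq_error(struct ib_cq *ibcq)
    {
        struct ib_event ev;

        if (!ibcq->event_handler)     /* consumer may not have registered one */
            return;

        ev.device = ibcq->device;
        ev.event = IB_EVENT_CQ_ERR;
        ev.element.cq = ibcq;
        ibcq->event_handler(&ev, ibcq->cq_context);
    }
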

Completed in 767 milliseconds