/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
D | en_cq.c | 40 static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) in mlx4_en_cq_event() argument 52 struct mlx4_en_cq *cq; in mlx4_en_create_cq() local 55 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); in mlx4_en_create_cq() 56 if (!cq) { in mlx4_en_create_cq() 57 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in mlx4_en_create_cq() 58 if (!cq) { in mlx4_en_create_cq() 64 cq->size = entries; in mlx4_en_create_cq() 65 cq->buf_size = cq->size * mdev->dev->caps.cqe_size; in mlx4_en_create_cq() 67 cq->ring = ring; in mlx4_en_create_cq() 68 cq->is_tx = mode; in mlx4_en_create_cq() [all …]
|
D | cq.c | 82 static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) in mlx4_add_cq_to_tasklet() argument 85 struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; in mlx4_add_cq_to_tasklet() 93 if (list_empty_careful(&cq->tasklet_ctx.list)) { in mlx4_add_cq_to_tasklet() 94 atomic_inc(&cq->refcount); in mlx4_add_cq_to_tasklet() 95 list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); in mlx4_add_cq_to_tasklet() 102 struct mlx4_cq *cq; in mlx4_cq_completion() local 104 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, in mlx4_cq_completion() 106 if (!cq) { in mlx4_cq_completion() 111 ++cq->arm_sn; in mlx4_cq_completion() 113 cq->comp(cq); in mlx4_cq_completion() [all …]
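
The mlx4_cq_completion() excerpt above shows the core dispatch pattern: look the CQ up by number, pin it with a reference so a racing destroy cannot free it mid-callback, then invoke its comp() handler. Below is a minimal userspace C sketch of that pattern, with a plain array standing in for the kernel's radix tree; every name here is illustrative, not the mlx4 API.

#include <stdatomic.h>
#include <stdio.h>

struct model_cq {
	int cqn;                          /* CQ number (lookup key) */
	atomic_int refcount;              /* last put signals the destroyer */
	void (*comp)(struct model_cq *);  /* completion callback */
};

#define MAX_CQN 64
static struct model_cq *cq_table[MAX_CQN];  /* stands in for the radix tree */

static void cq_completion(int cqn)
{
	struct model_cq *cq = (cqn >= 0 && cqn < MAX_CQN) ? cq_table[cqn] : NULL;

	if (!cq) {
		fprintf(stderr, "completion event for bogus CQ %06x\n", cqn);
		return;
	}
	atomic_fetch_add(&cq->refcount, 1);  /* pin across the callback */
	cq->comp(cq);
	if (atomic_fetch_sub(&cq->refcount, 1) == 1)
		printf("last reference to CQ %d dropped\n", cq->cqn);
}

static void demo_comp(struct model_cq *cq)
{
	printf("completion on CQ %d\n", cq->cqn);
}

int main(void)
{
	struct model_cq cq = { .cqn = 7, .comp = demo_comp };

	atomic_init(&cq.refcount, 1);
	cq_table[cq.cqn] = &cq;
	cq_completion(7);   /* dispatches to demo_comp */
	cq_completion(9);   /* bogus CQ number: logged and ignored */
	return 0;
}

In the real driver the last put completes a completion object that the destroy path waits on, so teardown cannot race with an in-flight event.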
|
D | mlx4_en.h | 626 static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) in mlx4_en_cq_init_lock() argument 628 spin_lock_init(&cq->poll_lock); in mlx4_en_cq_init_lock() 629 cq->state = MLX4_EN_CQ_STATE_IDLE; in mlx4_en_cq_init_lock() 633 static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) in mlx4_en_cq_lock_napi() argument 636 spin_lock(&cq->poll_lock); in mlx4_en_cq_lock_napi() 637 if (cq->state & MLX4_CQ_LOCKED) { in mlx4_en_cq_lock_napi() 638 WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI); in mlx4_en_cq_lock_napi() 639 cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD; in mlx4_en_cq_lock_napi() 643 cq->state = MLX4_EN_CQ_STATE_NAPI; in mlx4_en_cq_lock_napi() 644 spin_unlock(&cq->poll_lock); in mlx4_en_cq_lock_napi() [all …]
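
The mlx4_en.h helpers above implement the pre-4.5 busy-poll protocol: NAPI and the low-latency socket poller compete for the CQ under poll_lock, and the loser records a YIELD flag so the owner knows someone spun on it. A compilable userspace model of the NAPI-side acquisition follows; the state names mirror the excerpt, everything else is invented.

#include <pthread.h>
#include <stdbool.h>

enum {
	CQ_STATE_IDLE       = 0,
	CQ_STATE_NAPI       = 1 << 0,  /* NAPI context owns the CQ */
	CQ_STATE_POLL       = 1 << 1,  /* busy-poll context owns the CQ */
	CQ_STATE_NAPI_YIELD = 1 << 2,  /* NAPI lost a race and yielded */
};
#define CQ_LOCKED (CQ_STATE_NAPI | CQ_STATE_POLL)

struct model_cq {
	pthread_spinlock_t poll_lock;
	unsigned int state;
};

static bool cq_lock_napi(struct model_cq *cq)
{
	bool rc = true;

	pthread_spin_lock(&cq->poll_lock);
	if (cq->state & CQ_LOCKED) {
		cq->state |= CQ_STATE_NAPI_YIELD;  /* busy poll owns the CQ */
		rc = false;
	} else {
		cq->state = CQ_STATE_NAPI;         /* NAPI takes ownership */
	}
	pthread_spin_unlock(&cq->poll_lock);
	return rc;
}

static void cq_unlock_napi(struct model_cq *cq)
{
	pthread_spin_lock(&cq->poll_lock);
	cq->state = CQ_STATE_IDLE;
	pthread_spin_unlock(&cq->poll_lock);
}

int main(void)  /* build with -lpthread */
{
	struct model_cq cq;

	pthread_spin_init(&cq.poll_lock, PTHREAD_PROCESS_PRIVATE);
	cq.state = CQ_STATE_IDLE;
	if (!cq_lock_napi(&cq))  /* uncontended: must succeed */
		return 1;
	cq_unlock_napi(&cq);
	return 0;
}

The real unlock helpers also report whether a yield was recorded, which the caller uses for statistics; that detail is omitted here.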
|
D | en_rx.c | 744 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) in mlx4_en_process_rx_cq() argument 749 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; in mlx4_en_process_rx_cq() 771 index = cq->mcq.cons_index & ring->size_mask; in mlx4_en_process_rx_cq() 772 cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; in mlx4_en_process_rx_cq() 776 cq->mcq.cons_index & cq->size)) { in mlx4_en_process_rx_cq() 878 if (!mlx4_en_cq_busy_polling(cq) && in mlx4_en_process_rx_cq() 880 struct sk_buff *gro_skb = napi_get_frags(&cq->napi); in mlx4_en_process_rx_cq() 929 skb_record_rx_queue(gro_skb, cq->ring); in mlx4_en_process_rx_cq() 930 skb_mark_napi_id(gro_skb, &cq->napi); in mlx4_en_process_rx_cq() 939 napi_gro_frags(&cq->napi); in mlx4_en_process_rx_cq() [all …]
|
D | en_tx.c | 197 int cq, int user_prio) in mlx4_en_activate_tx_ring() argument 202 ring->cqn = cq; in mlx4_en_activate_tx_ring() 386 struct mlx4_en_cq *cq) in mlx4_en_process_tx_cq() argument 389 struct mlx4_cq *mcq = &cq->mcq; in mlx4_en_process_tx_cq() 390 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; in mlx4_en_process_tx_cq() 397 int size = cq->size; in mlx4_en_process_tx_cq() 399 struct mlx4_cqe *buf = cq->buf; in mlx4_en_process_tx_cq() 497 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); in mlx4_en_tx_irq() local 498 struct mlx4_en_priv *priv = netdev_priv(cq->dev); in mlx4_en_tx_irq() 501 napi_schedule_irqoff(&cq->napi); in mlx4_en_tx_irq() [all …]
|
D | en_netdev.c | 76 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); in mlx4_en_low_latency_recv() local 77 struct net_device *dev = cq->dev; in mlx4_en_low_latency_recv() 79 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; in mlx4_en_low_latency_recv() 85 if (!mlx4_en_cq_lock_poll(cq)) in mlx4_en_low_latency_recv() 88 done = mlx4_en_process_rx_cq(dev, cq, 4); in mlx4_en_low_latency_recv() 94 mlx4_en_cq_unlock_poll(cq); in mlx4_en_low_latency_recv() 1215 struct mlx4_en_cq *cq; in mlx4_en_netpoll() local 1219 cq = priv->rx_cq[i]; in mlx4_en_netpoll() 1220 napi_schedule(&cq->napi); in mlx4_en_netpoll() 1330 struct mlx4_en_cq *cq; in mlx4_en_set_default_moderation() local [all …]
|
D | resource_tracker.c | 193 struct res_cq *cq; member 446 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; in mlx4_init_quotas() 456 dev->quotas.cq = in mlx4_init_quotas() 1594 enum res_cq_states state, struct res_cq **cq) in cq_res_start_move_to() argument 1624 if (cq) in cq_res_start_move_to() 1625 *cq = r; in cq_res_start_move_to() 3363 struct res_cq *cq = NULL; in mlx4_SW2HW_CQ_wrapper() local 3366 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); in mlx4_SW2HW_CQ_wrapper() 3379 cq->mtt = mtt; in mlx4_SW2HW_CQ_wrapper() 3399 struct res_cq *cq = NULL; in mlx4_HW2SW_CQ_wrapper() local [all …]
|
D | Makefile | 3 mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o fw_qos.o icm.o intf.o \
|
D | main.c | 846 dev->quotas.cq = func_cap.cq_quota; in mlx4_slave_cap()
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c | 169 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) in get_cqe() argument 171 return get_cqe_from_buf(&cq->buf, entry); in get_cqe() 179 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) in next_cqe_sw() argument 181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw() 204 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, in update_cons_index() argument 208 *cq->set_ci_db = cpu_to_be32(cq->cons_index); in update_cons_index() 211 mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, in update_cons_index() 224 struct mthca_cq *cq; in mthca_cq_completion() local 226 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion() 228 if (!cq) { in mthca_cq_completion() [all …]
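
The update_cons_index() excerpt above shows the two mthca flavors: Arbel publishes the consumer index through a doorbell record (*cq->set_ci_db) while Tavor writes an increment to a doorbell register (mthca_write64 with MTHCA_TAVOR_CQ_DB_INC_CI). A sketch of the record flavor, assuming htonl() as a stand-in for cpu_to_be32() and with invented names; the real doorbell lives in device-visible memory.

#include <stdint.h>
#include <stdatomic.h>
#include <arpa/inet.h>

struct model_cq {
	uint32_t cons_index;
	volatile uint32_t *set_ci_db;  /* big-endian doorbell record */
};

static void update_cons_index(struct model_cq *cq, int incr)
{
	cq->cons_index += incr;
	/* order the CQE reads before the doorbell store (wmb() in-kernel) */
	atomic_thread_fence(memory_order_release);
	*cq->set_ci_db = htonl(cq->cons_index);
}

int main(void)
{
	uint32_t db_rec = 0;
	struct model_cq cq = { .cons_index = 0, .set_ci_db = &db_rec };

	update_cons_index(&cq, 4);  /* consumed four CQEs */
	return db_rec == htonl(4) ? 0 : 1;
}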
|
D | mthca_provider.c | 655 struct mthca_cq *cq; in mthca_create_cq() local 682 cq = kmalloc(sizeof *cq, GFP_KERNEL); in mthca_create_cq() 683 if (!cq) { in mthca_create_cq() 689 cq->buf.mr.ibmr.lkey = ucmd.lkey; in mthca_create_cq() 690 cq->set_ci_db_index = ucmd.set_db_index; in mthca_create_cq() 691 cq->arm_db_index = ucmd.arm_db_index; in mthca_create_cq() 700 cq); in mthca_create_cq() 704 if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { in mthca_create_cq() 705 mthca_free_cq(to_mdev(ibdev), cq); in mthca_create_cq() 710 cq->resize_buf = NULL; in mthca_create_cq() [all …]
|
D | mthca_dev.h | 242 struct mthca_array cq; member 496 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); 497 int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); 500 struct mthca_cq *cq); 502 struct mthca_cq *cq); 506 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, 508 void mthca_cq_resize_copy_cqes(struct mthca_cq *cq);
|
D | mthca_qp.c | 1549 struct mthca_cq *cq; in mthca_wq_overflow() local 1555 cq = to_mcq(ib_cq); in mthca_wq_overflow() 1556 spin_lock(&cq->lock); in mthca_wq_overflow() 1558 spin_unlock(&cq->lock); in mthca_wq_overflow()
|
/linux-4.4.14/drivers/net/ethernet/cisco/enic/ |
D | vnic_cq.c | 29 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument 31 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free() 33 cq->ctrl = NULL; in vnic_cq_free() 36 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, in vnic_cq_alloc() argument 41 cq->index = index; in vnic_cq_alloc() 42 cq->vdev = vdev; in vnic_cq_alloc() 44 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); in vnic_cq_alloc() 45 if (!cq->ctrl) { in vnic_cq_alloc() 50 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc() 57 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in vnic_cq_init() argument [all …]
|
D | vnic_cq.h | 72 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, in vnic_cq_service() argument 83 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() 84 cq->ring.desc_size * cq->to_clean); in vnic_cq_service() 88 while (color != cq->last_color) { in vnic_cq_service() 90 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service() 94 cq->to_clean++; in vnic_cq_service() 95 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service() 96 cq->to_clean = 0; in vnic_cq_service() 97 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service() 100 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() [all …]
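
vnic_cq_service() above (duplicated with small variations by fnic and snic further down) validates descriptors with a color bit: an entry is new while its color differs from the last color seen, and the expected color flips each time the ring wraps. A self-contained model of that loop, with illustrative types and the per-descriptor q_service dispatch reduced to a printf:

#include <stdint.h>
#include <stdio.h>

struct model_desc { uint8_t color; uint8_t data; };

struct model_cq {
	struct model_desc *descs;
	unsigned int desc_count;
	unsigned int to_clean;
	uint8_t last_color;
};

static unsigned int cq_service(struct model_cq *cq, unsigned int work_to_do)
{
	struct model_desc *d = &cq->descs[cq->to_clean];
	unsigned int done = 0;

	while (d->color != cq->last_color && done < work_to_do) {
		printf("serviced desc %u (data %u)\n", cq->to_clean, d->data);
		if (++cq->to_clean == cq->desc_count) {
			cq->to_clean = 0;
			cq->last_color = !cq->last_color;  /* ring wrapped: flip */
		}
		d = &cq->descs[cq->to_clean];
		done++;
	}
	return done;
}

int main(void)
{
	struct model_desc ring[4] = {
		{ .color = 1, .data = 10 }, { .color = 1, .data = 11 },
		{ .color = 0 }, { .color = 0 },  /* not yet written by "hw" */
	};
	struct model_cq cq = { ring, 4, 0, 0 };

	return cq_service(&cq, 8) == 2 ? 0 : 1;  /* exactly two valid entries */
}

The color scheme avoids a consumer-index compare against a producer index the hardware would have to publish separately: validity is read from the descriptor itself.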
|
D | enic_main.c | 1111 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_rq_indicate_buf() local 1196 enic_intr_update_pkt_size(&cq->pkt_size_counter, in enic_rq_indicate_buf() 1225 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_set_int_moderation() local 1226 u32 timer = cq->tobe_rx_coal_timeval; in enic_set_int_moderation() 1228 if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) { in enic_set_int_moderation() 1230 cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval; in enic_set_int_moderation() 1237 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_calc_int_moderation() local 1238 struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter; in enic_calc_int_moderation() 1246 delta = ktime_us_delta(now, cq->prev_ts); in enic_calc_int_moderation() 1249 cq->prev_ts = now; in enic_calc_int_moderation() [all …]
|
D | enic.h | 188 ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX]; member 248 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; in enic_msix_rq_intr() 254 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; in enic_msix_wq_intr()
|
D | enic_res.c | 191 vnic_cq_free(&enic->cq[i]); in enic_free_vnic_resources() 275 vnic_cq_init(&enic->cq[i], in enic_init_vnic_resources() 350 err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, in enic_alloc_vnic_resources() 354 err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, in enic_alloc_vnic_resources()
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | cq.c | 42 static void mlx4_ib_cq_comp(struct mlx4_cq *cq) in mlx4_ib_cq_comp() argument 44 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp() 48 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) in mlx4_ib_cq_event() argument 55 "on CQ %06x\n", type, cq->cqn); in mlx4_ib_cq_event() 59 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event() 63 event.element.cq = ibcq; in mlx4_ib_cq_event() 73 static void *get_cqe(struct mlx4_ib_cq *cq, int n) in get_cqe() argument 75 return get_cqe_from_buf(&cq->buf, n); in get_cqe() 78 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) in get_sw_cqe() argument 80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() [all …]
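
The get_sw_cqe() excerpt above encodes CQE ownership differently from the color-bit drivers: a CQE belongs to software when its owner bit matches the wrap-count parity of the consumer index. A sketch of that check, with an invented one-byte CQE layout (the real mlx4_cqe is hardware-defined):

#include <stdint.h>
#include <stddef.h>

#define CQE_OWNER_MASK 0x80

struct model_cqe { uint8_t owner_sr_opcode; };

struct model_cq {
	struct model_cqe *buf;
	int cqe;   /* ring size minus one, as in ibcq.cqe */
};

static struct model_cqe *get_sw_cqe(struct model_cq *cq, int n)
{
	struct model_cqe *cqe = &cq->buf[n & cq->cqe];

	/* owner bit must match the wrap-parity bit of n for a SW-owned CQE */
	return (!!(cqe->owner_sr_opcode & CQE_OWNER_MASK) ^
		!!(n & (cq->cqe + 1))) ? NULL : cqe;
}

int main(void)
{
	struct model_cqe ring[4] = {
		{ 0 }, { 0 }, { CQE_OWNER_MASK }, { CQE_OWNER_MASK },
	};
	struct model_cq cq = { .buf = ring, .cqe = 3 };

	/* first pass (wrap parity 0): owner bit clear => software-owned */
	if (!get_sw_cqe(&cq, 0) || !get_sw_cqe(&cq, 1))
		return 1;
	return get_sw_cqe(&cq, 2) ? 1 : 0;  /* still hardware-owned */
}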
|
D | mlx4_ib.h | 102 struct ib_cq *cq; member 431 struct ib_cq *cq; member 716 int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 722 int mlx4_ib_destroy_cq(struct ib_cq *cq); 724 int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); 725 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); 726 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
|
D | Makefile | 3 mlx4_ib-y := ah.o cq.o doorbell.o mad.o main.o mr.o qp.o srq.o mcg.o cm.o alias_GUID.o sysfs.o
|
D | mad.c | 1124 static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) in mlx4_ib_tunnel_comp_handler() argument 1127 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; in mlx4_ib_tunnel_comp_handler() 1555 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_tunnel_comp_worker() 1557 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_tunnel_comp_worker() 1627 qp_init_attr.init_attr.send_cq = ctx->cq; in create_pv_sqp() 1628 qp_init_attr.init_attr.recv_cq = ctx->cq; in create_pv_sqp() 1721 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); in mlx4_ib_sqp_comp_worker() 1723 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { in mlx4_ib_sqp_comp_worker() 1832 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, in create_pv_resources() 1834 if (IS_ERR(ctx->cq)) { in create_pv_resources() [all …]
|
D | srq.c | 186 to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0; in mlx4_ib_create_srq()
|
D | main.c | 460 props->max_cq = dev->dev->quotas.cq; in mlx4_ib_query_device() 1202 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); in mlx4_ib_alloc_xrcd() 1203 if (IS_ERR(xrcd->cq)) { in mlx4_ib_alloc_xrcd() 1204 err = PTR_ERR(xrcd->cq); in mlx4_ib_alloc_xrcd() 1221 ib_destroy_cq(to_mxrcd(xrcd)->cq); in mlx4_ib_dealloc_xrcd()
|
D | qp.c | 986 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs() 1145 init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; in mlx4_ib_create_qp() 2489 struct mlx4_ib_cq *cq; in mlx4_wq_overflow() local 2495 cq = to_mcq(ib_cq); in mlx4_wq_overflow() 2496 spin_lock(&cq->lock); in mlx4_wq_overflow() 2498 spin_unlock(&cq->lock); in mlx4_wq_overflow()
|
/linux-4.4.14/drivers/scsi/fnic/ |
D | vnic_cq.c | 24 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument 26 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free() 28 cq->ctrl = NULL; in vnic_cq_free() 31 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, in vnic_cq_alloc() argument 36 cq->index = index; in vnic_cq_alloc() 37 cq->vdev = vdev; in vnic_cq_alloc() 39 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); in vnic_cq_alloc() 40 if (!cq->ctrl) { in vnic_cq_alloc() 45 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc() 52 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in vnic_cq_init() argument [all …]
|
D | vnic_cq_copy.h | 24 struct vnic_cq *cq, in vnic_cq_copy_service() argument 35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service() 36 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service() 39 while (color != cq->last_color) { in vnic_cq_copy_service() 41 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_copy_service() 44 cq->to_clean++; in vnic_cq_copy_service() 45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_copy_service() 46 cq->to_clean = 0; in vnic_cq_copy_service() 47 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_copy_service() 50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service() [all …]
|
D | vnic_cq.h | 70 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, in vnic_cq_service() argument 81 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() 82 cq->ring.desc_size * cq->to_clean); in vnic_cq_service() 86 while (color != cq->last_color) { in vnic_cq_service() 88 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service() 92 cq->to_clean++; in vnic_cq_service() 93 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service() 94 cq->to_clean = 0; in vnic_cq_service() 95 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service() 98 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service() [all …]
|
D | fnic_res.c | 224 vnic_cq_free(&fnic->cq[i]); in fnic_free_vnic_resources() 286 &fnic->cq[cq_index], cq_index, in fnic_alloc_vnic_resources() 296 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index, in fnic_alloc_vnic_resources() 307 err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], in fnic_alloc_vnic_resources() 389 vnic_cq_init(&fnic->cq[i], in fnic_alloc_vnic_resources()
|
D | fnic.h | 300 ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX]; member
|
D | fnic_fcs.c | 915 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, in fnic_rq_cmpl_handler() 1254 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], in fnic_wq_cmpl_handler()
|
D | fnic_main.c | 507 vnic_cq_clean(&fnic->cq[i]); in fnic_cleanup()
|
D | fnic_scsi.c | 1285 cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], in fnic_wq_copy_cmpl_handler()
|
/linux-4.4.14/drivers/scsi/snic/ |
D | vnic_cq.c | 24 void svnic_cq_free(struct vnic_cq *cq) in svnic_cq_free() argument 26 svnic_dev_free_desc_ring(cq->vdev, &cq->ring); in svnic_cq_free() 28 cq->ctrl = NULL; in svnic_cq_free() 31 int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, in svnic_cq_alloc() argument 36 cq->index = index; in svnic_cq_alloc() 37 cq->vdev = vdev; in svnic_cq_alloc() 39 cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index); in svnic_cq_alloc() 40 if (!cq->ctrl) { in svnic_cq_alloc() 46 err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in svnic_cq_alloc() 53 void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in svnic_cq_init() argument [all …]
|
D | vnic_cq_fw.h | 24 vnic_cq_fw_service(struct vnic_cq *cq, in vnic_cq_fw_service() argument 35 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service() 36 cq->ring.desc_size * cq->to_clean); in vnic_cq_fw_service() 39 while (color != cq->last_color) { in vnic_cq_fw_service() 41 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_fw_service() 44 cq->to_clean++; in vnic_cq_fw_service() 45 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_fw_service() 46 cq->to_clean = 0; in vnic_cq_fw_service() 47 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_fw_service() 50 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service() [all …]
|
D | vnic_cq.h | 60 static inline unsigned int svnic_cq_service(struct vnic_cq *cq, in svnic_cq_service() argument 71 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service() 72 cq->ring.desc_size * cq->to_clean); in svnic_cq_service() 76 while (color != cq->last_color) { in svnic_cq_service() 78 if ((*q_service)(cq->vdev, cq_desc, type, in svnic_cq_service() 82 cq->to_clean++; in svnic_cq_service() 83 if (cq->to_clean == cq->ring.desc_count) { in svnic_cq_service() 84 cq->to_clean = 0; in svnic_cq_service() 85 cq->last_color = cq->last_color ? 0 : 1; in svnic_cq_service() 88 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service() [all …]
|
D | snic_res.c | 140 svnic_cq_free(&snic->cq[i]); in snic_free_vnic_res() 189 &snic->cq[i], in snic_alloc_vnic_res() 201 &snic->cq[i], in snic_alloc_vnic_res() 233 svnic_cq_init(&snic->cq[i], in snic_alloc_vnic_res()
|
D | snic.h | 339 ____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX]; member
|
D | snic_io.c | 87 work_done += svnic_cq_service(&snic->cq[i], in snic_wq_cmpl_handler()
|
D | snic_main.c | 247 svnic_cq_clean(&snic->cq[i]); in snic_cleanup()
|
D | snic_scsi.c | 1270 nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx], in snic_fwcq_cmpl_handler()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_cq.c | 48 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) in ipath_cq_enter() argument 55 spin_lock_irqsave(&cq->lock, flags); in ipath_cq_enter() 61 wc = cq->queue; in ipath_cq_enter() 63 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter() 64 head = cq->ibcq.cqe; in ipath_cq_enter() 69 spin_unlock_irqrestore(&cq->lock, flags); in ipath_cq_enter() 70 if (cq->ibcq.event_handler) { in ipath_cq_enter() 73 ev.device = cq->ibcq.device; in ipath_cq_enter() 74 ev.element.cq = &cq->ibcq; in ipath_cq_enter() 76 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in ipath_cq_enter() [all …]
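
ipath_cq_enter() above (the qib and hfi1 copies below are the same pattern) fills a software completion ring: if advancing head would collide with tail, the CQ has overflowed and an async error event (IB_EVENT_CQ_ERR in the excerpt) is raised instead of silently dropping the entry. A simplified model, with invented names and the locking omitted:

struct model_wc { int wr_id; };

#define CQ_ENTRIES 8

struct model_cq {
	struct model_wc queue[CQ_ENTRIES + 1];  /* one slot kept empty */
	unsigned head, tail;
	void (*event_handler)(struct model_cq *cq);
};

static int cq_enter(struct model_cq *cq, struct model_wc *entry)
{
	unsigned next = cq->head + 1;

	if (next > CQ_ENTRIES)
		next = 0;
	if (next == cq->tail) {            /* ring full: overflow */
		if (cq->event_handler)
			cq->event_handler(cq); /* IB_EVENT_CQ_ERR in-kernel */
		return -1;
	}
	cq->queue[cq->head] = *entry;
	cq->head = next;
	return 0;
}

int main(void)
{
	struct model_cq cq = { .head = 0, .tail = 0, .event_handler = 0 };
	struct model_wc wc = { .wr_id = 1 };
	int i, rc = 0;

	for (i = 0; i < CQ_ENTRIES; i++)
		rc |= cq_enter(&cq, &wc);          /* fills the ring exactly */
	return rc || cq_enter(&cq, &wc) == 0;  /* the next one must overflow */
}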
|
D | ipath_verbs.h | 814 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_cq.c | 51 void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) in qib_cq_enter() argument 58 spin_lock_irqsave(&cq->lock, flags); in qib_cq_enter() 64 wc = cq->queue; in qib_cq_enter() 66 if (head >= (unsigned) cq->ibcq.cqe) { in qib_cq_enter() 67 head = cq->ibcq.cqe; in qib_cq_enter() 72 spin_unlock_irqrestore(&cq->lock, flags); in qib_cq_enter() 73 if (cq->ibcq.event_handler) { in qib_cq_enter() 76 ev.device = cq->ibcq.device; in qib_cq_enter() 77 ev.element.cq = &cq->ibcq; in qib_cq_enter() 79 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in qib_cq_enter() [all …]
|
D | qib_verbs.h | 1018 void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | cq.c | 67 void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int solicited) in hfi1_cq_enter() argument 74 spin_lock_irqsave(&cq->lock, flags); in hfi1_cq_enter() 80 wc = cq->queue; in hfi1_cq_enter() 82 if (head >= (unsigned) cq->ibcq.cqe) { in hfi1_cq_enter() 83 head = cq->ibcq.cqe; in hfi1_cq_enter() 88 spin_unlock_irqrestore(&cq->lock, flags); in hfi1_cq_enter() 89 if (cq->ibcq.event_handler) { in hfi1_cq_enter() 92 ev.device = cq->ibcq.device; in hfi1_cq_enter() 93 ev.element.cq = &cq->ibcq; in hfi1_cq_enter() 95 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in hfi1_cq_enter() [all …]
|
D | user_sdma.c | 187 struct hfi1_user_sdma_comp_q *cq; member 359 struct hfi1_user_sdma_comp_q *cq; in hfi1_user_sdma_alloc_queues() local 408 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in hfi1_user_sdma_alloc_queues() 409 if (!cq) in hfi1_user_sdma_alloc_queues() 412 memsize = ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size, in hfi1_user_sdma_alloc_queues() 414 cq->comps = vmalloc_user(memsize); in hfi1_user_sdma_alloc_queues() 415 if (!cq->comps) in hfi1_user_sdma_alloc_queues() 418 cq->nentries = hfi1_sdma_comp_ring_size; in hfi1_user_sdma_alloc_queues() 419 user_sdma_comp_fp(fp) = cq; in hfi1_user_sdma_alloc_queues() 427 kfree(cq); in hfi1_user_sdma_alloc_queues() [all …]
|
D | Makefile | 10 hfi1-y := chip.o cq.o device.o diag.o dma.o driver.o eprom.o file_ops.o firmware.o \
|
D | file_ops.c | 441 struct hfi1_user_sdma_comp_q *cq; in hfi1_write_iter() local 460 cq = user_sdma_comp_fp(kiocb->ki_filp); in hfi1_write_iter() 669 struct hfi1_user_sdma_comp_q *cq; in hfi1_file_mmap() local 675 cq = user_sdma_comp_fp(fp); in hfi1_file_mmap() 676 memaddr = (u64)cq->comps; in hfi1_file_mmap() 677 memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE); in hfi1_file_mmap()
|
D | hfi.h | 1101 struct hfi1_user_sdma_comp_q *cq; member 1436 (((struct hfi1_filedata *)(fp)->private_data)->cq)
|
D | verbs.h | 995 void hfi1_cq_enter(struct hfi1_cq *cq, struct ib_wc *entry, int sig);
|
/linux-4.4.14/drivers/staging/rdma/amso1100/ |
D | c2_cq.c | 48 struct c2_cq *cq; in c2_cq_get() local 52 cq = c2dev->qptr_array[cqn]; in c2_cq_get() 53 if (!cq) { in c2_cq_get() 57 atomic_inc(&cq->refcount); in c2_cq_get() 59 return cq; in c2_cq_get() 62 static void c2_cq_put(struct c2_cq *cq) in c2_cq_put() argument 64 if (atomic_dec_and_test(&cq->refcount)) in c2_cq_put() 65 wake_up(&cq->wait); in c2_cq_put() 70 struct c2_cq *cq; in c2_cq_event() local 72 cq = c2_cq_get(c2dev, mq_index); in c2_cq_event() [all …]
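
The c2_cq_get()/c2_cq_put() pairing above is the wait-for-release variant of CQ refcounting: event handlers take a reference while they touch the CQ, and the destroy path sleeps on cq->wait until the last reference drops. A userspace model using a mutex/condvar in place of atomic_dec_and_test() plus wake_up(); names are illustrative.

#include <pthread.h>

struct model_cq {
	int refcount;            /* protected by lock */
	pthread_mutex_t lock;
	pthread_cond_t wait;     /* signalled by the last put */
};

static void cq_get(struct model_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	cq->refcount++;
	pthread_mutex_unlock(&cq->lock);
}

static void cq_put(struct model_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	if (--cq->refcount == 0)
		pthread_cond_broadcast(&cq->wait);  /* wake_up(&cq->wait) */
	pthread_mutex_unlock(&cq->lock);
}

static void cq_wait_for_release(struct model_cq *cq)
{
	pthread_mutex_lock(&cq->lock);
	while (cq->refcount)
		pthread_cond_wait(&cq->wait, &cq->lock);
	pthread_mutex_unlock(&cq->lock);
}

int main(void)  /* build with -lpthread */
{
	struct model_cq cq = { .refcount = 0,
			       .lock = PTHREAD_MUTEX_INITIALIZER,
			       .wait = PTHREAD_COND_INITIALIZER };

	cq_get(&cq);
	cq_put(&cq);
	cq_wait_for_release(&cq);  /* returns at once: refcount is zero */
	return 0;
}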
|
D | c2_ae.c | 305 struct c2_cq *cq = in c2_ae_event() local 310 ib_event.element.cq = &cq->ibcq; in c2_ae_event() 313 if (cq->ibcq.event_handler) in c2_ae_event() 314 cq->ibcq.event_handler(&ib_event, in c2_ae_event() 315 cq->ibcq.cq_context); in c2_ae_event()
|
D | c2_provider.c | 298 struct c2_cq *cq; in c2_create_cq() local 304 cq = kmalloc(sizeof(*cq), GFP_KERNEL); in c2_create_cq() 305 if (!cq) { in c2_create_cq() 310 err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq); in c2_create_cq() 313 kfree(cq); in c2_create_cq() 317 return &cq->ibcq; in c2_create_cq() 322 struct c2_cq *cq = to_c2cq(ib_cq); in c2_destroy_cq() local 326 c2_free_cq(to_c2dev(ib_cq->device), cq); in c2_destroy_cq() 327 kfree(cq); in c2_destroy_cq()
|
D | c2.h | 513 struct c2_ucontext *ctx, struct c2_cq *cq); 514 extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | cq.c | 40 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq) in mlx5_ib_cq_comp() argument 42 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp() 49 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local 50 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event() 51 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event() 63 event.element.cq = ibcq; in mlx5_ib_cq_event() 73 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument 75 return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz); in get_cqe() 83 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument 85 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() [all …]
|
D | main.c | 1067 ib_destroy_cq(dev->umrc.cq); in destroy_umrc_res() 1080 struct ib_cq *cq; in create_umr_res() local 1100 cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, in create_umr_res() 1102 if (IS_ERR(cq)) { in create_umr_res() 1104 ret = PTR_ERR(cq); in create_umr_res() 1107 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in create_umr_res() 1109 init_attr->send_cq = cq; in create_umr_res() 1110 init_attr->recv_cq = cq; in create_umr_res() 1155 dev->umrc.cq = cq; in create_umr_res() 1174 ib_destroy_cq(cq); in create_umr_res() [all …]
|
D | mlx5_ib.h | 354 struct ib_cq *cq; member 504 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); 505 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); 543 int mlx5_ib_destroy_cq(struct ib_cq *cq); 546 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); 604 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
|
D | Makefile | 3 mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
|
D | mr.c | 737 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) in mlx5_umr_cq_handler() argument 744 err = ib_poll_cq(cq, 1, &wc); in mlx5_umr_cq_handler() 756 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in mlx5_umr_cq_handler()
|
D | srq.c | 295 in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn); in mlx5_ib_create_srq()
|
D | qp.c | 1815 struct mlx5_ib_cq *cq; in mlx5_wq_overflow() local 1822 cq = to_mcq(ib_cq); in mlx5_wq_overflow() 1823 spin_lock(&cq->lock); in mlx5_wq_overflow() 1825 spin_unlock(&cq->lock); in mlx5_wq_overflow()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | cq.c | 44 struct mlx5_core_cq *cq; in mlx5_cq_completion() local 48 cq = radix_tree_lookup(&table->tree, cqn); in mlx5_cq_completion() 49 if (likely(cq)) in mlx5_cq_completion() 50 atomic_inc(&cq->refcount); in mlx5_cq_completion() 53 if (!cq) { in mlx5_cq_completion() 58 ++cq->arm_sn; in mlx5_cq_completion() 60 cq->comp(cq); in mlx5_cq_completion() 62 if (atomic_dec_and_test(&cq->refcount)) in mlx5_cq_completion() 63 complete(&cq->free); in mlx5_cq_completion() 69 struct mlx5_core_cq *cq; in mlx5_cq_event() local [all …]
|
D | en_txrx.c | 35 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) in mlx5e_get_cqe() argument 37 struct mlx5_cqwq *wq = &cq->wq; in mlx5e_get_cqe() 62 busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); in mlx5e_napi_poll() 64 busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); in mlx5e_napi_poll() 80 mlx5e_cq_arm(&c->sq[i].cq); in mlx5e_napi_poll() 81 mlx5e_cq_arm(&c->rq.cq); in mlx5e_napi_poll() 88 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); in mlx5e_completion_event() local 90 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); in mlx5e_completion_event() 91 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); in mlx5e_completion_event() 93 napi_schedule(cq->napi); in mlx5e_completion_event() [all …]
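
The mlx5e_napi_poll() excerpt above shows the canonical NAPI shape for a channel with several CQs: service every TX CQ and the RX CQ against the budget, and only when nothing reported itself busy does the poll complete and re-arm the CQs for interrupts. A stub-driven model of that control flow (all names invented; the real function also handles the NAPI_SCHED flag dance omitted here):

#include <stdbool.h>

struct model_channel {
	int num_tc;
	bool (*poll_tx)(struct model_channel *c, int tc);
	bool (*poll_rx)(struct model_channel *c, int budget);
	void (*arm_cq)(struct model_channel *c, int cq_idx);
};

static int napi_poll(struct model_channel *c, int budget)
{
	bool busy = false;
	int i;

	for (i = 0; i < c->num_tc; i++)
		busy |= c->poll_tx(c, i);
	busy |= c->poll_rx(c, budget);

	if (busy)
		return budget;   /* work remains: stay in polling mode */

	/* done: leave polling mode and re-arm every CQ */
	for (i = 0; i <= c->num_tc; i++)
		c->arm_cq(c, i);
	return 0;
}

static bool poll_tx_stub(struct model_channel *c, int tc) { (void)c; (void)tc; return false; }
static bool poll_rx_stub(struct model_channel *c, int b)  { (void)c; (void)b;  return false; }
static void arm_stub(struct model_channel *c, int i)      { (void)c; (void)i; }

int main(void)
{
	struct model_channel c = { 2, poll_tx_stub, poll_rx_stub, arm_stub };

	return napi_poll(&c, 64);  /* idle channel: completes and re-arms */
}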
|
D | en_rx.c | 218 bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) in mlx5e_poll_rx_cq() argument 220 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); in mlx5e_poll_rx_cq() 224 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) in mlx5e_poll_rx_cq() 234 cqe = mlx5e_get_cqe(cq); in mlx5e_poll_rx_cq() 238 mlx5_cqwq_pop(&cq->wq); in mlx5e_poll_rx_cq() 260 napi_gro_receive(cq->napi, skb); in mlx5e_poll_rx_cq() 267 mlx5_cqwq_update_db_record(&cq->wq); in mlx5e_poll_rx_cq() 273 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); in mlx5e_poll_rx_cq()
|
D | en_main.c | 393 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); in mlx5e_enable_rq() 622 MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); in mlx5e_enable_sq() 743 struct mlx5e_cq *cq) in mlx5e_create_cq() argument 747 struct mlx5_core_cq *mcq = &cq->mcq; in mlx5e_create_cq() 757 err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, in mlx5e_create_cq() 758 &cq->wq_ctrl); in mlx5e_create_cq() 764 cq->napi = &c->napi; in mlx5e_create_cq() 767 mcq->set_ci_db = cq->wq_ctrl.db.db; in mlx5e_create_cq() 768 mcq->arm_db = cq->wq_ctrl.db.db + 1; in mlx5e_create_cq() 777 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { in mlx5e_create_cq() [all …]
|
D | debugfs.c | 395 static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, in cq_read_field() argument 409 err = mlx5_core_query_cq(dev, cq, out); in cq_read_field() 417 param = cq->pid; in cq_read_field() 587 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) in mlx5_debug_cq_add() argument 595 &cq->dbg, cq->cqn, cq_fields, in mlx5_debug_cq_add() 596 ARRAY_SIZE(cq_fields), cq); in mlx5_debug_cq_add() 598 cq->dbg = NULL; in mlx5_debug_cq_add() 603 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) in mlx5_debug_cq_remove() argument 608 if (cq->dbg) in mlx5_debug_cq_remove() 609 rem_res_tree(cq->dbg); in mlx5_debug_cq_remove()
|
D | en.h | 317 struct mlx5e_cq cq; member 367 struct mlx5e_cq cq; member 566 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); 567 bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 569 struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); 618 static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) in mlx5e_cq_arm() argument 622 mcq = &cq->mcq; in mlx5e_cq_arm() 623 mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); in mlx5e_cq_arm()
|
D | en_tx.c | 315 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) in mlx5e_poll_tx_cq() argument 325 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags)) in mlx5e_poll_tx_cq() 328 sq = container_of(cq, struct mlx5e_sq, cq); in mlx5e_poll_tx_cq() 346 cqe = mlx5e_get_cqe(cq); in mlx5e_poll_tx_cq() 350 mlx5_cqwq_pop(&cq->wq); in mlx5e_poll_tx_cq() 384 mlx5_cqwq_update_db_record(&cq->wq); in mlx5e_poll_tx_cq() 401 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags); in mlx5e_poll_tx_cq()
|
D | Makefile | 4 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
|
D | en_ethtool.c | 429 &c->sq[tc].cq.mcq, in mlx5e_set_coalesce() 434 mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, in mlx5e_set_coalesce()
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | cq.c | 35 static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, in destroy_cq() argument 60 res->u.cq.restype = FW_RI_RES_TYPE_CQ; in destroy_cq() 61 res->u.cq.op = FW_RI_RES_OP_RESET; in destroy_cq() 62 res->u.cq.iqid = cpu_to_be32(cq->cqid); in destroy_cq() 70 kfree(cq->sw_queue); in destroy_cq() 72 cq->memsize, cq->queue, in destroy_cq() 73 dma_unmap_addr(cq, mapping)); in destroy_cq() 74 c4iw_put_cqid(rdev, cq->cqid, uctx); in destroy_cq() 78 static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, in create_cq() argument 89 cq->cqid = c4iw_get_cqid(rdev, uctx); in create_cq() [all …]
|
D | t4.h | 566 static inline void write_gts(struct t4_cq *cq, u32 val) in write_gts() argument 568 if (cq->bar2_va) in write_gts() 569 writel(val | INGRESSQID_V(cq->bar2_qid), in write_gts() 570 cq->bar2_va + SGE_UDB_GTS); in write_gts() 572 writel(val | INGRESSQID_V(cq->cqid), cq->gts); in write_gts() 575 static inline int t4_clear_cq_armed(struct t4_cq *cq) in t4_clear_cq_armed() argument 577 return test_and_clear_bit(CQ_ARMED, &cq->flags); in t4_clear_cq_armed() 580 static inline int t4_arm_cq(struct t4_cq *cq, int se) in t4_arm_cq() argument 584 set_bit(CQ_ARMED, &cq->flags); in t4_arm_cq() 585 while (cq->cidx_inc > CIDXINC_M) { in t4_arm_cq() [all …]
|
D | Makefile | 5 iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o id_table.o
|
D | ev.c | 107 event.element.cq = &chp->ibcq; in post_qp_event() 233 t4_clear_cq_armed(&chp->cq); in c4iw_ev_handler()
|
D | qp.c | 1138 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp() 1139 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp() 1153 if (t4_clear_cq_armed(&rchp->cq) && in __flush_qp() 1161 if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { in __flush_qp() 1167 if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { in __flush_qp() 1186 t4_set_cq_in_error(&rchp->cq); in flush_qp() 1191 t4_set_cq_in_error(&schp->cq); in flush_qp() 1642 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); in c4iw_create_qp() 1643 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); in c4iw_create_qp() 1682 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp() [all …]
|
D | iw_cxgb4.h | 415 struct t4_cq cq; member 988 int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); 1007 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); 1009 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
|
D | device.c | 787 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || in c4iw_rdev_open() 788 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { in c4iw_rdev_open() 792 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size, in c4iw_rdev_open() 793 rdev->lldi.vr->cq.size); in c4iw_rdev_open() 810 rdev->lldi.vr->cq.start, in c4iw_rdev_open() 811 rdev->lldi.vr->cq.size); in c4iw_rdev_open() 919 infop->vr->cq.size > 0; in rdma_supported()
|
D | t4fw_ri_api.h | 298 } cq; member
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_cq.c | 55 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) in ehca_cq_assign_qp() argument 61 spin_lock_irqsave(&cq->spinlock, flags); in ehca_cq_assign_qp() 62 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); in ehca_cq_assign_qp() 63 spin_unlock_irqrestore(&cq->spinlock, flags); in ehca_cq_assign_qp() 65 ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x", in ehca_cq_assign_qp() 66 cq->cq_number, qp_num); in ehca_cq_assign_qp() 71 int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num) in ehca_cq_unassign_qp() argument 79 spin_lock_irqsave(&cq->spinlock, flags); in ehca_cq_unassign_qp() 80 hlist_for_each(iter, &cq->qp_hashtab[key]) { in ehca_cq_unassign_qp() 84 ehca_dbg(cq->ib_cq.device, in ehca_cq_unassign_qp() [all …]
|
D | ehca_irq.c | 77 static inline void comp_event_callback(struct ehca_cq *cq) in comp_event_callback() argument 79 if (!cq->ib_cq.comp_handler) in comp_event_callback() 82 spin_lock(&cq->cb_lock); in comp_event_callback() 83 cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context); in comp_event_callback() 84 spin_unlock(&cq->cb_lock); in comp_event_callback() 111 struct ehca_cq *cq = (struct ehca_cq *)data; in print_error_data() local 115 cq->cq_number, resource); in print_error_data() 243 struct ehca_cq *cq; in cq_event_callback() local 247 cq = idr_find(&ehca_cq_idr, token); in cq_event_callback() 248 if (cq) in cq_event_callback() [all …]
|
D | ehca_uverbs.c | 161 static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq, in ehca_mmap_cq() argument 168 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number); in ehca_mmap_cq() 169 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa); in ehca_mmap_cq() 171 ehca_err(cq->ib_cq.device, in ehca_mmap_cq() 173 ret, cq->cq_number); in ehca_mmap_cq() 179 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number); in ehca_mmap_cq() 180 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue); in ehca_mmap_cq() 182 ehca_err(cq->ib_cq.device, in ehca_mmap_cq() 184 ret, cq->cq_number); in ehca_mmap_cq() 190 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x", in ehca_mmap_cq() [all …]
|
D | hipz_fns_core.h | 75 static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes) in hipz_update_feca() argument 77 hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca, in hipz_update_feca() 81 static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value) in hipz_set_cqx_n0() argument 85 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0, in hipz_set_cqx_n0() 88 cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0); in hipz_set_cqx_n0() 91 static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value) in hipz_set_cqx_n1() argument 95 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1, in hipz_set_cqx_n1() 97 cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1); in hipz_set_cqx_n1()
|
D | ehca_reqs.c | 626 static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) in ehca_poll_cq_one() argument 629 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); in ehca_poll_cq_one() 642 ehca_dbg(cq->device, "Completion queue is empty " in ehca_poll_cq_one() 658 ehca_err(cq->device, "cq_num=%x qp_num=%x " in ehca_poll_cq_one() 671 ehca_dbg(cq->device, in ehca_poll_cq_one() 691 ehca_dbg(cq->device, in ehca_poll_cq_one() 696 ehca_dbg(cq->device, in ehca_poll_cq_one() 738 ehca_warn(cq->device, "Double cqe on qp_num=%#x", in ehca_poll_cq_one() 761 ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x " in ehca_poll_cq_one() 799 static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq, in generate_flush_cqes() argument [all …]
|
D | ehca_iverbs.h | 138 int ehca_destroy_cq(struct ib_cq *cq); 140 int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata); 142 int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc); 144 int ehca_peek_cq(struct ib_cq *cq, int wc_cnt); 146 int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
|
D | ehca_classes.h | 73 struct ehca_cq *cq; member 478 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp); 479 int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num); 480 struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
|
D | hcp_if.c | 249 struct ehca_cq *cq, in hipz_h_alloc_resource_cq() argument 260 cq->token, /* r7 */ in hipz_h_alloc_resource_cq() 263 cq->ipz_cq_handle.handle = outs[0]; in hipz_h_alloc_resource_cq() 268 rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]); in hipz_h_alloc_resource_cq() 275 cq->ipz_cq_handle.handle, /* r5 */ in hipz_h_alloc_resource_cq() 686 struct ehca_cq *cq, in hipz_h_destroy_cq() argument 691 ret = hcp_galpas_dtor(&cq->galpas); in hipz_h_destroy_cq() 699 cq->ipz_cq_handle.handle, /* r5 */ in hipz_h_destroy_cq()
|
D | hcp_if.h | 72 struct ehca_cq *cq, 187 struct ehca_cq *cq,
|
D | ehca_qp.c | 422 static void del_from_err_list(struct ehca_cq *cq, struct list_head *node) in del_from_err_list() argument 426 spin_lock_irqsave(&cq->spinlock, flags); in del_from_err_list() 431 spin_unlock_irqrestore(&cq->spinlock, flags); in del_from_err_list()
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | cxio_hal.c | 71 int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, in cxio_hal_cq_op() argument 79 setup.id = cq->cqid; in cxio_hal_cq_op() 92 if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) { in cxio_hal_cq_op() 95 rptr = cq->rptr; in cxio_hal_cq_op() 101 while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret) in cxio_hal_cq_op() 109 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); in cxio_hal_cq_op() 110 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { in cxio_hal_cq_op() 158 int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) in cxio_create_cq() argument 161 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); in cxio_create_cq() 164 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); in cxio_create_cq() [all …]
|
D | cxio_wr.h | 735 static inline int cxio_cq_in_error(struct t3_cq *cq) in cxio_cq_in_error() argument 738 &cq->queue[1 << cq->size_log2])->cq_err; in cxio_cq_in_error() 741 static inline void cxio_set_cq_in_error(struct t3_cq *cq) in cxio_set_cq_in_error() argument 744 &cq->queue[1 << cq->size_log2])->cq_err = 1; in cxio_set_cq_in_error() 767 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) in cxio_next_hw_cqe() argument 771 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2)); in cxio_next_hw_cqe() 772 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe)) in cxio_next_hw_cqe() 777 static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq) in cxio_next_sw_cqe() argument 781 if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) { in cxio_next_sw_cqe() 782 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2)); in cxio_next_sw_cqe() [all …]
|
D | cxio_hal.h | 158 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, 160 int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel); 161 int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 162 int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 169 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode); 190 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); 191 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); 192 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 193 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 194 void cxio_flush_hw_cq(struct t3_cq *cq); [all …]
|
D | iwch_provider.c | 136 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); in iwch_destroy_cq() 140 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); in iwch_destroy_cq() 193 chp->cq.size_log2 = ilog2(entries); in iwch_create_cq() 195 if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { in iwch_create_cq() 200 chp->ibcq.cqe = 1 << chp->cq.size_log2; in iwch_create_cq() 205 if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { in iwch_create_cq() 206 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); in iwch_create_cq() 219 uresp.cqid = chp->cq.cqid; in iwch_create_cq() 220 uresp.size_log2 = chp->cq.size_log2; in iwch_create_cq() 226 mm->addr = virt_to_phys(chp->cq.queue); in iwch_create_cq() [all …]
|
D | iwch_cq.c | 55 rd_cqe = cxio_next_cqe(&chp->cq); in iwch_poll_cq_one() 67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, in iwch_poll_cq_one() 71 credit, chp->cq.cqid); in iwch_poll_cq_one() 72 cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit); in iwch_poll_cq_one()
|
D | iwch_qp.c | 822 cxio_flush_hw_cq(&rchp->cq); in __flush_qp() 823 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp() 824 flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp() 836 cxio_flush_hw_cq(&schp->cq); in __flush_qp() 837 cxio_count_scqes(&schp->cq, &qhp->wq, &count); in __flush_qp() 838 flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); in __flush_qp() 863 cxio_set_cq_in_error(&rchp->cq); in flush_qp() 868 cxio_set_cq_in_error(&schp->cq); in flush_qp()
|
D | iwch_ev.c | 91 event.element.cq = &chp->ibcq; in post_qp_event()
|
D | iwch_provider.h | 106 struct t3_cq cq; member
|
/linux-4.4.14/include/linux/mlx5/ |
D | cq.h | 128 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) in mlx5_cq_set_ci() argument 130 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); in mlx5_cq_set_ci() 138 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, in mlx5_cq_arm() argument 147 sn = cq->arm_sn & 3; in mlx5_cq_arm() 150 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); in mlx5_cq_arm() 158 doorbell[1] = cpu_to_be32(cq->cqn); in mlx5_cq_arm() 165 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 167 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); 168 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, 170 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, [all …]
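
mlx5_cq_arm() above packs the arm sequence number, the command, and the consumer index (big-endian) into the arm doorbell record, then rings the device doorbell on the UAR page so the CQ interrupt is re-enabled; mlx4_cq_arm further down is the same idea. A model of that two-step sequence, assuming htonl() for cpu_to_be32() and an invented constant and layout:

#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <arpa/inet.h>

#define CQ_DB_REQ_NOT 0  /* "notify on next completion"; value illustrative */

struct model_cq {
	uint32_t cons_index;
	uint32_t arm_sn;              /* bumped on every completion event */
	volatile uint32_t *arm_db;    /* arm doorbell record */
	volatile uint64_t *uar_ring;  /* device doorbell register */
	uint32_t cqn;
};

static void cq_arm(struct model_cq *cq, uint32_t cmd)
{
	uint32_t sn = cq->arm_sn & 3;
	uint32_t ci = cq->cons_index & 0xffffff;
	uint32_t doorbell[2];
	uint64_t db;

	/* step 1: publish the arm request in the doorbell record */
	*cq->arm_db = htonl(sn << 28 | cmd | ci);
	/* the record must be visible before the doorbell is rung */
	atomic_thread_fence(memory_order_seq_cst);
	/* step 2: ring the doorbell (a single 64-bit MMIO write in-kernel) */
	doorbell[0] = htonl(sn << 28 | cmd | ci);
	doorbell[1] = htonl(cq->cqn);
	memcpy(&db, doorbell, sizeof(db));
	*cq->uar_ring = db;
}

int main(void)
{
	uint32_t rec = 0;
	uint64_t ring = 0;
	struct model_cq cq = { .cons_index = 5, .arm_sn = 1,
			       .arm_db = &rec, .uar_ring = &ring, .cqn = 42 };

	cq_arm(&cq, CQ_DB_REQ_NOT);
	return 0;
}

The sequence number is what lets the hardware discard stale arm requests that raced with a completion event.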
|
/linux-4.4.14/drivers/isdn/mISDN/ |
D | dsp_hwec.c | 55 struct mISDN_ctrl_req cq; in dsp_hwec_enable() local 97 memset(&cq, 0, sizeof(cq)); in dsp_hwec_enable() 98 cq.op = MISDN_CTRL_HFC_ECHOCAN_ON; in dsp_hwec_enable() 99 cq.p1 = deftaps; in dsp_hwec_enable() 100 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) { in dsp_hwec_enable() 109 struct mISDN_ctrl_req cq; in dsp_hwec_disable() local 118 memset(&cq, 0, sizeof(cq)); in dsp_hwec_disable() 119 cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF; in dsp_hwec_disable() 120 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) { in dsp_hwec_disable()
|
D | hwchannel.c | 165 mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq) in mISDN_ctrl_bchannel() argument 169 switch (cq->op) { in mISDN_ctrl_bchannel() 171 cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY | in mISDN_ctrl_bchannel() 175 if (cq->p1) { in mISDN_ctrl_bchannel() 176 memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE); in mISDN_ctrl_bchannel() 184 cq->p2 = bch->dropcnt; in mISDN_ctrl_bchannel() 185 if (cq->p1) in mISDN_ctrl_bchannel() 192 if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE) in mISDN_ctrl_bchannel() 193 bch->next_maxlen = cq->p2; in mISDN_ctrl_bchannel() 194 if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE) in mISDN_ctrl_bchannel() [all …]
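
In this mISDN directory, cq is not a completion queue at all: it is a struct mISDN_ctrl_req control request, as mISDN_ctrl_bchannel() above and the dsp/l1oip callers show. The calling convention is: zero the request, fill in the opcode and parameters, and hand it to the peer's ctrl() callback. A sketch of that convention with a stand-in struct and opcode (the field names mirror the excerpts; the values are illustrative):

#include <string.h>

struct model_ctrl_req {
	int op;       /* MISDN_CTRL_* opcode */
	int channel;
	int p1, p2;   /* opcode-specific parameters */
};

enum { CTRL_FILL_EMPTY = 1 };  /* stands in for MISDN_CTRL_FILL_EMPTY */

static int peer_ctrl(struct model_ctrl_req *cq)
{
	return cq->op == CTRL_FILL_EMPTY ? 0 : -1;  /* pretend peer */
}

int main(void)
{
	struct model_ctrl_req cq;

	memset(&cq, 0, sizeof(cq));  /* unused fields must be zero */
	cq.op = CTRL_FILL_EMPTY;
	cq.p1 = 1;                   /* enable */
	cq.p2 = 0x2a;                /* fill byte, e.g. a silence pattern */
	return peer_ctrl(&cq);
}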
|
D | dsp_core.c | 192 struct mISDN_ctrl_req cq; in dsp_rx_off_member() local 195 memset(&cq, 0, sizeof(cq)); in dsp_rx_off_member() 224 cq.op = MISDN_CTRL_RX_OFF; in dsp_rx_off_member() 225 cq.p1 = rx_off; in dsp_rx_off_member() 226 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { in dsp_rx_off_member() 259 struct mISDN_ctrl_req cq; in dsp_fill_empty() local 261 memset(&cq, 0, sizeof(cq)); in dsp_fill_empty() 269 cq.op = MISDN_CTRL_FILL_EMPTY; in dsp_fill_empty() 270 cq.p1 = 1; in dsp_fill_empty() 271 cq.p2 = dsp_silence; in dsp_fill_empty() [all …]
|
D | l1oip_core.c | 949 channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq) in channel_dctrl() argument 954 switch (cq->op) { in channel_dctrl() 956 cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER in channel_dctrl() 960 hc->remoteip = (u32)cq->p1; in channel_dctrl() 961 hc->remoteport = cq->p2 & 0xffff; in channel_dctrl() 962 hc->localport = cq->p2 >> 16; in channel_dctrl() 981 cq->p1 = hc->remoteip; in channel_dctrl() 982 cq->p2 = hc->remoteport | (hc->localport << 16); in channel_dctrl() 986 __func__, cq->op); in channel_dctrl() 1185 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument [all …]
|
D | socket.c | 291 struct mISDN_ctrl_req cq; in data_sock_ioctl_bound() local 302 if (copy_from_user(&cq, p, sizeof(cq))) { in data_sock_ioctl_bound() 309 if (bchan->nr == cq.channel) { in data_sock_ioctl_bound() 311 CONTROL_CHANNEL, &cq); in data_sock_ioctl_bound() 317 CONTROL_CHANNEL, &cq); in data_sock_ioctl_bound() 320 if (copy_to_user(p, &cq, sizeof(cq))) in data_sock_ioctl_bound()
|
D | dsp_cmx.c | 364 struct mISDN_ctrl_req cq; in dsp_cmx_hw_message() local 366 memset(&cq, 0, sizeof(cq)); in dsp_cmx_hw_message() 367 cq.op = message; in dsp_cmx_hw_message() 368 cq.p1 = param1 | (param2 << 8); in dsp_cmx_hw_message() 369 cq.p2 = param3 | (param4 << 8); in dsp_cmx_hw_message() 371 dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq); in dsp_cmx_hw_message()
|
/linux-4.4.14/include/linux/mlx4/ |
D | cq.h | 139 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd, in mlx4_cq_arm() argument 147 sn = cq->arm_sn & 3; in mlx4_cq_arm() 148 ci = cq->cons_index & 0xffffff; in mlx4_cq_arm() 150 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); in mlx4_cq_arm() 158 doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn); in mlx4_cq_arm() 164 static inline void mlx4_cq_set_ci(struct mlx4_cq *cq) in mlx4_cq_set_ci() argument 166 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); in mlx4_cq_set_ci() 174 int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq, 176 int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
|
D | device.h | 812 int cq; member 1095 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, 1097 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
|
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.c | 125 struct ehea_cq *cq; in ehea_create_cq() local 132 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in ehea_create_cq() 133 if (!cq) in ehea_create_cq() 136 cq->attr.max_nr_of_cqes = nr_of_cqe; in ehea_create_cq() 137 cq->attr.cq_token = cq_token; in ehea_create_cq() 138 cq->attr.eq_handle = eq_handle; in ehea_create_cq() 140 cq->adapter = adapter; in ehea_create_cq() 142 cq_handle_ref = &cq->fw_handle; in ehea_create_cq() 146 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, in ehea_create_cq() 147 &cq->fw_handle, &cq->epas); in ehea_create_cq() [all …]
|
D | ehea_hw.h | 246 static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes) in ehea_update_feca() argument 248 struct h_epa epa = cq->epas.kernel; in ehea_update_feca() 253 static inline void ehea_reset_cq_n1(struct ehea_cq *cq) in ehea_reset_cq_n1() argument 255 struct h_epa epa = cq->epas.kernel; in ehea_reset_cq_n1()
|
D | ehea_qmr.h | 347 static inline void ehea_inc_cq(struct ehea_cq *cq) in ehea_inc_cq() argument 349 hw_qeit_inc(&cq->hw_queue); in ehea_inc_cq() 381 int ehea_destroy_cq(struct ehea_cq *cq);
|
/linux-4.4.14/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_hw.c | 123 (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe))); in ocrdma_get_mcqe() 132 dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); in ocrdma_mcq_inc_tail() 521 struct ocrdma_queue_info *cq, in ocrdma_mbx_mq_cq_create() argument 533 cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) << in ocrdma_mbx_mq_cq_create() 535 cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size); in ocrdma_mbx_mq_cq_create() 539 cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe); in ocrdma_mbx_mq_cq_create() 541 ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, in ocrdma_mbx_mq_cq_create() 542 cq->dma, PAGE_SIZE_4K); in ocrdma_mbx_mq_cq_create() 546 cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); in ocrdma_mbx_mq_cq_create() 547 cq->created = true; in ocrdma_mbx_mq_cq_create() [all …]
|
D | ocrdma_verbs.c | 1032 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, in ocrdma_copy_cq_uresp() argument 1041 uresp.cq_id = cq->id; in ocrdma_copy_cq_uresp() 1042 uresp.page_size = PAGE_ALIGN(cq->len); in ocrdma_copy_cq_uresp() 1044 uresp.max_hw_cqe = cq->max_hw_cqe; in ocrdma_copy_cq_uresp() 1045 uresp.page_addr[0] = virt_to_phys(cq->va); in ocrdma_copy_cq_uresp() 1048 uresp.phase_change = cq->phase_change ? 1 : 0; in ocrdma_copy_cq_uresp() 1052 __func__, dev->id, cq->id); in ocrdma_copy_cq_uresp() 1063 cq->ucontext = uctx; in ocrdma_copy_cq_uresp() 1074 struct ocrdma_cq *cq; in ocrdma_create_cq() local 1089 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in ocrdma_create_cq() [all …]
|
D | ocrdma.h | 158 struct ocrdma_queue_info cq; member 499 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) in is_cqe_valid() argument 503 return (cqe_valid == cq->phase); in is_cqe_valid()
|
/linux-4.4.14/drivers/infiniband/core/ |
D | verbs.c | 471 srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; in ib_create_srq() 473 atomic_inc(&srq->ext.xrc.cq->usecnt); in ib_create_srq() 506 struct ib_cq *uninitialized_var(cq); in ib_destroy_srq() 516 cq = srq->ext.xrc.cq; in ib_destroy_srq() 524 atomic_dec(&cq->usecnt); in ib_destroy_srq() 1160 struct ib_cq *cq; in ib_create_cq() local 1162 cq = device->create_cq(device, cq_attr, NULL, NULL); in ib_create_cq() 1164 if (!IS_ERR(cq)) { in ib_create_cq() 1165 cq->device = device; in ib_create_cq() 1166 cq->uobject = NULL; in ib_create_cq() [all …]
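
The verbs.c excerpt above shows the core's usecnt accounting: objects built on top of a CQ (here an XRC SRQ) bump the CQ's use count at create time and drop it at destroy time, and the core refuses to destroy a CQ whose use count is nonzero. A minimal model of that lifetime rule, with invented names:

#include <stdatomic.h>
#include <errno.h>
#include <stddef.h>

struct model_cq { atomic_int usecnt; };
struct model_srq { struct model_cq *cq; };

static void srq_attach(struct model_srq *srq, struct model_cq *cq)
{
	srq->cq = cq;
	atomic_fetch_add(&cq->usecnt, 1);
}

static void srq_detach(struct model_srq *srq)
{
	atomic_fetch_sub(&srq->cq->usecnt, 1);
	srq->cq = NULL;
}

static int cq_destroy(struct model_cq *cq)
{
	if (atomic_load(&cq->usecnt))
		return -EBUSY;  /* still referenced by an SRQ/QP */
	/* the CQ would be freed here */
	return 0;
}

int main(void)
{
	struct model_cq cq = { 0 };
	struct model_srq srq;

	srq_attach(&srq, &cq);
	if (cq_destroy(&cq) != -EBUSY)
		return 1;       /* destroy must fail while attached */
	srq_detach(&srq);
	return cq_destroy(&cq);
}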
|
D | uverbs_cmd.c | 226 static void put_cq_read(struct ib_cq *cq) in put_cq_read() argument 228 put_uobj_read(cq->uobject); in put_cq_read() 1363 struct ib_cq *cq; in create_cq() local 1398 cq = ib_dev->create_cq(ib_dev, &attr, in create_cq() 1400 if (IS_ERR(cq)) { in create_cq() 1401 ret = PTR_ERR(cq); in create_cq() 1405 cq->device = ib_dev; in create_cq() 1406 cq->uobject = &obj->uobject; in create_cq() 1407 cq->comp_handler = ib_uverbs_comp_handler; in create_cq() 1408 cq->event_handler = ib_uverbs_cq_event_handler; in create_cq() [all …]
|
D | uverbs_main.c | 269 struct ib_cq *cq = uobj->object; in ib_uverbs_cleanup_ucontext() local 270 struct ib_uverbs_event_file *ev_file = cq->cq_context; in ib_uverbs_cleanup_ucontext() 275 ib_destroy_cq(cq); in ib_uverbs_cleanup_ucontext() 465 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) in ib_uverbs_comp_handler() argument 487 uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); in ib_uverbs_comp_handler() 489 entry->desc.comp.cq_handle = cq->uobject->user_handle; in ib_uverbs_comp_handler() 536 struct ib_ucq_object *uobj = container_of(event->element.cq->uobject, in ib_uverbs_cq_event_handler()
|
D | mad_priv.h | 200 struct ib_cq *cq; member
|
D | mad.c | 2560 ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_completion_handler() 2562 while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { in ib_mad_completion_handler() 2858 static void ib_mad_thread_completion_handler(struct ib_cq *cq, void *arg) in ib_mad_thread_completion_handler() argument 2860 struct ib_mad_port_private *port_priv = cq->cq_context; in ib_mad_thread_completion_handler() 3042 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); in ib_mad_port_start() 3105 qp_init_attr.send_cq = qp_info->port_priv->cq; in create_mad_qp() 3106 qp_init_attr.recv_cq = qp_info->port_priv->cq; in create_mad_qp() 3183 port_priv->cq = ib_create_cq(port_priv->device, in ib_mad_port_open() 3186 if (IS_ERR(port_priv->cq)) { in ib_mad_port_open() 3188 ret = PTR_ERR(port_priv->cq); in ib_mad_port_open() [all …]
|
D | uverbs.h | 199 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);
|
/linux-4.4.14/net/9p/ |
D | trans_rdma.c | 96 struct ib_cq *cq; member 342 static void cq_comp_handler(struct ib_cq *cq, void *cq_context) in cq_comp_handler() argument 349 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); in cq_comp_handler() 350 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { in cq_comp_handler() 389 if (rdma->cq && !IS_ERR(rdma->cq)) in rdma_destroy_trans() 390 ib_destroy_cq(rdma->cq); in rdma_destroy_trans() 699 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, in rdma_create_trans() 702 if (IS_ERR(rdma->cq)) in rdma_create_trans() 704 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); in rdma_create_trans() 721 qp_attr.send_cq = rdma->cq; in rdma_create_trans() [all …]
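
The cq_comp_handler() excerpt above is the textbook consumer-side loop: on a completion event, re-request notification first (ib_req_notify_cq) and only then drain the CQ with ib_poll_cq, so a completion that lands between the drain and the re-arm is not lost; the handler simply fires once more. A model of that ordering, with poll_one() and the types as stand-ins:

#include <stdbool.h>

struct model_cq {
	int pending;   /* completions waiting */
	bool armed;
};

static void req_notify(struct model_cq *cq) { cq->armed = true; }

static int poll_one(struct model_cq *cq)
{
	if (!cq->pending)
		return 0;
	cq->pending--;
	return 1;      /* handled one work completion */
}

static void comp_handler(struct model_cq *cq)
{
	cq->armed = false;   /* the event consumed the arm */
	req_notify(cq);      /* re-arm BEFORE draining */
	while (poll_one(cq) > 0)
		;            /* process each completion */
}

int main(void)
{
	struct model_cq cq = { .pending = 3, .armed = true };

	comp_handler(&cq);
	return cq.pending || !cq.armed;  /* drained and re-armed */
}

Re-arming after the drain instead would open a window where a new completion raises no event while no one is polling, a classic lost-wakeup bug.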
|
/linux-4.4.14/drivers/net/ethernet/brocade/bna/ |
D | bna_tx_rx.c | 575 rxf->rit[offset] = rxp->cq.ccb->id; in bna_rit_init() 1483 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); in bna_rx_sm_started_entry() 1672 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, in bna_bfi_rx_enet_start() 1673 &rxp->cq.qpt); in bna_bfi_rx_enet_start() 1676 rxp->cq.ib.ib_seg_host_addr.lsb; in bna_bfi_rx_enet_start() 1678 rxp->cq.ib.ib_seg_host_addr.msb; in bna_bfi_rx_enet_start() 1680 htons((u16)rxp->cq.ib.intr_vector); in bna_bfi_rx_enet_start() 1687 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) in bna_bfi_rx_enet_start() 1691 htonl((u32)rxp->cq.ib.coalescing_timeo); in bna_bfi_rx_enet_start() 1693 htonl((u32)rxp->cq.ib.interpkt_timeo); in bna_bfi_rx_enet_start() [all …]
|
D | bna_types.h | 631 struct bna_cq *cq; member 699 struct bna_cq cq; member
|
D | bnad.c | 604 struct bna_cq_entry *cq, *cmpl, *next_cmpl; in bnad_cq_process() local 617 cq = ccb->sw_q; in bnad_cq_process() 620 cmpl = &cq[ccb->producer_index]; in bnad_cq_process() 667 next_cmpl = &cq[pi]; in bnad_cq_process() 737 cmpl = &cq[ccb->producer_index]; in bnad_cq_process() 1037 (struct bnad_rx_info *)ccb->cq->rx->priv; in bnad_cb_ccb_setup() 1047 (struct bnad_rx_info *)ccb->cq->rx->priv; in bnad_cb_ccb_destroy()
|
D | bfi_enet.h | 503 struct bfi_enet_cq cq; member
|
/linux-4.4.14/drivers/atm/ |
D | ambassador.c | 557 amb_cq * cq = &dev->cq; in command_do() local 558 volatile amb_cq_ptrs * ptrs = &cq->ptrs; in command_do() 566 spin_lock (&cq->lock); in command_do() 569 if (cq->pending < cq->maximum) { in command_do() 578 cq->pending++; in command_do() 584 if (cq->pending > cq->high) in command_do() 585 cq->high = cq->pending; in command_do() 586 spin_unlock (&cq->lock); in command_do() 591 msleep(cq->pending); in command_do() 609 spin_lock (&cq->lock); in command_do() [all …]
|
D | ambassador.h | 633 amb_cq cq; member
|
/linux-4.4.14/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.c | 323 struct cmp_queue *cq, int q_len) in nicvf_init_cmp_queue() argument 327 err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, in nicvf_init_cmp_queue() 332 cq->desc = cq->dmem.base; in nicvf_init_cmp_queue() 333 cq->thresh = CMP_QUEUE_CQE_THRESH; in nicvf_init_cmp_queue() 339 static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) in nicvf_free_cmp_queue() argument 341 if (!cq) in nicvf_free_cmp_queue() 343 if (!cq->dmem.base) in nicvf_free_cmp_queue() 346 nicvf_free_q_desc_mem(nic, &cq->dmem); in nicvf_free_cmp_queue() 563 struct cmp_queue *cq; in nicvf_cmp_queue_config() local 566 cq = &qs->cq[qidx]; in nicvf_cmp_queue_config() [all …]
|
D | nicvf_main.c | 509 struct cmp_queue *cq, in nicvf_snd_pkt_handler() argument 529 nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); in nicvf_snd_pkt_handler() 569 struct cmp_queue *cq, in nicvf_rcv_pkt_handler() argument 586 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); in nicvf_rcv_pkt_handler() 641 struct cmp_queue *cq = &qs->cq[cq_idx]; in nicvf_cq_intr_handler() local 645 spin_lock_bh(&cq->lock); in nicvf_cq_intr_handler() 662 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); in nicvf_cq_intr_handler() 664 cqe_head &= (cq->dmem.q_len - 1); in nicvf_cq_intr_handler() 666 prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); in nicvf_cq_intr_handler() 677 nicvf_rcv_pkt_handler(netdev, napi, cq, in nicvf_cq_intr_handler() [all …]
|
D | nicvf_queues.h | 290 struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS]; member 348 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx); 350 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
|
/linux-4.4.14/net/sunrpc/ |
D | cache.c | 926 struct cache_queue *cq; in cache_poll() local 938 for (cq= &rp->q; &cq->list != &cd->queue; in cache_poll() 939 cq = list_entry(cq->list.next, struct cache_queue, list)) in cache_poll() 940 if (!cq->reader) { in cache_poll() 954 struct cache_queue *cq; in cache_ioctl() local 964 for (cq= &rp->q; &cq->list != &cd->queue; in cache_ioctl() 965 cq = list_entry(cq->list.next, struct cache_queue, list)) in cache_ioctl() 966 if (!cq->reader) { in cache_ioctl() 968 container_of(cq, struct cache_request, q); in cache_ioctl() 1010 struct cache_queue *cq; in cache_release() local [all …]
|
/linux-4.4.14/include/rdma/ |
D | ib_verbs.h | 484 struct ib_cq *cq; member 814 struct ib_cq *cq; member 1308 typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); 1332 struct ib_cq *cq; member 1713 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, 1715 int (*destroy_cq)(struct ib_cq *cq); 1716 int (*resize_cq)(struct ib_cq *cq, int cqe, 1718 int (*poll_cq)(struct ib_cq *cq, int num_entries, 1720 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 1721 int (*req_notify_cq)(struct ib_cq *cq, [all …]
|
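Note: ib_verbs.h above declares both the ib_comp_handler typedef and the per-device CQ methods (create/destroy/resize/poll/peek/req_notify). A consumer binds its handler at creation time; a sketch against the 4.4-era API (everything except the verbs calls and types is an assumption):

    static void my_comp_handler(struct ib_cq *cq, void *cq_context)
    {
            /* see the drain loop sketched earlier */
    }

    static struct ib_cq *alloc_cq_sketch(struct ib_device *dev,
                                         void *ctx, int depth)
    {
            struct ib_cq_init_attr attr = {
                    .cqe = depth,           /* requested number of entries */
                    .comp_vector = 0,       /* completion vector hint */
            };

            /* NULL: no async event handler in this sketch */
            return ib_create_cq(dev, my_comp_handler, NULL, ctx, &attr);
    }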
/linux-4.4.14/drivers/infiniband/ulp/iser/ |
D | iser_verbs.c | 50 static void iser_cq_callback(struct ib_cq *cq, void *cq_context); 119 comp->cq = ib_create_cq(device->ib_device, in iser_create_device_ib_res() 124 if (IS_ERR(comp->cq)) { in iser_create_device_ib_res() 125 comp->cq = NULL; in iser_create_device_ib_res() 129 if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP)) in iser_create_device_ib_res() 163 if (comp->cq) in iser_create_device_ib_res() 164 ib_destroy_cq(comp->cq); in iser_create_device_ib_res() 186 ib_destroy_cq(comp->cq); in iser_free_device_ib_res() 187 comp->cq = NULL; in iser_free_device_ib_res() 493 init_attr.send_cq = ib_conn->comp->cq; in iser_create_ib_conn_res() [all …]
|
D | iscsi_iser.h | 345 struct ib_cq *cq; member
|
/linux-4.4.14/drivers/isdn/hardware/mISDN/ |
D | speedfax.c | 221 channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 225 switch (cq->op) { in channel_ctrl() 227 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; in channel_ctrl() 231 if (cq->channel < 0 || cq->channel > 3) { in channel_ctrl() 235 ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel); in channel_ctrl() 238 ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1); in channel_ctrl() 241 pr_info("%s: unknown Op %x\n", sf->name, cq->op); in channel_ctrl()
|
D | avmfritz.c | 843 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 845 return mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 880 channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 884 switch (cq->op) { in channel_ctrl() 886 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; in channel_ctrl() 890 if (cq->channel < 0 || cq->channel > 3) { in channel_ctrl() 894 ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel); in channel_ctrl() 897 ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1); in channel_ctrl() 900 pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op); in channel_ctrl()
|
D | hfcpci.c | 1534 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 1536 return mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 1791 channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 1796 switch (cq->op) { in channel_ctrl() 1798 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | in channel_ctrl() 1803 if (cq->channel < 0 || cq->channel > 2) { in channel_ctrl() 1807 if (cq->channel & 1) { in channel_ctrl() 1819 if (cq->channel & 2) { in channel_ctrl() 1831 if (cq->channel & 3) in channel_ctrl() 1841 if (cq->channel == cq->p1) { in channel_ctrl() [all …]
|
D | netjet.c | 797 channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 799 return mISDN_ctrl_bchannel(&bc->bch, cq); in channel_bctrl() 835 channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 839 switch (cq->op) { in channel_ctrl() 841 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; in channel_ctrl() 845 if (cq->channel < 0 || cq->channel > 3) { in channel_ctrl() 849 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel); in channel_ctrl() 852 ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1); in channel_ctrl() 855 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op); in channel_ctrl()
|
D | mISDNipac.c | 1397 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 1399 return mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 1514 channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 1518 switch (cq->op) { in channel_ctrl() 1520 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3; in channel_ctrl() 1524 if (cq->channel < 0 || cq->channel > 3) { in channel_ctrl() 1528 ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel); in channel_ctrl() 1531 ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1); in channel_ctrl() 1534 pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op); in channel_ctrl()
|
D | hfcmulti.c | 3566 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 3570 (struct dsp_features *)(*((u_long *)&cq->p1)); in channel_bctrl() 3578 switch (cq->op) { in channel_bctrl() 3580 ret = mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 3581 cq->op |= MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP; in channel_bctrl() 3584 ret = mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 3585 hc->chan[bch->slot].rx_off = !!cq->p1; in channel_bctrl() 3598 ret = mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 3622 slot_tx = cq->p1 & 0xff; in channel_bctrl() 3623 bank_tx = cq->p1 >> 8; in channel_bctrl() [all …]
|
D | w6692.c | 1002 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 1004 return mISDN_ctrl_bchannel(bch, cq); in channel_bctrl() 1025 channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 1029 switch (cq->op) { in channel_ctrl() 1031 cq->op = MISDN_CTRL_L1_TIMER3; in channel_ctrl() 1034 ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff)); in channel_ctrl() 1037 pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op); in channel_ctrl()
|
D | hfcsusb.c | 504 channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq) in channel_ctrl() argument 510 hw->name, __func__, (cq->op), (cq->channel)); in channel_ctrl() 512 switch (cq->op) { in channel_ctrl() 514 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | in channel_ctrl() 519 hw->name, __func__, cq->op); in channel_ctrl() 806 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 808 return mISDN_ctrl_bchannel(bch, cq); in channel_bctrl()
|
D | mISDNisar.c | 1574 channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq) in channel_bctrl() argument 1576 return mISDN_ctrl_bchannel(bch, cq); in channel_bctrl()
|
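Note: the channel_ctrl() implementations across this mISDN section share one shape: MISDN_CTRL_GETOP reports the supported operations in cq->op, other ops validate cq->channel or cq->p1 before touching hardware, and unknown ops are logged and rejected. A generic sketch (the hardware calls are elided; the constants are the mISDN ones seen above):

    static int channel_ctrl_sketch(struct mISDN_ctrl_req *cq)
    {
            int ret = 0;

            switch (cq->op) {
            case MISDN_CTRL_GETOP:
                    /* advertise what this driver supports */
                    cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
                    break;
            case MISDN_CTRL_LOOP:
                    if (cq->channel < 0 || cq->channel > 3) {
                            ret = -EINVAL;
                            break;
                    }
                    /* program a hardware test loop on cq->channel */
                    break;
            case MISDN_CTRL_L1_TIMER3:
                    /* load the layer-1 timer-3 value from cq->p1 */
                    break;
            default:
                    ret = -EINVAL;  /* unknown op: log and reject */
            }
            return ret;
    }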
/linux-4.4.14/kernel/locking/ |
D | lockdep.c | 916 static inline void __cq_init(struct circular_queue *cq) in __cq_init() argument 918 cq->front = cq->rear = 0; in __cq_init() 922 static inline int __cq_empty(struct circular_queue *cq) in __cq_empty() argument 924 return (cq->front == cq->rear); in __cq_empty() 927 static inline int __cq_full(struct circular_queue *cq) in __cq_full() argument 929 return ((cq->rear + 1) & CQ_MASK) == cq->front; in __cq_full() 932 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) in __cq_enqueue() argument 934 if (__cq_full(cq)) in __cq_enqueue() 937 cq->element[cq->rear] = elem; in __cq_enqueue() 938 cq->rear = (cq->rear + 1) & CQ_MASK; in __cq_enqueue() [all …]
|
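Note: lockdep's circular_queue above is the textbook power-of-two ring with one slot left empty so that full and empty remain distinguishable; it backs lockdep's breadth-first search of the lock dependency graph. A freestanding sketch of the same operations:

    #define CQ_SIZE 8                       /* must be a power of two */
    #define CQ_MASK (CQ_SIZE - 1)

    struct circular_queue_sketch {
            unsigned long element[CQ_SIZE];
            unsigned int front, rear;
    };

    static int cq_enqueue(struct circular_queue_sketch *cq,
                          unsigned long elem)
    {
            if (((cq->rear + 1) & CQ_MASK) == cq->front)
                    return -1;              /* full */
            cq->element[cq->rear] = elem;
            cq->rear = (cq->rear + 1) & CQ_MASK;
            return 0;
    }

    static int cq_dequeue(struct circular_queue_sketch *cq,
                          unsigned long *elem)
    {
            if (cq->front == cq->rear)
                    return -1;              /* empty */
            *elem = cq->element[cq->front];
            cq->front = (cq->front + 1) & CQ_MASK;
            return 0;
    }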
/linux-4.4.14/drivers/infiniband/hw/usnic/ |
D | usnic_ib_verbs.h | 69 int usnic_ib_destroy_cq(struct ib_cq *cq); 88 int usnic_ib_req_notify_cq(struct ib_cq *cq,
|
D | usnic_ib_verbs.c | 597 struct ib_cq *cq; in usnic_ib_create_cq() local 603 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in usnic_ib_create_cq() 604 if (!cq) in usnic_ib_create_cq() 607 return cq; in usnic_ib_create_cq() 610 int usnic_ib_destroy_cq(struct ib_cq *cq) in usnic_ib_destroy_cq() argument 613 kfree(cq); in usnic_ib_destroy_cq() 776 int usnic_ib_req_notify_cq(struct ib_cq *cq, in usnic_ib_req_notify_cq() argument
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
D | verbs.c | 154 rpcrdma_sendcq_poll(struct ib_cq *cq) in rpcrdma_sendcq_poll() argument 162 rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos); in rpcrdma_sendcq_poll() 176 rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context) in rpcrdma_sendcq_upcall() argument 179 rpcrdma_sendcq_poll(cq); in rpcrdma_sendcq_upcall() 180 } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | in rpcrdma_sendcq_upcall() 235 rpcrdma_recvcq_poll(struct ib_cq *cq) in rpcrdma_recvcq_poll() argument 243 rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), pos); in rpcrdma_recvcq_poll() 256 rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context) in rpcrdma_recvcq_upcall() argument 259 rpcrdma_recvcq_poll(cq); in rpcrdma_recvcq_upcall() 260 } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | in rpcrdma_recvcq_upcall() [all …]
|
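Note: unlike the one-at-a-time poll in trans_rdma.c, the xprtrdma upcalls above drain completions in batches and re-arm with IB_CQ_REPORT_MISSED_EVENTS, whose positive return means completions slipped in after the last poll, so the caller loops. A sketch of that shape (handle_wc is hypothetical):

    static void handle_wc(struct ib_wc *wc)
    {
            /* per-completion work goes here */
    }

    static void batched_upcall_sketch(struct ib_cq *cq, void *cq_context)
    {
            struct ib_wc wcs[8];
            int rc, i;

            do {
                    while ((rc = ib_poll_cq(cq, ARRAY_SIZE(wcs), wcs)) > 0)
                            for (i = 0; i < rc; i++)
                                    handle_wc(&wcs[i]);
            } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                      IB_CQ_REPORT_MISSED_EVENTS) > 0);
    }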
D | svc_rdma_transport.c | 300 static void rq_comp_handler(struct ib_cq *cq, void *cq_context) in rq_comp_handler() argument 441 struct ib_cq *cq = xprt->sc_sq_cq; in sq_cq_reap() local 451 while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) { in sq_cq_reap() 482 static void sq_comp_handler(struct ib_cq *cq, void *cq_context) in sq_comp_handler() argument
|
/linux-4.4.14/drivers/infiniband/hw/nes/ |
D | nes_hw.c | 66 static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq); 70 static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq); 2311 struct nes_hw_cq *cq; in nes_process_ceq() local 2325 cq = *((struct nes_hw_cq **)&u64temp); in nes_process_ceq() 2331 cq->ce_handler(nesdev, cq); in nes_process_ceq() 2772 static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) in nes_nic_napi_ce_handler() argument 2774 struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); in nes_nic_napi_ce_handler() 2788 void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) in nes_nic_ce_handler() argument 2793 struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq); in nes_nic_ce_handler() 2814 head = cq->cq_head; in nes_nic_ce_handler() [all …]
|
D | nes_mgt.c | 773 static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) in nes_mgt_ce_handler() argument 775 struct nes_vnic_mgt *mgtvnic = container_of(cq, struct nes_vnic_mgt, mgt_cq); in nes_mgt_ce_handler() 788 head = cq->cq_head; in nes_mgt_ce_handler() 789 cq_size = cq->cq_size; in nes_mgt_ce_handler() 792 cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]); in nes_mgt_ce_handler() 798 qp_id = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_ACCQP_ID_IDX]); in nes_mgt_ce_handler() 810 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]); in nes_mgt_ce_handler() 833 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0; in nes_mgt_ce_handler() 840 nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (cqe_count << 16)); in nes_mgt_ce_handler() 850 cq->cq_head = head; in nes_mgt_ce_handler() [all …]
|
D | nes_hw.h | 937 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq); 968 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
|
/linux-4.4.14/drivers/scsi/be2iscsi/ |
D | be_cmds.c | 406 struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq; in be_mcc_compl_get() 518 hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0); in beiscsi_process_mcc() 949 struct be_queue_info *cq, struct be_queue_info *eq, in beiscsi_cmd_cq_create() argument 956 struct be_dma_mem *q_mem = &cq->dma_mem; in beiscsi_cmd_cq_create() 974 __ilog2_u32(cq->len / 256)); in beiscsi_cmd_cq_create() 990 __ilog2_u32(cq->len / 256)); in beiscsi_cmd_cq_create() 1003 cq->id = le16_to_cpu(resp->cq_id); in beiscsi_cmd_cq_create() 1004 cq->created = true; in beiscsi_cmd_cq_create() 1025 struct be_queue_info *cq) in beiscsi_cmd_mccq_create() argument 1053 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); in beiscsi_cmd_mccq_create() [all …]
|
D | be.h | 102 struct be_queue_info *cq; member 109 struct be_queue_info cq; member
|
D | be_main.c | 863 mcc = &phba->ctrl.mcc_obj.cq; in be_isr_mcc() 900 struct be_queue_info *cq; in be_isr_msix() local 906 cq = pbe_eq->cq; in be_isr_msix() 959 mcc = &phba->ctrl.mcc_obj.cq; in be_isr() 2040 mcc_cq = &phba->ctrl.mcc_obj.cq; in beiscsi_process_mcc_isr() 2092 struct be_queue_info *cq; in beiscsi_process_cq() local 2104 cq = pbe_eq->cq; in beiscsi_process_cq() 2105 sol = queue_tail_node(cq); in beiscsi_process_cq() 2148 hwi_ring_cq_db(phba, cq->id, in beiscsi_process_cq() 2255 queue_tail_inc(cq); in beiscsi_process_cq() [all …]
|
D | be_cmds.h | 719 struct be_queue_info *cq, struct be_queue_info *eq, 727 struct be_queue_info *cq); 759 struct be_queue_info *cq,
|
/linux-4.4.14/drivers/scsi/lpfc/ |
D | lpfc_sli.c | 11791 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, in lpfc_sli4_sp_handle_els_wcqe() argument 11796 struct lpfc_sli_ring *pring = cq->pring; in lpfc_sli4_sp_handle_els_wcqe() 11869 struct lpfc_queue *cq, in lpfc_sli4_sp_handle_abort_xri_wcqe() argument 11886 switch (cq->subtype) { in lpfc_sli4_sp_handle_abort_xri_wcqe() 11908 cq->subtype); in lpfc_sli4_sp_handle_abort_xri_wcqe() 11996 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, in lpfc_sli4_sp_handle_cqe() argument 12010 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, in lpfc_sli4_sp_handle_cqe() 12021 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, in lpfc_sli4_sp_handle_cqe() 12057 struct lpfc_queue *cq = NULL, *childq; in lpfc_sli4_sp_handle_eqe() local 12068 cq = childq; in lpfc_sli4_sp_handle_eqe() [all …]
|
/linux-4.4.14/drivers/scsi/bnx2fc/ |
D | bnx2fc_tgt.c | 689 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, in bnx2fc_alloc_session_resc() 691 if (!tgt->cq) { in bnx2fc_alloc_session_resc() 696 memset(tgt->cq, 0, tgt->cq_mem_size); in bnx2fc_alloc_session_resc() 889 if (tgt->cq) { in bnx2fc_free_session_resc() 891 tgt->cq, tgt->cq_dma); in bnx2fc_free_session_resc() 892 tgt->cq = NULL; in bnx2fc_free_session_resc()
|
D | bnx2fc_hwi.c | 1012 struct fcoe_cqe *cq; in bnx2fc_process_new_cqes() local 1026 if (!tgt->cq) { in bnx2fc_process_new_cqes() 1031 cq = tgt->cq; in bnx2fc_process_new_cqes() 1033 cqe = &cq[cq_cons]; in bnx2fc_process_new_cqes() 1074 cqe = cq; in bnx2fc_process_new_cqes()
|
D | bnx2fc.h | 324 struct fcoe_cqe *cq; member
|
/linux-4.4.14/net/rds/ |
D | ib_cm.c | 227 static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context) in rds_ib_cq_comp_handler_recv() argument 232 rdsdebug("conn %p cq %p\n", conn, cq); in rds_ib_cq_comp_handler_recv() 239 static void poll_cq(struct rds_ib_connection *ic, struct ib_cq *cq, in poll_cq() argument 247 while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) { in poll_cq() 331 static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context) in rds_ib_cq_comp_handler_send() argument 336 rdsdebug("conn %p cq %p\n", conn, cq); in rds_ib_cq_comp_handler_send()
|
D | iw.h | 332 void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context); 357 void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
|
D | iw_send.c | 188 void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context) in rds_iw_send_cq_comp_handler() argument 199 rdsdebug("cq %p conn %p\n", cq, conn); in rds_iw_send_cq_comp_handler() 201 ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in rds_iw_send_cq_comp_handler() 205 while (ib_poll_cq(cq, 1, &wc) > 0) { in rds_iw_send_cq_comp_handler()
|
D | iw_recv.c | 768 void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context) in rds_iw_recv_cq_comp_handler() argument 773 rdsdebug("conn %p cq %p\n", conn, cq); in rds_iw_recv_cq_comp_handler()
|
/linux-4.4.14/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.c | 1872 static void srpt_process_rcv_completion(struct ib_cq *cq, in srpt_process_rcv_completion() argument 1910 static void srpt_process_send_completion(struct ib_cq *cq, in srpt_process_send_completion() argument 1955 static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch) in srpt_process_completion() argument 1960 WARN_ON(cq != ch->cq); in srpt_process_completion() 1962 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in srpt_process_completion() 1963 while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) { in srpt_process_completion() 1966 srpt_process_rcv_completion(cq, ch, &wc[i]); in srpt_process_completion() 1968 srpt_process_send_completion(cq, ch, &wc[i]); in srpt_process_completion() 1984 static void srpt_completion(struct ib_cq *cq, void *ctx) in srpt_completion() argument 2004 (srpt_process_completion(ch->cq, ch), in srpt_compl_thread() [all …]
|
D | ib_srpt.h | 306 struct ib_cq *cq; member
|
/linux-4.4.14/drivers/net/ethernet/emulex/benet/ |
D | be_main.c | 2242 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); in be_rx_compl_get() 2281 queue_tail_inc(&rxo->cq); in be_rx_compl_get() 2379 struct be_queue_info *tx_cq = &txo->cq; in be_tx_compl_get() 2478 struct be_queue_info *rx_cq = &rxo->cq; in be_rx_cq_clean() 2540 be_cq_notify(adapter, txo->cq.id, false, cmpl); in be_tx_compl_clean() 2648 q = &adapter->mcc_obj.cq; in be_mcc_queues_destroy() 2657 struct be_queue_info *q, *cq; in be_mcc_queues_create() local 2659 cq = &adapter->mcc_obj.cq; in be_mcc_queues_create() 2660 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, in be_mcc_queues_create() 2665 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0)) in be_mcc_queues_create() [all …]
|
D | be_cmds.c | 455 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; in be_mcc_compl_get() 469 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); in be_async_mcc_enable() 480 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); in be_async_mcc_disable() 505 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); in be_process_mcc() 1096 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, in be_cmd_cq_create() argument 1101 struct be_dma_mem *q_mem = &cq->dma_mem; in be_cmd_cq_create() 1124 __ilog2_u32(cq->len / 256)); in be_cmd_cq_create() 1141 __ilog2_u32(cq->len / 256)); in be_cmd_cq_create() 1155 cq->id = le16_to_cpu(resp->cq_id); in be_cmd_cq_create() 1156 cq->created = true; in be_cmd_cq_create() [all …]
|
D | be.h | 226 struct be_queue_info cq; member 259 struct be_queue_info cq; member 314 struct be_queue_info cq; member
|
D | be_cmds.h | 2287 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 2291 struct be_queue_info *cq);
|
/linux-4.4.14/net/ipv4/ |
D | ipconfig.c | 1359 char *cp, *cq; in root_nfs_parse_addr() local 1361 cp = cq = name; in root_nfs_parse_addr() 1365 if (cp == cq || cp - cq > 3) in root_nfs_parse_addr() 1371 cq = cp; in root_nfs_parse_addr()
|
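Note: here cq is not a completion queue — root_nfs_parse_addr() uses cp/cq as scan pointers, with cq marking the start of the current octet and the test "cp == cq || cp - cq > 3" rejecting empty or over-long octets. A standalone sketch of that two-pointer check (function name and exact semantics are illustrative, not the kernel's):

    #include <ctype.h>

    static int looks_like_dotted_quad(const char *name)
    {
            const char *cp, *cq;
            int octets = 0;

            cp = cq = name;
            while (octets < 4) {
                    while (isdigit((unsigned char)*cp))
                            cp++;
                    if (cp == cq || cp - cq > 3)
                            return 0;       /* empty or >3-digit octet */
                    if (++octets == 4)
                            break;
                    if (*cp != '.')
                            return 0;
                    cq = ++cp;              /* start of next octet */
            }
            return 1;
    }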
/linux-4.4.14/drivers/net/vmxnet3/ |
D | vmxnet3_defs.h | 134 u32 cq:1; /* completion request */ member 142 u32 cq:1; /* completion request */ member
|
/linux-4.4.14/drivers/net/wireless/orinoco/ |
D | wext.c | 133 } __packed cq; in orinoco_get_wireless_stats() local 136 HERMES_RID_COMMSQUALITY, &cq); in orinoco_get_wireless_stats() 139 wstats->qual.qual = (int)le16_to_cpu(cq.qual); in orinoco_get_wireless_stats() 140 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95; in orinoco_get_wireless_stats() 141 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95; in orinoco_get_wireless_stats()
|
/linux-4.4.14/drivers/infiniband/ulp/isert/ |
D | ib_isert.h | 195 struct ib_cq *cq; member
|
D | ib_isert.c | 157 attr.send_cq = comp->cq; in isert_create_qp() 158 attr.recv_cq = comp->cq; in isert_create_qp() 282 if (comp->cq) { in isert_free_comps() 284 ib_destroy_cq(comp->cq); in isert_free_comps() 322 comp->cq = ib_create_cq(device->ib_device, in isert_alloc_comps() 327 if (IS_ERR(comp->cq)) { in isert_alloc_comps() 329 ret = PTR_ERR(comp->cq); in isert_alloc_comps() 330 comp->cq = NULL; in isert_alloc_comps() 334 ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP); in isert_alloc_comps() 2108 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) { in isert_cq_work() [all …]
|
/linux-4.4.14/include/linux/ |
D | isdnif.h | 263 __u8 cq; member
|
/linux-4.4.14/drivers/s390/net/ |
D | qeth_core_main.c | 335 if (card->options.cq == QETH_CQ_ENABLED) { in qeth_cq_init() 357 if (card->options.cq == QETH_CQ_ENABLED) { in qeth_alloc_cq() 436 if (q->card->options.cq != QETH_CQ_ENABLED) in qeth_cleanup_handled_pending() 514 return card->options.cq == QETH_CQ_ENABLED && in qeth_is_cq() 1403 card->options.cq = QETH_CQ_DISABLED; in qeth_set_intial_options() 2783 if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { in qeth_init_input_buffer() 3572 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) in qeth_configure_cq() argument 3576 if (card->options.cq == QETH_CQ_NOTAVAILABLE) { in qeth_configure_cq() 3580 if (card->options.cq == cq) { in qeth_configure_cq() 3592 card->options.cq = cq; in qeth_configure_cq() [all …]
|
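Note: qeth models its completion-queue support as a tristate — enabled, disabled, or not available on this hardware — and qeth_configure_cq() above refuses transitions out of the unavailable state and short-circuits no-op requests. Roughly (a guard-only sketch; the real function also reconfigures the queues, and the return codes here are assumptions):

    static int configure_cq_sketch(enum qeth_cq *state, enum qeth_cq want)
    {
            if (*state == QETH_CQ_NOTAVAILABLE)
                    return -EOPNOTSUPP;     /* hardware cannot do it */
            if (*state == want)
                    return 0;               /* nothing to change */
            *state = want;
            return 0;
    }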
D | qeth_l3_sys.c | 198 if (card->options.cq == QETH_CQ_ENABLED) in qeth_l3_dev_sniffer_store() 278 if (card->options.cq == QETH_CQ_NOTAVAILABLE) in qeth_l3_dev_hsuid_store()
|
D | qeth_core.h | 698 enum qeth_cq cq; member
|
D | qeth_l3_main.c | 2851 (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || in qeth_l3_hard_start_xmit() 2852 ((card->options.cq == QETH_CQ_ENABLED) && in qeth_l3_hard_start_xmit() 3398 if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { in __qeth_l3_set_offline()
|
/linux-4.4.14/lib/raid6/test/ |
D | Makefile | 67 $(AR) cq $@ $^
|
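Note: this Makefile hit is unrelated to completion queues — in "$(AR) cq $@ $^" the cq is the pair of ar flags c (create the archive without a diagnostic) and q (quick-append the members), so the rule expands to something like "ar cq lib.a first.o second.o" (archive and object names illustrative).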
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/ |
D | sge.c | 2908 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources() local 2910 if (cq->q.desc) { in t4_free_sge_resources() 2911 tasklet_kill(&cq->qresume_tsk); in t4_free_sge_resources() 2913 cq->q.cntxt_id); in t4_free_sge_resources() 2914 __skb_queue_purge(&cq->sendq); in t4_free_sge_resources() 2915 free_txq(adap, &cq->q); in t4_free_sge_resources() 2965 struct sge_ctrl_txq *cq = &s->ctrlq[i]; in t4_sge_stop() local 2967 if (cq->q.desc) in t4_sge_stop() 2968 tasklet_kill(&cq->qresume_tsk); in t4_sge_stop()
|
D | cxgb4_uld.h | 228 struct cxgb4_range cq; member
|
D | cxgb4_main.c | 4065 adap->vres.cq.start = val[2]; in adap_init0() 4066 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0() 4402 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; in cfg_queues()
|
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
D | o2iblnd.c | 653 struct ib_cq *cq; in kiblnd_create_conn() local 749 cq = ib_create_cq(cmid->device, in kiblnd_create_conn() 752 if (IS_ERR(cq)) { in kiblnd_create_conn() 754 PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); in kiblnd_create_conn() 758 conn->ibc_cq = cq; in kiblnd_create_conn() 760 rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in kiblnd_create_conn() 774 init_qp_attr->send_cq = cq; in kiblnd_create_conn() 775 init_qp_attr->recv_cq = cq; in kiblnd_create_conn()
|
D | o2iblnd.h | 974 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
|
D | o2iblnd_cb.c | 3231 kiblnd_cq_completion(struct ib_cq *cq, void *arg) in kiblnd_cq_completion() argument 3242 LASSERT(cq == conn->ibc_cq); in kiblnd_cq_completion()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
D | pci.c | 155 } cq; member 634 i, q->consumer_counter, q->u.cq.comp_sdq_count, in mlxsw_pci_cq_dbg_read() 635 q->u.cq.comp_rdq_count, q->count); in mlxsw_pci_cq_dbg_read() 741 q->u.cq.comp_sdq_count++; in mlxsw_pci_cq_tasklet() 748 q->u.cq.comp_rdq_count++; in mlxsw_pci_cq_tasklet()
|
D | cmd.h | 821 MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib.h | 457 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr); 458 void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr);
|
D | ipoib_ib.c | 481 void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) in ipoib_ib_completion() argument 503 void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) in ipoib_send_comp_handler() argument
|
/linux-4.4.14/Documentation/isdn/ |
D | INTERFACE.fax | 137 - cq
|
/linux-4.4.14/Documentation/infiniband/ |
D | core_locking.txt | 86 ib_req_notify_cq(cq, ...);
|
/linux-4.4.14/drivers/isdn/i4l/ |
D | isdn_ttyfax.c | 618 sprintf(rs, "\r\n%d", f->cq); in isdn_tty_cmd_FCLASS2() 631 f->cq = par; in isdn_tty_cmd_FCLASS2()
|
D | isdn_tty.c | 1696 f->cq = 0; in isdn_tty_modem_reset_faxpar()
|
/linux-4.4.14/drivers/infiniband/ulp/srp/ |
D | ib_srp.c | 135 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr); 136 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr); 2006 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr) in srp_recv_completion() argument 2011 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in srp_recv_completion() 2012 while (ib_poll_cq(cq, 1, &wc) > 0) { in srp_recv_completion() 2021 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr) in srp_send_completion() argument 2027 while (ib_poll_cq(cq, 1, &wc) > 0) { in srp_send_completion()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/ |
D | cnic.c | 1799 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; in cnic_setup_bnx2x_ctx() 1800 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; in cnic_setup_bnx2x_ctx() 1801 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; in cnic_setup_bnx2x_ctx() 1814 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; in cnic_setup_bnx2x_ctx() 1815 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = in cnic_setup_bnx2x_ctx() 1817 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = in cnic_setup_bnx2x_ctx()
|
D | cnic_defs.h | 3893 struct ustorm_iscsi_cq_db cq[8]; member
|