cq 542 drivers/atm/ambassador.c amb_cq * cq = &dev->cq;
cq 543 drivers/atm/ambassador.c volatile amb_cq_ptrs * ptrs = &cq->ptrs;
cq 551 drivers/atm/ambassador.c spin_lock (&cq->lock);
cq 554 drivers/atm/ambassador.c if (cq->pending < cq->maximum) {
cq 563 drivers/atm/ambassador.c cq->pending++;
cq 569 drivers/atm/ambassador.c if (cq->pending > cq->high)
cq 570 drivers/atm/ambassador.c cq->high = cq->pending;
cq 571 drivers/atm/ambassador.c spin_unlock (&cq->lock);
cq 576 drivers/atm/ambassador.c msleep(cq->pending);
cq 594 drivers/atm/ambassador.c spin_lock (&cq->lock);
cq 595 drivers/atm/ambassador.c cq->pending--;
cq 599 drivers/atm/ambassador.c spin_unlock (&cq->lock);
cq 603 drivers/atm/ambassador.c cq->filled++;
cq 604 drivers/atm/ambassador.c spin_unlock (&cq->lock);
cq 1429 drivers/atm/ambassador.c amb_cq * c = &dev->cq;
cq 1530 drivers/atm/ambassador.c amb_cq * cq = &dev->cq;
cq 1532 drivers/atm/ambassador.c cq->pending = 0;
cq 1533 drivers/atm/ambassador.c cq->high = 0;
cq 1534 drivers/atm/ambassador.c cq->maximum = cmds - 1;
cq 1536 drivers/atm/ambassador.c cq->ptrs.start = cmd;
cq 1537 drivers/atm/ambassador.c cq->ptrs.in = cmd;
cq 1538 drivers/atm/ambassador.c cq->ptrs.out = cmd;
cq 1539 drivers/atm/ambassador.c cq->ptrs.limit = cmd + cmds;
cq 1541 drivers/atm/ambassador.c memory = cq->ptrs.limit;
cq 1613 drivers/atm/ambassador.c void * memory = dev->cq.ptrs.start;
cq 1979 drivers/atm/ambassador.c a.command_start = bus_addr (dev->cq.ptrs.start);
cq 1980 drivers/atm/ambassador.c a.command_end = bus_addr (dev->cq.ptrs.limit);
cq 2152 drivers/atm/ambassador.c spin_lock_init (&dev->cq.lock);

cq 618 drivers/atm/ambassador.h amb_cq cq;

cq 183 drivers/dma/fsl-qdma.c struct fsl_qdma_format *cq;
cq 504 drivers/dma/fsl-qdma.c queue_temp->cq =
cq 510 drivers/dma/fsl-qdma.c if (!queue_temp->cq)
cq 516 drivers/dma/fsl-qdma.c queue_temp->virt_head = queue_temp->cq;
cq 517 drivers/dma/fsl-qdma.c queue_temp->virt_tail = queue_temp->cq;
cq 554 drivers/dma/fsl-qdma.c status_head->cq = dma_alloc_coherent(&pdev->dev,
cq 559 drivers/dma/fsl-qdma.c if (!status_head->cq) {
cq 564 drivers/dma/fsl-qdma.c status_head->virt_head = status_head->cq;
cq 565 drivers/dma/fsl-qdma.c status_head->virt_tail = status_head->cq;
cq 671 drivers/dma/fsl-qdma.c if (fsl_status->virt_head == fsl_status->cq
cq 673 drivers/dma/fsl-qdma.c fsl_status->virt_head = fsl_status->cq;
cq 684 drivers/dma/fsl-qdma.c if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
cq 685 drivers/dma/fsl-qdma.c fsl_status->virt_head = fsl_status->cq;
cq 956 drivers/dma/fsl-qdma.c if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
cq 957 drivers/dma/fsl-qdma.c fsl_queue->virt_head = fsl_queue->cq;
cq 1233 drivers/dma/fsl-qdma.c status->n_cq, status->cq, status->bus_addr);
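
The drivers/dma/fsl-qdma.c hits above all revolve around one idiom: the completion queue is a flat array of descriptors, and the virt_head/virt_tail cursors wrap back to the base pointer once they reach cq + n_cq. A minimal sketch of that wrap, under simplified stand-in types (struct my_ring and my_desc here are hypothetical, not the driver's struct fsl_qdma_queue / struct fsl_qdma_format):

struct my_desc { u64 data[2]; };      /* stand-in for struct fsl_qdma_format */

struct my_ring {
	struct my_desc *cq;           /* base of the DMA-coherent array */
	struct my_desc *virt_head;    /* next descriptor to consume */
	unsigned int n_cq;            /* capacity in descriptors */
};

/* Advance the head, wrapping at cq + n_cq exactly as the snippets at
 * fsl-qdma.c lines 684-685 and 956-957 do. */
static void my_ring_advance(struct my_ring *ring)
{
	ring->virt_head++;
	if (ring->virt_head == ring->cq + ring->n_cq)
		ring->virt_head = ring->cq;
}
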
cq 37 drivers/infiniband/core/cq.c struct ib_cq *cq = dim->priv;
cq 44 drivers/infiniband/core/cq.c cq->device->ops.modify_cq(cq, comps, usec);
cq 47 drivers/infiniband/core/cq.c static void rdma_dim_init(struct ib_cq *cq)
cq 51 drivers/infiniband/core/cq.c if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
cq 52 drivers/infiniband/core/cq.c cq->poll_ctx == IB_POLL_DIRECT)
cq 62 drivers/infiniband/core/cq.c dim->priv = cq;
cq 63 drivers/infiniband/core/cq.c cq->dim = dim;
cq 68 drivers/infiniband/core/cq.c static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
cq 78 drivers/infiniband/core/cq.c while ((n = ib_poll_cq(cq, min_t(u32, batch,
cq 84 drivers/infiniband/core/cq.c wc->wr_cqe->done(cq, wc);
cq 112 drivers/infiniband/core/cq.c int ib_process_cq_direct(struct ib_cq *cq, int budget)
cq 116 drivers/infiniband/core/cq.c return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
cq 120 drivers/infiniband/core/cq.c static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
cq 122 drivers/infiniband/core/cq.c WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
cq 127 drivers/infiniband/core/cq.c struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
cq 128 drivers/infiniband/core/cq.c struct dim *dim = cq->dim;
cq 131 drivers/infiniband/core/cq.c completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
cq 133 drivers/infiniband/core/cq.c irq_poll_complete(&cq->iop);
cq 134 drivers/infiniband/core/cq.c if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
cq 135 drivers/infiniband/core/cq.c irq_poll_sched(&cq->iop);
cq 144 drivers/infiniband/core/cq.c static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
cq 146 drivers/infiniband/core/cq.c irq_poll_sched(&cq->iop);
cq 151 drivers/infiniband/core/cq.c struct ib_cq *cq = container_of(work, struct ib_cq, work);
cq 154 drivers/infiniband/core/cq.c completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
cq 157 drivers/infiniband/core/cq.c ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
cq 158 drivers/infiniband/core/cq.c queue_work(cq->comp_wq, &cq->work);
cq 159 drivers/infiniband/core/cq.c else if (cq->dim)
cq 160 drivers/infiniband/core/cq.c rdma_dim(cq->dim, completed);
cq 163 drivers/infiniband/core/cq.c static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
cq 165 drivers/infiniband/core/cq.c queue_work(cq->comp_wq, &cq->work);
cq 192 drivers/infiniband/core/cq.c struct ib_cq *cq;
cq 195 drivers/infiniband/core/cq.c cq = rdma_zalloc_drv_obj(dev, ib_cq);
cq 196 drivers/infiniband/core/cq.c if (!cq)
cq 199 drivers/infiniband/core/cq.c cq->device = dev;
cq 200 drivers/infiniband/core/cq.c cq->cq_context = private;
cq 201 drivers/infiniband/core/cq.c cq->poll_ctx = poll_ctx;
cq 202 drivers/infiniband/core/cq.c atomic_set(&cq->usecnt, 0);
cq 204 drivers/infiniband/core/cq.c cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
cq 205 drivers/infiniband/core/cq.c if (!cq->wc)
cq 208 drivers/infiniband/core/cq.c cq->res.type = RDMA_RESTRACK_CQ;
cq 209 drivers/infiniband/core/cq.c rdma_restrack_set_task(&cq->res, caller);
cq 211 drivers/infiniband/core/cq.c ret = dev->ops.create_cq(cq, &cq_attr, NULL);
cq 215 drivers/infiniband/core/cq.c rdma_restrack_kadd(&cq->res);
cq 217 drivers/infiniband/core/cq.c rdma_dim_init(cq);
cq 219 drivers/infiniband/core/cq.c switch (cq->poll_ctx) {
cq 221 drivers/infiniband/core/cq.c cq->comp_handler = ib_cq_completion_direct;
cq 224 drivers/infiniband/core/cq.c cq->comp_handler = ib_cq_completion_softirq;
cq 226 drivers/infiniband/core/cq.c irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
cq 227 drivers/infiniband/core/cq.c ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
cq 231 drivers/infiniband/core/cq.c cq->comp_handler = ib_cq_completion_workqueue;
cq 232 drivers/infiniband/core/cq.c INIT_WORK(&cq->work, ib_cq_poll_work);
cq 233 drivers/infiniband/core/cq.c ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
cq 234 drivers/infiniband/core/cq.c cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
cq 242 drivers/infiniband/core/cq.c return cq;
cq 245 drivers/infiniband/core/cq.c rdma_restrack_del(&cq->res);
cq 246 drivers/infiniband/core/cq.c cq->device->ops.destroy_cq(cq, udata);
cq 248 drivers/infiniband/core/cq.c kfree(cq->wc);
cq 250 drivers/infiniband/core/cq.c kfree(cq);
cq 288 drivers/infiniband/core/cq.c void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
cq 290 drivers/infiniband/core/cq.c if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
cq 293 drivers/infiniband/core/cq.c switch (cq->poll_ctx) {
cq 297 drivers/infiniband/core/cq.c irq_poll_disable(&cq->iop);
cq 301 drivers/infiniband/core/cq.c cancel_work_sync(&cq->work);
cq 307 drivers/infiniband/core/cq.c rdma_restrack_del(&cq->res);
cq 308 drivers/infiniband/core/cq.c cq->device->ops.destroy_cq(cq, udata);
cq 309 drivers/infiniband/core/cq.c if (cq->dim)
cq 310 drivers/infiniband/core/cq.c cancel_work_sync(&cq->dim->work);
cq 311 drivers/infiniband/core/cq.c kfree(cq->dim);
cq 312 drivers/infiniband/core/cq.c kfree(cq->wc);
cq 313 drivers/infiniband/core/cq.c kfree(cq);
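
The drivers/infiniband/core/cq.c hits above cover the whole kernel CQ API: ib_alloc_cq() selects a completion context (direct, softirq via irq_poll, or workqueue), __ib_process_cq() polls work completions in batches, and each completion is dispatched through wc->wr_cqe->done(). A hedged sketch of how a kernel consumer typically uses this API; my_ctx, my_done, and my_setup are illustrative names, not from the listing:

#include <rdma/ib_verbs.h>

struct my_ctx {
	struct ib_cqe cqe;	/* embedded so done() can container_of() back */
	/* ... per-request state ... */
};

/* Invoked from __ib_process_cq() via wc->wr_cqe->done(cq, wc). */
static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("request %p failed: %d\n", ctx, wc->status);
	/* ... complete the request ... */
}

static struct ib_cq *my_setup(struct ib_device *dev, void *priv)
{
	/* 128 CQEs, comp_vector 0, completions run from a workqueue;
	 * WRs are later posted with wr->wr_cqe = &ctx->cqe, where
	 * ctx->cqe.done = my_done. Freed again with ib_free_cq(). */
	return ib_alloc_cq(dev, priv, 128, 0, IB_POLL_WORKQUEUE);
}
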
cq 115 drivers/infiniband/core/mad.c static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
cq 2256 drivers/infiniband/core/mad.c static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2258 drivers/infiniband/core/mad.c struct ib_mad_port_private *port_priv = cq->cq_context;
cq 2507 drivers/infiniband/core/mad.c static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2509 drivers/infiniband/core/mad.c struct ib_mad_port_private *port_priv = cq->cq_context;
cq 3091 drivers/infiniband/core/mad.c ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
cq 3154 drivers/infiniband/core/mad.c qp_init_attr.send_cq = qp_info->port_priv->cq;
cq 3155 drivers/infiniband/core/mad.c qp_init_attr.recv_cq = qp_info->port_priv->cq;
cq 3234 drivers/infiniband/core/mad.c port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
cq 3236 drivers/infiniband/core/mad.c if (IS_ERR(port_priv->cq)) {
cq 3238 drivers/infiniband/core/mad.c ret = PTR_ERR(port_priv->cq);
cq 3281 drivers/infiniband/core/mad.c ib_free_cq(port_priv->cq);
cq 3315 drivers/infiniband/core/mad.c ib_free_cq(port_priv->cq);

cq 203 drivers/infiniband/core/mad_priv.h struct ib_cq *cq;

cq 543 drivers/infiniband/core/nldev.c struct ib_cq *cq = container_of(res, struct ib_cq, res);
cq 544 drivers/infiniband/core/nldev.c struct ib_device *dev = cq->device;
cq 546 drivers/infiniband/core/nldev.c if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
cq 549 drivers/infiniband/core/nldev.c atomic_read(&cq->usecnt), RDMA_NLDEV_ATTR_PAD))
cq 554 drivers/infiniband/core/nldev.c nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
cq 557 drivers/infiniband/core/nldev.c if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL)))
cq 564 drivers/infiniband/core/nldev.c cq->uobject->context->res.id))
cq 1416 drivers/infiniband/core/nldev.c RES_GET_FUNCS(cq, RDMA_RESTRACK_CQ);

cq 234 drivers/infiniband/core/uverbs.h void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context);

cq 989 drivers/infiniband/core/uverbs_cmd.c struct ib_cq *cq;
cq 1021 drivers/infiniband/core/uverbs_cmd.c cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
cq 1022 drivers/infiniband/core/uverbs_cmd.c if (!cq) {
cq 1026 drivers/infiniband/core/uverbs_cmd.c cq->device = ib_dev;
cq 1027 drivers/infiniband/core/uverbs_cmd.c cq->uobject = &obj->uobject;
cq 1028 drivers/infiniband/core/uverbs_cmd.c cq->comp_handler = ib_uverbs_comp_handler;
cq 1029 drivers/infiniband/core/uverbs_cmd.c cq->event_handler = ib_uverbs_cq_event_handler;
cq 1030 drivers/infiniband/core/uverbs_cmd.c cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
cq 1031 drivers/infiniband/core/uverbs_cmd.c atomic_set(&cq->usecnt, 0);
cq 1033 drivers/infiniband/core/uverbs_cmd.c ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
cq 1037 drivers/infiniband/core/uverbs_cmd.c obj->uobject.object = cq;
cq 1040 drivers/infiniband/core/uverbs_cmd.c resp.base.cqe = cq->cqe;
cq 1043 drivers/infiniband/core/uverbs_cmd.c cq->res.type = RDMA_RESTRACK_CQ;
cq 1044 drivers/infiniband/core/uverbs_cmd.c rdma_restrack_uadd(&cq->res);
cq 1056 drivers/infiniband/core/uverbs_cmd.c ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
cq 1057 drivers/infiniband/core/uverbs_cmd.c cq = NULL;
cq 1059 drivers/infiniband/core/uverbs_cmd.c kfree(cq);
cq 1115 drivers/infiniband/core/uverbs_cmd.c struct ib_cq *cq;
cq 1122 drivers/infiniband/core/uverbs_cmd.c cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
cq 1123 drivers/infiniband/core/uverbs_cmd.c if (!cq)
cq 1126 drivers/infiniband/core/uverbs_cmd.c ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
cq 1130 drivers/infiniband/core/uverbs_cmd.c resp.cqe = cq->cqe;
cq 1134 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(cq);
cq 1175 drivers/infiniband/core/uverbs_cmd.c struct ib_cq *cq;
cq 1183 drivers/infiniband/core/uverbs_cmd.c cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
cq 1184 drivers/infiniband/core/uverbs_cmd.c if (!cq)
cq 1193 drivers/infiniband/core/uverbs_cmd.c ret = ib_poll_cq(cq, 1, &wc);
cq 1199 drivers/infiniband/core/uverbs_cmd.c ret = copy_wc_to_user(cq->device, data_ptr, &wc);
cq 1217 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(cq);
cq 1224 drivers/infiniband/core/uverbs_cmd.c struct ib_cq *cq;
cq 1231 drivers/infiniband/core/uverbs_cmd.c cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
cq 1232 drivers/infiniband/core/uverbs_cmd.c if (!cq)
cq 1235 drivers/infiniband/core/uverbs_cmd.c ib_req_notify_cq(cq, cmd.solicited_only ?
cq 1238 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(cq);
cq 1350 drivers/infiniband/core/uverbs_cmd.c cq, UVERBS_OBJECT_CQ,
cq 1361 drivers/infiniband/core/uverbs_cmd.c scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cq 2896 drivers/infiniband/core/uverbs_cmd.c struct ib_cq *cq;
cq 2920 drivers/infiniband/core/uverbs_cmd.c cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
cq 2921 drivers/infiniband/core/uverbs_cmd.c if (!cq) {
cq 2926 drivers/infiniband/core/uverbs_cmd.c wq_init_attr.cq = cq;
cq 2945 drivers/infiniband/core/uverbs_cmd.c wq->cq = cq;
cq 2951 drivers/infiniband/core/uverbs_cmd.c atomic_inc(&cq->usecnt);
cq 2966 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(cq);
cq 2972 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(cq);
cq 3392 drivers/infiniband/core/uverbs_cmd.c attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,
cq 3394 drivers/infiniband/core/uverbs_cmd.c if (!attr.ext.cq) {
cq 3434 drivers/infiniband/core/uverbs_cmd.c srq->ext.cq = attr.ext.cq;
cq 3435 drivers/infiniband/core/uverbs_cmd.c atomic_inc(&attr.ext.cq->usecnt);
cq 3464 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(attr.ext.cq);
cq 3480 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(attr.ext.cq);
cq 3677 drivers/infiniband/core/uverbs_cmd.c struct ib_cq *cq;
cq 3690 drivers/infiniband/core/uverbs_cmd.c cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs);
cq 3691 drivers/infiniband/core/uverbs_cmd.c if (!cq)
cq 3694 drivers/infiniband/core/uverbs_cmd.c ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
cq 3696 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(cq);

cq 417 drivers/infiniband/core/uverbs_main.c void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
cq 439 drivers/infiniband/core/uverbs_main.c uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
cq 441 drivers/infiniband/core/uverbs_main.c entry->desc.comp.cq_handle = cq->uobject->user_handle;
cq 488 drivers/infiniband/core/uverbs_main.c struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,

cq 41 drivers/infiniband/core/uverbs_std_types_cq.c struct ib_cq *cq = uobject->object;
cq 42 drivers/infiniband/core/uverbs_std_types_cq.c struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
cq 47 drivers/infiniband/core/uverbs_std_types_cq.c ret = ib_destroy_cq_user(cq, &attrs->driver_udata);
cq 71 drivers/infiniband/core/uverbs_std_types_cq.c struct ib_cq *cq;
cq 114 drivers/infiniband/core/uverbs_std_types_cq.c cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
cq 115 drivers/infiniband/core/uverbs_std_types_cq.c if (!cq) {
cq 120 drivers/infiniband/core/uverbs_std_types_cq.c cq->device = ib_dev;
cq 121 drivers/infiniband/core/uverbs_std_types_cq.c cq->uobject = &obj->uobject;
cq 122 drivers/infiniband/core/uverbs_std_types_cq.c cq->comp_handler = ib_uverbs_comp_handler;
cq 123 drivers/infiniband/core/uverbs_std_types_cq.c cq->event_handler = ib_uverbs_cq_event_handler;
cq 124 drivers/infiniband/core/uverbs_std_types_cq.c cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
cq 125 drivers/infiniband/core/uverbs_std_types_cq.c atomic_set(&cq->usecnt, 0);
cq 126 drivers/infiniband/core/uverbs_std_types_cq.c cq->res.type = RDMA_RESTRACK_CQ;
cq 128 drivers/infiniband/core/uverbs_std_types_cq.c ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
cq 132 drivers/infiniband/core/uverbs_std_types_cq.c obj->uobject.object = cq;
cq 134 drivers/infiniband/core/uverbs_std_types_cq.c rdma_restrack_uadd(&cq->res);
cq 136 drivers/infiniband/core/uverbs_std_types_cq.c ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
cq 137 drivers/infiniband/core/uverbs_std_types_cq.c sizeof(cq->cqe));
cq 143 drivers/infiniband/core/uverbs_std_types_cq.c ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
cq 144 drivers/infiniband/core/uverbs_std_types_cq.c cq = NULL;
cq 146 drivers/infiniband/core/uverbs_std_types_cq.c kfree(cq);

cq 987 drivers/infiniband/core/verbs.c srq->ext.cq = srq_init_attr->ext.cq;
cq 988 drivers/infiniband/core/verbs.c atomic_inc(&srq->ext.cq->usecnt);
cq 1002 drivers/infiniband/core/verbs.c atomic_dec(&srq->ext.cq->usecnt);
cq 1040 drivers/infiniband/core/verbs.c atomic_dec(&srq->ext.cq->usecnt);
cq 1925 drivers/infiniband/core/verbs.c struct ib_cq *cq;
cq 1928 drivers/infiniband/core/verbs.c cq = rdma_zalloc_drv_obj(device, ib_cq);
cq 1929 drivers/infiniband/core/verbs.c if (!cq)
cq 1932 drivers/infiniband/core/verbs.c cq->device = device;
cq 1933 drivers/infiniband/core/verbs.c cq->uobject = NULL;
cq 1934 drivers/infiniband/core/verbs.c cq->comp_handler = comp_handler;
cq 1935 drivers/infiniband/core/verbs.c cq->event_handler = event_handler;
cq 1936 drivers/infiniband/core/verbs.c cq->cq_context = cq_context;
cq 1937 drivers/infiniband/core/verbs.c atomic_set(&cq->usecnt, 0);
cq 1938 drivers/infiniband/core/verbs.c cq->res.type = RDMA_RESTRACK_CQ;
cq 1939 drivers/infiniband/core/verbs.c rdma_restrack_set_task(&cq->res, caller);
cq 1941 drivers/infiniband/core/verbs.c ret = device->ops.create_cq(cq, cq_attr, NULL);
cq 1943 drivers/infiniband/core/verbs.c kfree(cq);
cq 1947 drivers/infiniband/core/verbs.c rdma_restrack_kadd(&cq->res);
cq 1948 drivers/infiniband/core/verbs.c return cq;
cq 1952 drivers/infiniband/core/verbs.c int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
cq 1954 drivers/infiniband/core/verbs.c return cq->device->ops.modify_cq ?
cq 1955 drivers/infiniband/core/verbs.c cq->device->ops.modify_cq(cq, cq_count,
cq 1960 drivers/infiniband/core/verbs.c int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
cq 1962 drivers/infiniband/core/verbs.c if (atomic_read(&cq->usecnt))
cq 1965 drivers/infiniband/core/verbs.c rdma_restrack_del(&cq->res);
cq 1966 drivers/infiniband/core/verbs.c cq->device->ops.destroy_cq(cq, udata);
cq 1967 drivers/infiniband/core/verbs.c kfree(cq);
cq 1972 drivers/infiniband/core/verbs.c int ib_resize_cq(struct ib_cq *cq, int cqe)
cq 1974 drivers/infiniband/core/verbs.c return cq->device->ops.resize_cq ?
cq 1975 drivers/infiniband/core/verbs.c cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
cq 2286 drivers/infiniband/core/verbs.c wq->cq = wq_attr->cq;
cq 2291 drivers/infiniband/core/verbs.c atomic_inc(&wq_attr->cq->usecnt);
cq 2305 drivers/infiniband/core/verbs.c struct ib_cq *cq = wq->cq;
cq 2313 drivers/infiniband/core/verbs.c atomic_dec(&cq->usecnt);
cq 2619 drivers/infiniband/core/verbs.c static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2632 drivers/infiniband/core/verbs.c struct ib_cq *cq = qp->send_cq;
cq 2659 drivers/infiniband/core/verbs.c if (cq->poll_ctx == IB_POLL_DIRECT)
cq 2661 drivers/infiniband/core/verbs.c ib_process_cq_direct(cq, -1);
cq 2671 drivers/infiniband/core/verbs.c struct ib_cq *cq = qp->recv_cq;
cq 2693 drivers/infiniband/core/verbs.c if (cq->poll_ctx == IB_POLL_DIRECT)
cq 2695 drivers/infiniband/core/verbs.c ib_process_cq_direct(cq, -1);
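
The verbs.c hits just above (2619-2695) are the QP drain helpers, the one place a consumer polls a CQ by hand: if the CQ was allocated with IB_POLL_DIRECT there is no completion context to run the poll loop, so the drain path calls ib_process_cq_direct() itself. A sketch of that idiom, assuming a marker WR whose done() callback fires a struct completion (the my_ prefixed names are illustrative, not the kernel's):

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct my_drain {
	struct ib_cqe cqe;		/* post the marker WR with wr_cqe = &cqe */
	struct completion done;
};

static void my_drain_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_drain *d = container_of(wc->wr_cqe, struct my_drain, cqe);

	complete(&d->done);
}

/* Wait for a previously posted marker WR (d->cqe.done = my_drain_done). */
static void my_wait_drained(struct ib_qp *qp, struct my_drain *d)
{
	struct ib_cq *cq = qp->send_cq;

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (!try_wait_for_completion(&d->done))
			ib_process_cq_direct(cq, -1);	/* -1: no budget cap */
	else
		wait_for_completion(&d->done);
}
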
cq 1009 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct bnxt_re_cq *cq;
cq 1049 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
cq 1051 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (!cq) {
cq 1056 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.scq = &cq->qplib_cq;
cq 1057 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->scq = cq;
cq 1061 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
cq 1063 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (!cq) {
cq 1068 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.rcq = &cq->qplib_cq;
cq 1069 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->rcq = cq;
cq 1299 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (qplib_srq->cq)
cq 1300 drivers/infiniband/hw/bnxt_re/ib_verbs.c nq = qplib_srq->cq->nq;
cq 2517 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct bnxt_re_cq *cq;
cq 2521 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
cq 2522 drivers/infiniband/hw/bnxt_re/ib_verbs.c rdev = cq->rdev;
cq 2523 drivers/infiniband/hw/bnxt_re/ib_verbs.c nq = cq->qplib_cq.nq;
cq 2525 drivers/infiniband/hw/bnxt_re/ib_verbs.c bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
cq 2526 drivers/infiniband/hw/bnxt_re/ib_verbs.c ib_umem_release(cq->umem);
cq 2530 drivers/infiniband/hw/bnxt_re/ib_verbs.c kfree(cq->cql);
cq 2538 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
cq 2550 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->rdev = rdev;
cq 2551 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
cq 2566 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->umem = ib_umem_get(udata, req.cq_va,
cq 2569 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (IS_ERR(cq->umem)) {
cq 2570 drivers/infiniband/hw/bnxt_re/ib_verbs.c rc = PTR_ERR(cq->umem);
cq 2573 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
cq 2574 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
cq 2575 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
cq 2576 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.dpi = &uctx->dpi;
cq 2578 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
cq 2579 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
cq 2581 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (!cq->cql) {
cq 2586 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.dpi = &rdev->dpi_privileged;
cq 2594 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.max_wqe = entries;
cq 2595 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
cq 2596 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->qplib_cq.nq = nq;
cq 2598 drivers/infiniband/hw/bnxt_re/ib_verbs.c rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
cq 2604 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->ib_cq.cqe = entries;
cq 2605 drivers/infiniband/hw/bnxt_re/ib_verbs.c cq->cq_period = cq->qplib_cq.period;
cq 2609 drivers/infiniband/hw/bnxt_re/ib_verbs.c spin_lock_init(&cq->cq_lock);
cq 2614 drivers/infiniband/hw/bnxt_re/ib_verbs.c resp.cqid = cq->qplib_cq.id;
cq 2615 drivers/infiniband/hw/bnxt_re/ib_verbs.c resp.tail = cq->qplib_cq.hwq.cons;
cq 2616 drivers/infiniband/hw/bnxt_re/ib_verbs.c resp.phase = cq->qplib_cq.period;
cq 2621 drivers/infiniband/hw/bnxt_re/ib_verbs.c bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
cq 2629 drivers/infiniband/hw/bnxt_re/ib_verbs.c ib_umem_release(cq->umem);
cq 2631 drivers/infiniband/hw/bnxt_re/ib_verbs.c kfree(cq->cql);
cq 3116 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
cq 3126 drivers/infiniband/hw/bnxt_re/ib_verbs.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 3127 drivers/infiniband/hw/bnxt_re/ib_verbs.c budget = min_t(u32, num_entries, cq->max_cql);
cq 3129 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (!cq->cql) {
cq 3130 drivers/infiniband/hw/bnxt_re/ib_verbs.c dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
cq 3133 drivers/infiniband/hw/bnxt_re/ib_verbs.c cqe = &cq->cql[0];
cq 3136 drivers/infiniband/hw/bnxt_re/ib_verbs.c ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
cq 3143 drivers/infiniband/hw/bnxt_re/ib_verbs.c dev_err(rdev_to_dev(cq->rdev),
cq 3150 drivers/infiniband/hw/bnxt_re/ib_verbs.c ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
cq 3168 drivers/infiniband/hw/bnxt_re/ib_verbs.c dev_err(rdev_to_dev(cq->rdev),
cq 3208 drivers/infiniband/hw/bnxt_re/ib_verbs.c sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
cq 3232 drivers/infiniband/hw/bnxt_re/ib_verbs.c dev_err(rdev_to_dev(cq->rdev),
cq 3242 drivers/infiniband/hw/bnxt_re/ib_verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 3249 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
cq 3253 drivers/infiniband/hw/bnxt_re/ib_verbs.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 3263 drivers/infiniband/hw/bnxt_re/ib_verbs.c !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
cq 3267 drivers/infiniband/hw/bnxt_re/ib_verbs.c bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
cq 3270 drivers/infiniband/hw/bnxt_re/ib_verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags);

cq 195 drivers/infiniband/hw/bnxt_re/ib_verbs.h void bnxt_re_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
cq 196 drivers/infiniband/hw/bnxt_re/ib_verbs.h int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
cq 197 drivers/infiniband/hw/bnxt_re/ib_verbs.h int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);

cq 883 drivers/infiniband/hw/bnxt_re/main.c struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
cq 886 drivers/infiniband/hw/bnxt_re/main.c if (!cq) {
cq 891 drivers/infiniband/hw/bnxt_re/main.c if (cq->ib_cq.comp_handler) {
cq 893 drivers/infiniband/hw/bnxt_re/main.c (*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);

cq 56 drivers/infiniband/hw/bnxt_re/qplib_fp.c static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
cq 57 drivers/infiniband/hw/bnxt_re/qplib_fp.c static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
cq 158 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_cq *cq = nq_work->cq;
cq 161 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (cq && nq) {
cq 162 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_lock_bh(&cq->compl_lock);
cq 163 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
cq 166 drivers/infiniband/hw/bnxt_re/qplib_fp.c __func__, cq, nq);
cq 167 drivers/infiniband/hw/bnxt_re/qplib_fp.c nq->cqn_handler(nq, cq);
cq 169 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_unlock_bh(&cq->compl_lock);
cq 241 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_cq *cq;
cq 274 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
cq 275 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_arm_cq_enable(cq);
cq 276 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_lock_bh(&cq->compl_lock);
cq 277 drivers/infiniband/hw/bnxt_re/qplib_fp.c atomic_set(&cq->arm_state, 0);
cq 278 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!nq->cqn_handler(nq, (cq)))
cq 283 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_unlock_bh(&cq->compl_lock);
cq 1371 drivers/infiniband/hw/bnxt_re/qplib_fp.c static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
cq 1373 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
cq 1794 drivers/infiniband/hw/bnxt_re/qplib_fp.c nq_work->cq = qp->scq;
cq 1884 drivers/infiniband/hw/bnxt_re/qplib_fp.c nq_work->cq = qp->rcq;
cq 1901 drivers/infiniband/hw/bnxt_re/qplib_fp.c static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
cq 1905 drivers/infiniband/hw/bnxt_re/qplib_fp.c val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
cq 1909 drivers/infiniband/hw/bnxt_re/qplib_fp.c writeq(val, cq->dbr_base);
cq 1912 drivers/infiniband/hw/bnxt_re/qplib_fp.c static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
cq 1914 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
cq 1919 drivers/infiniband/hw/bnxt_re/qplib_fp.c val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
cq 1924 drivers/infiniband/hw/bnxt_re/qplib_fp.c writeq(val, cq->dpi->dbr);
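
bnxt_qplib_arm_cq_enable()/bnxt_qplib_arm_cq() above compose a doorbell word from the CQ id shifted into the XID field plus an arm type, then issue it with a single 64-bit writeq. A minimal sketch of that composition; DBC_DBC_XID_SFT/DBC_DBC_XID_MASK appear in the listing, but the consumer-index field in the low word is an assumption about the layout, not taken from it:

/* Sketch only: mirrors the shape of bnxt_qplib_arm_cq() above. */
static void my_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	u64 val;

	val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
	val <<= 32;				/* high word: xid | arm type */
	val |= cq->hwq.cons & 0xffffff;		/* low word: index (assumed) */
	writeq(val, cq->dpi->dbr);		/* one atomic MMIO doorbell */
}
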
cq 1927 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq 1936 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq->hwq.max_elements = cq->max_wqe;
cq 1937 drivers/infiniband/hw/bnxt_re/qplib_fp.c rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
cq 1938 drivers/infiniband/hw/bnxt_re/qplib_fp.c &cq->hwq.max_elements,
cq 1946 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!cq->dpi) {
cq 1951 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.dpi = cpu_to_le32(cq->dpi->dpi);
cq 1952 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.cq_handle = cpu_to_le64(cq->cq_handle);
cq 1954 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.cq_size = cpu_to_le32(cq->hwq.max_elements);
cq 1955 drivers/infiniband/hw/bnxt_re/qplib_fp.c pbl = &cq->hwq.pbl[PBL_LVL_0];
cq 1957 drivers/infiniband/hw/bnxt_re/qplib_fp.c ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
cq 1970 drivers/infiniband/hw/bnxt_re/qplib_fp.c (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
cq 1978 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq->id = le32_to_cpu(resp.xid);
cq 1979 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq 1980 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
cq 1981 drivers/infiniband/hw/bnxt_re/qplib_fp.c init_waitqueue_head(&cq->waitq);
cq 1982 drivers/infiniband/hw/bnxt_re/qplib_fp.c INIT_LIST_HEAD(&cq->sqf_head);
cq 1983 drivers/infiniband/hw/bnxt_re/qplib_fp.c INIT_LIST_HEAD(&cq->rqf_head);
cq 1984 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_lock_init(&cq->compl_lock);
cq 1985 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_lock_init(&cq->flush_lock);
cq 1987 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_arm_cq_enable(cq);
cq 1991 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
cq 1996 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
cq 2006 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.cq_cid = cpu_to_le32(cq->id);
cq 2011 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
cq 2116 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
cq 2137 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2144 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
cq 2151 drivers/infiniband/hw/bnxt_re/qplib_fp.c peek_raw_cq_cons = cq->hwq.cons;
cq 2153 drivers/infiniband/hw/bnxt_re/qplib_fp.c i = cq->hwq.max_elements;
cq 2155 drivers/infiniband/hw/bnxt_re/qplib_fp.c peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
cq 2156 drivers/infiniband/hw/bnxt_re/qplib_fp.c peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
cq 2161 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq->hwq.max_elements)) {
cq 2189 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2206 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2215 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cq 2230 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2238 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2245 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2277 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2287 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
cq 2329 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
cq 2344 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
cq 2348 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2369 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2382 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2404 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
cq 2419 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
cq 2423 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2450 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2463 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2485 drivers/infiniband/hw/bnxt_re/qplib_fp.c bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
cq 2491 drivers/infiniband/hw/bnxt_re/qplib_fp.c raw_cons = cq->hwq.cons;
cq 2492 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
cq 2493 drivers/infiniband/hw/bnxt_re/qplib_fp.c hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
cq 2497 drivers/infiniband/hw/bnxt_re/qplib_fp.c rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
cq 2501 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
cq 2516 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
cq 2520 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2549 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2554 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2567 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2589 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
cq 2602 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_warn(&cq->hwq.pdev->dev,
cq 2609 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2625 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2632 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2673 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2680 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev,
cq 2697 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
cq 2702 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2707 drivers/infiniband/hw/bnxt_re/qplib_fp.c clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
cq 2708 drivers/infiniband/hw/bnxt_re/qplib_fp.c wake_up_interruptible(&cq->waitq);
cq 2713 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
cq 2721 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_lock_irqsave(&cq->flush_lock, flags);
cq 2722 drivers/infiniband/hw/bnxt_re/qplib_fp.c list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
cq 2723 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
cq 2727 drivers/infiniband/hw/bnxt_re/qplib_fp.c list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
cq 2728 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
cq 2731 drivers/infiniband/hw/bnxt_re/qplib_fp.c spin_unlock_irqrestore(&cq->flush_lock, flags);
cq 2736 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
cq 2743 drivers/infiniband/hw/bnxt_re/qplib_fp.c raw_cons = cq->hwq.cons;
cq 2747 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
cq 2748 drivers/infiniband/hw/bnxt_re/qplib_fp.c hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
cq 2752 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
cq 2763 drivers/infiniband/hw/bnxt_re/qplib_fp.c rc = bnxt_qplib_cq_process_req(cq,
cq 2769 drivers/infiniband/hw/bnxt_re/qplib_fp.c rc = bnxt_qplib_cq_process_res_rc(cq,
cq 2776 drivers/infiniband/hw/bnxt_re/qplib_fp.c (cq, (struct cq_res_ud *)hw_cqe, &cqe,
cq 2781 drivers/infiniband/hw/bnxt_re/qplib_fp.c (cq, (struct cq_res_raweth_qp1 *)
cq 2786 drivers/infiniband/hw/bnxt_re/qplib_fp.c (cq, (struct cq_terminal *)hw_cqe,
cq 2791 drivers/infiniband/hw/bnxt_re/qplib_fp.c (cq, (struct cq_cutoff *)hw_cqe);
cq 2795 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2808 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&cq->hwq.pdev->dev,
cq 2813 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (cq->hwq.cons != raw_cons) {
cq 2814 drivers/infiniband/hw/bnxt_re/qplib_fp.c cq->hwq.cons = raw_cons;
cq 2815 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
cq 2821 drivers/infiniband/hw/bnxt_re/qplib_fp.c void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
cq 2824 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_arm_cq(cq, arm_type);
cq 2826 drivers/infiniband/hw/bnxt_re/qplib_fp.c atomic_set(&cq->arm_state, 1);
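
bnxt_qplib_poll_cq() above shows the canonical valid-bit ring walk: map a raw consumer cursor to a software index, test the CQE's valid/toggle bit against the current pass over the ring (CQE_CMP_VALID), dispatch by CQE type, and ring the consumer doorbell only if the cursor moved. A condensed sketch of that loop shape; the dispatch and error handling are elided, and the CQE_PG/CQE_IDX page-indexing helpers are assumed from the driver:

/* Sketch only, not the verbatim driver function. */
int my_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 raw_cons = cq->hwq.cons;
	int budget = num;

	while (budget) {
		u32 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);

		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;			/* hardware hasn't written it yet */
		/* ... dispatch on the CQE type field, filling *cqe++ ... */
		raw_cons++;
		budget--;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;	/* publish consumed entries */
		bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
	}
	return num - budget;
}
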
cq 52 drivers/infiniband/hw/bnxt_re/qplib_fp.h struct bnxt_qplib_cq *cq;
cq 490 drivers/infiniband/hw/bnxt_re/qplib_fp.h struct bnxt_qplib_cq *cq);
cq 501 drivers/infiniband/hw/bnxt_re/qplib_fp.h struct bnxt_qplib_cq *cq;
cq 511 drivers/infiniband/hw/bnxt_re/qplib_fp.h struct bnxt_qplib_cq *cq),
cq 546 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
cq 547 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
cq 548 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
cq 550 drivers/infiniband/hw/bnxt_re/qplib_fp.h bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
cq 551 drivers/infiniband/hw/bnxt_re/qplib_fp.h void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
cq 559 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,

cq 71 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
cq 79 drivers/infiniband/hw/cxgb3/cxio_hal.c setup.id = cq->cqid;
cq 92 drivers/infiniband/hw/cxgb3/cxio_hal.c if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
cq 95 drivers/infiniband/hw/cxgb3/cxio_hal.c rptr = cq->rptr;
cq 101 drivers/infiniband/hw/cxgb3/cxio_hal.c while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret)
cq 109 drivers/infiniband/hw/cxgb3/cxio_hal.c cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
cq 110 drivers/infiniband/hw/cxgb3/cxio_hal.c while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
cq 156 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
cq 159 drivers/infiniband/hw/cxgb3/cxio_hal.c int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
cq 162 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
cq 163 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!cq->cqid)
cq 166 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->sw_queue = kzalloc(size, GFP_KERNEL);
cq 167 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!cq->sw_queue)
cq 170 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
cq 171 drivers/infiniband/hw/cxgb3/cxio_hal.c &(cq->dma_addr), GFP_KERNEL);
cq 172 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!cq->queue) {
cq 173 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(cq->sw_queue);
cq 176 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr_set(cq, mapping, cq->dma_addr);
cq 177 drivers/infiniband/hw/cxgb3/cxio_hal.c setup.id = cq->cqid;
cq 178 drivers/infiniband/hw/cxgb3/cxio_hal.c setup.base_addr = (u64) (cq->dma_addr);
cq 179 drivers/infiniband/hw/cxgb3/cxio_hal.c setup.size = 1UL << cq->size_log2;
cq 305 drivers/infiniband/hw/cxgb3/cxio_hal.c void cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
cq 307 drivers/infiniband/hw/cxgb3/cxio_hal.c cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
cq 308 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(cq->sw_queue);
cq 310 drivers/infiniband/hw/cxgb3/cxio_hal.c (1UL << (cq->size_log2))
cq 311 drivers/infiniband/hw/cxgb3/cxio_hal.c * sizeof(struct t3_cqe) + 1, cq->queue,
cq 312 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr(cq, mapping));
cq 313 drivers/infiniband/hw/cxgb3/cxio_hal.c cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
cq 330 drivers/infiniband/hw/cxgb3/cxio_hal.c static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
cq 335 drivers/infiniband/hw/cxgb3/cxio_hal.c wq, cq, cq->sw_rptr, cq->sw_wptr);
cq 342 drivers/infiniband/hw/cxgb3/cxio_hal.c V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
cq 343 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->size_log2)));
cq 344 drivers/infiniband/hw/cxgb3/cxio_hal.c *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cq 345 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->sw_wptr++;
cq 348 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
cq 353 drivers/infiniband/hw/cxgb3/cxio_hal.c pr_debug("%s wq %p cq %p\n", __func__, wq, cq);
cq 360 drivers/infiniband/hw/cxgb3/cxio_hal.c insert_recv_cqe(wq, cq);
cq 366 drivers/infiniband/hw/cxgb3/cxio_hal.c static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
cq 372 drivers/infiniband/hw/cxgb3/cxio_hal.c wq, cq, cq->sw_rptr, cq->sw_wptr);
cq 379 drivers/infiniband/hw/cxgb3/cxio_hal.c V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
cq 380 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->size_log2)));
cq 383 drivers/infiniband/hw/cxgb3/cxio_hal.c *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cq 384 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->sw_wptr++;
cq 387 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
cq 395 drivers/infiniband/hw/cxgb3/cxio_hal.c insert_sq_cqe(wq, cq, sqp);
cq 406 drivers/infiniband/hw/cxgb3/cxio_hal.c void cxio_flush_hw_cq(struct t3_cq *cq)
cq 410 drivers/infiniband/hw/cxgb3/cxio_hal.c pr_debug("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
cq 411 drivers/infiniband/hw/cxgb3/cxio_hal.c cqe = cxio_next_hw_cqe(cq);
cq 414 drivers/infiniband/hw/cxgb3/cxio_hal.c __func__, cq->rptr, cq->sw_wptr);
cq 415 drivers/infiniband/hw/cxgb3/cxio_hal.c swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
cq 418 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->sw_wptr++;
cq 419 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->rptr++;
cq 420 drivers/infiniband/hw/cxgb3/cxio_hal.c cqe = cxio_next_hw_cqe(cq);
cq 442 drivers/infiniband/hw/cxgb3/cxio_hal.c void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
cq 448 drivers/infiniband/hw/cxgb3/cxio_hal.c ptr = cq->sw_rptr;
cq 449 drivers/infiniband/hw/cxgb3/cxio_hal.c while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cq 450 drivers/infiniband/hw/cxgb3/cxio_hal.c cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
cq 457 drivers/infiniband/hw/cxgb3/cxio_hal.c pr_debug("%s cq %p count %d\n", __func__, cq, *count);
cq 460 drivers/infiniband/hw/cxgb3/cxio_hal.c void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
cq 467 drivers/infiniband/hw/cxgb3/cxio_hal.c ptr = cq->sw_rptr;
cq 468 drivers/infiniband/hw/cxgb3/cxio_hal.c while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cq 469 drivers/infiniband/hw/cxgb3/cxio_hal.c cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
cq 475 drivers/infiniband/hw/cxgb3/cxio_hal.c pr_debug("%s cq %p count %d\n", __func__, cq, *count);
cq 1042 drivers/infiniband/hw/cxgb3/cxio_hal.c static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
cq 1060 drivers/infiniband/hw/cxgb3/cxio_hal.c Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
cq 1062 drivers/infiniband/hw/cxgb3/cxio_hal.c *(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
cq 1064 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->sw_wptr++;
cq 1116 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
cq 1124 drivers/infiniband/hw/cxgb3/cxio_hal.c hw_cqe = cxio_next_cqe(cq);
cq 1290 drivers/infiniband/hw/cxgb3/cxio_hal.c flush_completed_wrs(wq, cq);
cq 1295 drivers/infiniband/hw/cxgb3/cxio_hal.c __func__, cq, cq->cqid, cq->sw_rptr);
cq 1296 drivers/infiniband/hw/cxgb3/cxio_hal.c ++cq->sw_rptr;
cq 1299 drivers/infiniband/hw/cxgb3/cxio_hal.c __func__, cq, cq->cqid, cq->rptr);
cq 1300 drivers/infiniband/hw/cxgb3/cxio_hal.c ++cq->rptr;
cq 1305 drivers/infiniband/hw/cxgb3/cxio_hal.c if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
cq 1306 drivers/infiniband/hw/cxgb3/cxio_hal.c || ((cq->rptr - cq->wptr) >= 128)) {
cq 1307 drivers/infiniband/hw/cxgb3/cxio_hal.c *credit = cq->rptr - cq->wptr;
cq 1308 drivers/infiniband/hw/cxgb3/cxio_hal.c cq->wptr = cq->rptr;

cq 158 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
cq 160 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
cq 161 drivers/infiniband/hw/cxgb3/cxio_hal.h void cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
cq 168 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
cq 189 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
cq 190 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
cq 191 drivers/infiniband/hw/cxgb3/cxio_hal.h void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
cq 192 drivers/infiniband/hw/cxgb3/cxio_hal.h void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
cq 193 drivers/infiniband/hw/cxgb3/cxio_hal.h void cxio_flush_hw_cq(struct t3_cq *cq);
cq 194 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,

cq 735 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline int cxio_cq_in_error(struct t3_cq *cq)
cq 738 drivers/infiniband/hw/cxgb3/cxio_wr.h &cq->queue[1 << cq->size_log2])->cq_err;
cq 741 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline void cxio_set_cq_in_error(struct t3_cq *cq)
cq 744 drivers/infiniband/hw/cxgb3/cxio_wr.h &cq->queue[1 << cq->size_log2])->cq_err = 1;
cq 767 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
cq 771 drivers/infiniband/hw/cxgb3/cxio_wr.h cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
cq 772 drivers/infiniband/hw/cxgb3/cxio_wr.h if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
cq 777 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
cq 781 drivers/infiniband/hw/cxgb3/cxio_wr.h if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
cq 782 drivers/infiniband/hw/cxgb3/cxio_wr.h cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
cq 788 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
cq 792 drivers/infiniband/hw/cxgb3/cxio_wr.h if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
cq 793 drivers/infiniband/hw/cxgb3/cxio_wr.h cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
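
cxio_next_cqe() here (cxio_wr.h 788-797, continuing just below) merges two completion sources: software-generated flush CQEs queued between sw_rptr and sw_wptr are drained first, and only then is the hardware ring entry at rptr returned, provided its generation-valid bit checks out. The same shape written out as a sketch:

/* Sketch of the two-source lookup in cxio_next_cqe(). */
static inline struct t3_cqe *my_next_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe;

	/* Software (flush) CQEs take priority over the hardware ring. */
	if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr))
		return cq->sw_queue + Q_PTR2IDX(cq->sw_rptr, cq->size_log2);

	cqe = cq->queue + Q_PTR2IDX(cq->rptr, cq->size_log2);
	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
		return cqe;		/* hardware CQE is valid this pass */
	return NULL;
}
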
cq 796 drivers/infiniband/hw/cxgb3/cxio_wr.h cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
cq 797 drivers/infiniband/hw/cxgb3/cxio_wr.h if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))

cq 45 drivers/infiniband/hw/cxgb3/iwch_cq.c ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
cq 49 drivers/infiniband/hw/cxgb3/iwch_cq.c credit, chp->cq.cqid);
cq 50 drivers/infiniband/hw/cxgb3/iwch_cq.c cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
cq 182 drivers/infiniband/hw/cxgb3/iwch_cq.c rd_cqe = cxio_next_cqe(&chp->cq);

cq 92 drivers/infiniband/hw/cxgb3/iwch_ev.c event.element.cq = &chp->ibcq;

cq 98 drivers/infiniband/hw/cxgb3/iwch_provider.c xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
cq 102 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
cq 145 drivers/infiniband/hw/cxgb3/iwch_provider.c chp->cq.size_log2 = ilog2(entries);
cq 147 drivers/infiniband/hw/cxgb3/iwch_provider.c if (cxio_create_cq(&rhp->rdev, &chp->cq, !udata))
cq 151 drivers/infiniband/hw/cxgb3/iwch_provider.c chp->ibcq.cqe = 1 << chp->cq.size_log2;
cq 156 drivers/infiniband/hw/cxgb3/iwch_provider.c if (xa_store_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL)) {
cq 157 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
cq 171 drivers/infiniband/hw/cxgb3/iwch_provider.c uresp.cqid = chp->cq.cqid;
cq 172 drivers/infiniband/hw/cxgb3/iwch_provider.c uresp.size_log2 = chp->cq.size_log2;
cq 178 drivers/infiniband/hw/cxgb3/iwch_provider.c mm->addr = virt_to_phys(chp->cq.queue);
cq 200 drivers/infiniband/hw/cxgb3/iwch_provider.c chp->cq.cqid, chp, (1 << chp->cq.size_log2),
cq 201 drivers/infiniband/hw/cxgb3/iwch_provider.c &chp->cq.dma_addr);
cq 224 drivers/infiniband/hw/cxgb3/iwch_provider.c chp->cq.rptr = rptr;
cq 227 drivers/infiniband/hw/cxgb3/iwch_provider.c pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
cq 228 drivers/infiniband/hw/cxgb3/iwch_provider.c err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
cq 231 drivers/infiniband/hw/cxgb3/iwch_provider.c pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
cq 713 drivers/infiniband/hw/cxgb3/iwch_provider.c schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
cq 714 drivers/infiniband/hw/cxgb3/iwch_provider.c rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
cq 770 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
cq 771 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;

cq 106 drivers/infiniband/hw/cxgb3/iwch_provider.h struct t3_cq cq;

cq 741 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_flush_hw_cq(&rchp->cq);
cq 742 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
cq 743 drivers/infiniband/hw/cxgb3/iwch_qp.c flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
cq 755 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_flush_hw_cq(&schp->cq);
cq 756 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_count_scqes(&schp->cq, &qhp->wq, &count);
cq 757 drivers/infiniband/hw/cxgb3/iwch_qp.c flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
cq 782 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_set_cq_in_error(&rchp->cq);
cq 787 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_set_cq_in_error(&schp->cq);

cq 37 drivers/infiniband/hw/cxgb4/cq.c static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq 56 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.restype = FW_RI_RES_TYPE_CQ;
cq 57 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.op = FW_RI_RES_OP_RESET;
cq 58 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.iqid = cpu_to_be32(cq->cqid);
cq 63 drivers/infiniband/hw/cxgb4/cq.c kfree(cq->sw_queue);
cq 65 drivers/infiniband/hw/cxgb4/cq.c cq->memsize, cq->queue,
cq 66 drivers/infiniband/hw/cxgb4/cq.c dma_unmap_addr(cq, mapping));
cq 67 drivers/infiniband/hw/cxgb4/cq.c c4iw_put_cqid(rdev, cq->cqid, uctx);
cq 70 drivers/infiniband/hw/cxgb4/cq.c static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq 85 drivers/infiniband/hw/cxgb4/cq.c cq->cqid = c4iw_get_cqid(rdev, uctx);
cq 86 drivers/infiniband/hw/cxgb4/cq.c if (!cq->cqid) {
cq 92 drivers/infiniband/hw/cxgb4/cq.c cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
cq 93 drivers/infiniband/hw/cxgb4/cq.c if (!cq->sw_queue) {
cq 98 drivers/infiniband/hw/cxgb4/cq.c cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
cq 99 drivers/infiniband/hw/cxgb4/cq.c &cq->dma_addr, GFP_KERNEL);
cq 100 drivers/infiniband/hw/cxgb4/cq.c if (!cq->queue) {
cq 104 drivers/infiniband/hw/cxgb4/cq.c dma_unmap_addr_set(cq, mapping, cq->dma_addr);
cq 107 drivers/infiniband/hw/cxgb4/cq.c cq->qp_errp = &((struct t4_status_page *)
cq 108 drivers/infiniband/hw/cxgb4/cq.c ((u8 *)cq->queue + (cq->size - 1) *
cq 109 drivers/infiniband/hw/cxgb4/cq.c (sizeof(*cq->queue) / 2)))->qp_err;
cq 111 drivers/infiniband/hw/cxgb4/cq.c cq->qp_errp = &((struct t4_status_page *)
cq 112 drivers/infiniband/hw/cxgb4/cq.c ((u8 *)cq->queue + (cq->size - 1) *
cq 113 drivers/infiniband/hw/cxgb4/cq.c sizeof(*cq->queue)))->qp_err;
cq 134 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.restype = FW_RI_RES_TYPE_CQ;
cq 135 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.op = FW_RI_RES_OP_WRITE;
cq 136 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.iqid = cpu_to_be32(cq->cqid);
cq 137 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
cq 142 drivers/infiniband/hw/cxgb4/cq.c rdev->lldi.ciq_ids[cq->vector]));
cq 143 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
cq 151 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.iqsize = cpu_to_be16(cq->size);
cq 152 drivers/infiniband/hw/cxgb4/cq.c res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
cq 159 drivers/infiniband/hw/cxgb4/cq.c cq->gen = 1;
cq 160 drivers/infiniband/hw/cxgb4/cq.c cq->gts = rdev->lldi.gts_reg;
cq 161 drivers/infiniband/hw/cxgb4/cq.c cq->rdev = rdev;
cq 163 drivers/infiniband/hw/cxgb4/cq.c cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
cq 164 drivers/infiniband/hw/cxgb4/cq.c &cq->bar2_qid,
cq 165 drivers/infiniband/hw/cxgb4/cq.c user ? &cq->bar2_pa : NULL);
cq 166 drivers/infiniband/hw/cxgb4/cq.c if (user && !cq->bar2_pa) {
cq 168 drivers/infiniband/hw/cxgb4/cq.c pci_name(rdev->lldi.pdev), cq->cqid);
cq 174 drivers/infiniband/hw/cxgb4/cq.c dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
cq 175 drivers/infiniband/hw/cxgb4/cq.c dma_unmap_addr(cq, mapping));
cq 177 drivers/infiniband/hw/cxgb4/cq.c kfree(cq->sw_queue);
cq 179 drivers/infiniband/hw/cxgb4/cq.c c4iw_put_cqid(rdev, cq->cqid, uctx);
cq 184 drivers/infiniband/hw/cxgb4/cq.c static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
cq 189 drivers/infiniband/hw/cxgb4/cq.c wq, cq, cq->sw_cidx, cq->sw_pidx);
cq 196 drivers/infiniband/hw/cxgb4/cq.c cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq 199 drivers/infiniband/hw/cxgb4/cq.c cq->sw_queue[cq->sw_pidx] = cqe;
cq 200 drivers/infiniband/hw/cxgb4/cq.c t4_swcq_produce(cq);
cq 203 drivers/infiniband/hw/cxgb4/cq.c int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
cq 209 drivers/infiniband/hw/cxgb4/cq.c wq, cq, wq->rq.in_use, count);
cq 211 drivers/infiniband/hw/cxgb4/cq.c insert_recv_cqe(wq, cq, 0);
cq 217 drivers/infiniband/hw/cxgb4/cq.c static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
cq 223 drivers/infiniband/hw/cxgb4/cq.c wq, cq, cq->sw_cidx, cq->sw_pidx);
cq 231 drivers/infiniband/hw/cxgb4/cq.c cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq 232 drivers/infiniband/hw/cxgb4/cq.c cq->sw_queue[cq->sw_pidx] = cqe;
cq 233 drivers/infiniband/hw/cxgb4/cq.c t4_swcq_produce(cq);
cq 243 drivers/infiniband/hw/cxgb4/cq.c struct t4_cq *cq = &chp->cq;
cq 253 drivers/infiniband/hw/cxgb4/cq.c insert_sq_cqe(wq, cq, swsqe);
cq 267 drivers/infiniband/hw/cxgb4/cq.c static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
cq 287 drivers/infiniband/hw/cxgb4/cq.c cidx, cq->sw_pidx);
cq 289 drivers/infiniband/hw/cxgb4/cq.c cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
cq 290 drivers/infiniband/hw/cxgb4/cq.c t4_swcq_produce(cq);
cq 342 drivers/infiniband/hw/cxgb4/cq.c pr_debug("cqid 0x%x\n", chp->cq.cqid);
cq 343 drivers/infiniband/hw/cxgb4/cq.c ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
cq 407 drivers/infiniband/hw/cxgb4/cq.c flush_completed_wrs(&qhp->wq, &chp->cq);
cq 409 drivers/infiniband/hw/cxgb4/cq.c swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
cq 412 drivers/infiniband/hw/cxgb4/cq.c t4_swcq_produce(&chp->cq);
cq 415 drivers/infiniband/hw/cxgb4/cq.c t4_hwcq_consume(&chp->cq);
cq 416 drivers/infiniband/hw/cxgb4/cq.c ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
cq 443 drivers/infiniband/hw/cxgb4/cq.c void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
cq 450 drivers/infiniband/hw/cxgb4/cq.c ptr = cq->sw_cidx;
cq 451 drivers/infiniband/hw/cxgb4/cq.c while (ptr != cq->sw_pidx) {
cq 452 drivers/infiniband/hw/cxgb4/cq.c cqe = &cq->sw_queue[ptr];
cq 456 drivers/infiniband/hw/cxgb4/cq.c if (++ptr == cq->size)
cq 459 drivers/infiniband/hw/cxgb4/cq.c pr_debug("cq %p count %d\n", cq, *count);
cq 544 drivers/infiniband/hw/cxgb4/cq.c static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
cq 553 drivers/infiniband/hw/cxgb4/cq.c ret = t4_next_cqe(cq, &hw_cqe);
cq 739 drivers/infiniband/hw/cxgb4/cq.c flush_completed_wrs(wq, cq);
cq 744 drivers/infiniband/hw/cxgb4/cq.c cq, cq->cqid, cq->sw_cidx);
cq 745 drivers/infiniband/hw/cxgb4/cq.c t4_swcq_consume(cq);
cq 748 drivers/infiniband/hw/cxgb4/cq.c cq, cq->cqid, cq->cidx);
cq 749 drivers/infiniband/hw/cxgb4/cq.c t4_hwcq_consume(cq);
cq 764 drivers/infiniband/hw/cxgb4/cq.c ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
cq 928 drivers/infiniband/hw/cxgb4/cq.c ret = t4_next_cqe(&chp->cq, &rd_cqe);
cq 978 drivers/infiniband/hw/cxgb4/cq.c xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
cq 984 drivers/infiniband/hw/cxgb4/cq.c destroy_cq(&chp->rhp->rdev, &chp->cq,
cq 985 drivers/infiniband/hw/cxgb4/cq.c ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
cq 1056 drivers/infiniband/hw/cxgb4/cq.c (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));
cq 1064 drivers/infiniband/hw/cxgb4/cq.c chp->cq.size = hwentries;
cq 1065 drivers/infiniband/hw/cxgb4/cq.c chp->cq.memsize = memsize;
cq 1066 drivers/infiniband/hw/cxgb4/cq.c chp->cq.vector = vector;
cq 1068 drivers/infiniband/hw/cxgb4/cq.c ret = create_cq(&rhp->rdev, &chp->cq,
cq 1075 drivers/infiniband/hw/cxgb4/cq.c chp->cq.size--; /* status page */
cq 1081 drivers/infiniband/hw/cxgb4/cq.c ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
cq 1096 drivers/infiniband/hw/cxgb4/cq.c uresp.cqid = chp->cq.cqid;
cq 1097 drivers/infiniband/hw/cxgb4/cq.c uresp.size = chp->cq.size;
cq 1098 drivers/infiniband/hw/cxgb4/cq.c uresp.memsize = chp->cq.memsize;
cq 1118 drivers/infiniband/hw/cxgb4/cq.c mm->addr = virt_to_phys(chp->cq.queue);
cq 1119 drivers/infiniband/hw/cxgb4/cq.c mm->len = chp->cq.memsize;
cq 1123 drivers/infiniband/hw/cxgb4/cq.c mm2->addr = chp->cq.bar2_pa;
cq 1129 drivers/infiniband/hw/cxgb4/cq.c chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
cq 1130 drivers/infiniband/hw/cxgb4/cq.c &chp->cq.dma_addr);
cq 1137 drivers/infiniband/hw/cxgb4/cq.c xa_erase_irq(&rhp->cqs, chp->cq.cqid);
cq 1139 drivers/infiniband/hw/cxgb4/cq.c destroy_cq(&chp->rhp->rdev, &chp->cq,
cq 1158 drivers/infiniband/hw/cxgb4/cq.c t4_arm_cq(&chp->cq,
cq 1161 drivers/infiniband/hw/cxgb4/cq.c ret = t4_cq_notempty(&chp->cq);
cq 1176 drivers/infiniband/hw/cxgb4/cq.c insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);
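
insert_recv_cqe()/insert_sq_cqe() above synthesize completions in software when a QP is flushed: the CQE is stamped with the CQ's current generation bit so poll_cq() will accept it, then produced into the software queue. A reduced sketch; only the fields visible in the listing are set, and the opcode/status marking is elided:

static void my_insert_flush_cqe(struct t4_cq *cq)
{
	struct t4_cqe cqe;

	memset(&cqe, 0, sizeof(cqe));
	/* ... set opcode/status/QPID fields to mark a flushed WR ... */
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;	/* stage it for poll_cq() */
	t4_swcq_produce(cq);			/* bump sw_pidx / sw_in_use */
}
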
cq 802 drivers/infiniband/hw/cxgb4/device.c if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
cq 803 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
cq 806 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
cq 807 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->cq.size);
cq 831 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->cq.start,
cq 832 drivers/infiniband/hw/cxgb4/device.c rdev->lldi.vr->cq.size,
cq 879 drivers/infiniband/hw/cxgb4/device.c rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
cq 880 drivers/infiniband/hw/cxgb4/device.c rdev->status_page->cq_size = rdev->lldi.vr->cq.size;
cq 964 drivers/infiniband/hw/cxgb4/device.c infop->vr->cq.size > 0;

cq 107 drivers/infiniband/hw/cxgb4/ev.c event.element.cq = &chp->ibcq;
cq 113 drivers/infiniband/hw/cxgb4/ev.c if (t4_clear_cq_armed(&chp->cq)) {
cq 233 drivers/infiniband/hw/cxgb4/ev.c t4_clear_cq_armed(&chp->cq);

cq 432 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct t4_cq cq;
cq 1021 drivers/infiniband/hw/cxgb4/iw_cxgb4.h void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
cq 1023 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);

cq 984 drivers/infiniband/hw/cxgb4/qp.c struct t4_cq *cq;
cq 988 drivers/infiniband/hw/cxgb4/qp.c cq = &schp->cq;
cq 1003 drivers/infiniband/hw/cxgb4/qp.c cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq 1004 drivers/infiniband/hw/cxgb4/qp.c cq->sw_queue[cq->sw_pidx] = cqe;
cq 1005 drivers/infiniband/hw/cxgb4/qp.c t4_swcq_produce(cq);
cq 1008 drivers/infiniband/hw/cxgb4/qp.c if (t4_clear_cq_armed(&schp->cq)) {
cq 1040 drivers/infiniband/hw/cxgb4/qp.c struct t4_cq *cq;
cq 1043 drivers/infiniband/hw/cxgb4/qp.c cq = &rchp->cq;
cq 1054 drivers/infiniband/hw/cxgb4/qp.c cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cq 1055 drivers/infiniband/hw/cxgb4/qp.c cq->sw_queue[cq->sw_pidx] = cqe;
cq 1056 drivers/infiniband/hw/cxgb4/qp.c t4_swcq_produce(cq);
cq 1059 drivers/infiniband/hw/cxgb4/qp.c if (t4_clear_cq_armed(&rchp->cq)) {
cq 1622 drivers/infiniband/hw/cxgb4/qp.c c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
cq 1623 drivers/infiniband/hw/cxgb4/qp.c rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
cq 1637 drivers/infiniband/hw/cxgb4/qp.c t4_clear_cq_armed(&rchp->cq)) {
cq 1644 drivers/infiniband/hw/cxgb4/qp.c if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
cq 1650 drivers/infiniband/hw/cxgb4/qp.c if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
cq 1675 drivers/infiniband/hw/cxgb4/qp.c t4_set_cq_in_error(&rchp->cq);
cq 1680 drivers/infiniband/hw/cxgb4/qp.c t4_set_cq_in_error(&schp->cq);
cq 2134 drivers/infiniband/hw/cxgb4/qp.c schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
cq 2135 drivers/infiniband/hw/cxgb4/qp.c rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
cq 2185 drivers/infiniband/hw/cxgb4/qp.c ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
cq 2197 drivers/infiniband/hw/cxgb4/qp.c qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
cq 2198 drivers/infiniband/hw/cxgb4/qp.c qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;

cq 274 drivers/infiniband/hw/cxgb4/restrack.c static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
cq 276 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
cq 278 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
cq 280 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "size", cq->size))
cq 282 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
cq 284 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
cq 286 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
cq 288 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
cq 290 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
cq 292 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
cq 294 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
cq 296 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "error", cq->error))
cq 299 drivers/infiniband/hw/cxgb4/restrack.c be64_to_cpu(cq->bits_type_ts)))
cq 301 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
cq 336 drivers/infiniband/hw/cxgb4/restrack.c static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
cq 341 drivers/infiniband/hw/cxgb4/restrack.c idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
cq 344 drivers/infiniband/hw/cxgb4/restrack.c idx = cq->cidx;
cq 353 drivers/infiniband/hw/cxgb4/restrack.c static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
cq 358 drivers/infiniband/hw/cxgb4/restrack.c if (!cq->sw_in_use)
cq 361 drivers/infiniband/hw/cxgb4/restrack.c idx = cq->sw_cidx;
cq 364 drivers/infiniband/hw/cxgb4/restrack.c if (cq->sw_in_use == 1)
cq 366 drivers/infiniband/hw/cxgb4/restrack.c idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
cq 383 drivers/infiniband/hw/cxgb4/restrack.c struct t4_cq cq;
cq 398 drivers/infiniband/hw/cxgb4/restrack.c cq = chp->cq;
cq 401 drivers/infiniband/hw/cxgb4/restrack.c idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
cq 402 drivers/infiniband/hw/cxgb4/restrack.c hwcqes[0] = chp->cq.queue[idx];
cq 404 drivers/infiniband/hw/cxgb4/restrack.c idx = cq.cidx;
cq 405 drivers/infiniband/hw/cxgb4/restrack.c hwcqes[1] = chp->cq.queue[idx];
cq 408 drivers/infiniband/hw/cxgb4/restrack.c if (cq.sw_in_use) {
cq 409 drivers/infiniband/hw/cxgb4/restrack.c swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
cq 410 drivers/infiniband/hw/cxgb4/restrack.c if (cq.sw_in_use > 1) {
cq 411 drivers/infiniband/hw/cxgb4/restrack.c idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
cq 412 drivers/infiniband/hw/cxgb4/restrack.c swcqes[1] = chp->cq.sw_queue[idx];
cq 418 drivers/infiniband/hw/cxgb4/restrack.c if (fill_cq(msg, &cq))
cq 421 drivers/infiniband/hw/cxgb4/restrack.c if (fill_swcqes(msg, &cq, swcqes))
cq 424 drivers/infiniband/hw/cxgb4/restrack.c if (fill_hwcqes(msg, &cq, hwcqes))

cq 718 drivers/infiniband/hw/cxgb4/t4.h static inline void write_gts(struct t4_cq *cq, u32 val)
cq 720 drivers/infiniband/hw/cxgb4/t4.h if (cq->bar2_va)
cq 721 drivers/infiniband/hw/cxgb4/t4.h writel(val | INGRESSQID_V(cq->bar2_qid),
cq 722 drivers/infiniband/hw/cxgb4/t4.h cq->bar2_va + SGE_UDB_GTS);
cq 724 drivers/infiniband/hw/cxgb4/t4.h writel(val | INGRESSQID_V(cq->cqid), cq->gts);
cq 727 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_clear_cq_armed(struct t4_cq *cq)
cq 729 drivers/infiniband/hw/cxgb4/t4.h return test_and_clear_bit(CQ_ARMED, &cq->flags);
cq 732 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_arm_cq(struct t4_cq *cq, int se)
cq 736 drivers/infiniband/hw/cxgb4/t4.h set_bit(CQ_ARMED, &cq->flags);
cq 737 drivers/infiniband/hw/cxgb4/t4.h while (cq->cidx_inc > CIDXINC_M) {
cq 739 drivers/infiniband/hw/cxgb4/t4.h write_gts(cq, val);
cq 740 drivers/infiniband/hw/cxgb4/t4.h cq->cidx_inc -= CIDXINC_M;
cq 742 drivers/infiniband/hw/cxgb4/t4.h val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6);
cq 743 drivers/infiniband/hw/cxgb4/t4.h write_gts(cq, val);
cq 744 drivers/infiniband/hw/cxgb4/t4.h cq->cidx_inc = 0;
cq 748 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_swcq_produce(struct t4_cq *cq)
cq 750 drivers/infiniband/hw/cxgb4/t4.h cq->sw_in_use++;
cq 751 drivers/infiniband/hw/cxgb4/t4.h if (cq->sw_in_use == cq->size) {
cq 753 drivers/infiniband/hw/cxgb4/t4.h __func__, cq->cqid);
cq 754 drivers/infiniband/hw/cxgb4/t4.h cq->error = 1;
cq 755 drivers/infiniband/hw/cxgb4/t4.h cq->sw_in_use--;
cq 758 drivers/infiniband/hw/cxgb4/t4.h if (++cq->sw_pidx == cq->size)
cq 759 drivers/infiniband/hw/cxgb4/t4.h cq->sw_pidx = 0;
cq 762 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_swcq_consume(struct t4_cq *cq)
cq 764 drivers/infiniband/hw/cxgb4/t4.h cq->sw_in_use--;
cq 765 drivers/infiniband/hw/cxgb4/t4.h if (++cq->sw_cidx == cq->size)
cq 766 drivers/infiniband/hw/cxgb4/t4.h cq->sw_cidx = 0;
cq 769
drivers/infiniband/hw/cxgb4/t4.h static inline void t4_hwcq_consume(struct t4_cq *cq) cq 771 drivers/infiniband/hw/cxgb4/t4.h cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; cq 772 drivers/infiniband/hw/cxgb4/t4.h if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) { cq 775 drivers/infiniband/hw/cxgb4/t4.h val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7); cq 776 drivers/infiniband/hw/cxgb4/t4.h write_gts(cq, val); cq 777 drivers/infiniband/hw/cxgb4/t4.h cq->cidx_inc = 0; cq 779 drivers/infiniband/hw/cxgb4/t4.h if (++cq->cidx == cq->size) { cq 780 drivers/infiniband/hw/cxgb4/t4.h cq->cidx = 0; cq 781 drivers/infiniband/hw/cxgb4/t4.h cq->gen ^= 1; cq 785 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) cq 787 drivers/infiniband/hw/cxgb4/t4.h return (CQE_GENBIT(cqe) == cq->gen); cq 790 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_cq_notempty(struct t4_cq *cq) cq 792 drivers/infiniband/hw/cxgb4/t4.h return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]); cq 795 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) cq 800 drivers/infiniband/hw/cxgb4/t4.h if (cq->cidx == 0) cq 801 drivers/infiniband/hw/cxgb4/t4.h prev_cidx = cq->size - 1; cq 803 drivers/infiniband/hw/cxgb4/t4.h prev_cidx = cq->cidx - 1; cq 805 drivers/infiniband/hw/cxgb4/t4.h if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) { cq 807 drivers/infiniband/hw/cxgb4/t4.h cq->error = 1; cq 808 drivers/infiniband/hw/cxgb4/t4.h pr_err("cq overflow cqid %u\n", cq->cqid); cq 809 drivers/infiniband/hw/cxgb4/t4.h } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { cq 813 drivers/infiniband/hw/cxgb4/t4.h *cqe = &cq->queue[cq->cidx]; cq 820 drivers/infiniband/hw/cxgb4/t4.h static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) cq 822 drivers/infiniband/hw/cxgb4/t4.h if (cq->sw_in_use == cq->size) { cq 824 drivers/infiniband/hw/cxgb4/t4.h __func__, cq->cqid); cq 825 drivers/infiniband/hw/cxgb4/t4.h cq->error = 1; cq 828 drivers/infiniband/hw/cxgb4/t4.h if (cq->sw_in_use) cq 829 drivers/infiniband/hw/cxgb4/t4.h return &cq->sw_queue[cq->sw_cidx]; cq 833 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) cq 837 drivers/infiniband/hw/cxgb4/t4.h if (cq->error) cq 839 drivers/infiniband/hw/cxgb4/t4.h else if (cq->sw_in_use) cq 840 drivers/infiniband/hw/cxgb4/t4.h *cqe = &cq->sw_queue[cq->sw_cidx]; cq 842 drivers/infiniband/hw/cxgb4/t4.h ret = t4_next_hw_cqe(cq, cqe); cq 846 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_cq_in_error(struct t4_cq *cq) cq 848 drivers/infiniband/hw/cxgb4/t4.h return *cq->qp_errp; cq 851 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_set_cq_in_error(struct t4_cq *cq) cq 853 drivers/infiniband/hw/cxgb4/t4.h *cq->qp_errp = 1; cq 301 drivers/infiniband/hw/cxgb4/t4fw_ri_api.h } cq; cq 176 drivers/infiniband/hw/efa/efa_com.c struct efa_com_admin_cq *cq = &aq->cq; cq 177 drivers/infiniband/hw/efa/efa_com.c u16 size = aq->depth * sizeof(*cq->entries); cq 182 drivers/infiniband/hw/efa/efa_com.c cq->entries = cq 183 drivers/infiniband/hw/efa/efa_com.c dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL); cq 184 drivers/infiniband/hw/efa/efa_com.c if (!cq->entries) cq 187 drivers/infiniband/hw/efa/efa_com.c spin_lock_init(&cq->lock); cq 189 drivers/infiniband/hw/efa/efa_com.c cq->cc = 0; cq 190 drivers/infiniband/hw/efa/efa_com.c cq->phase = 1; cq 192 drivers/infiniband/hw/efa/efa_com.c 
addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(cq->dma_addr); cq 193 drivers/infiniband/hw/efa/efa_com.c addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(cq->dma_addr); cq 460 drivers/infiniband/hw/efa/efa_com.c ci = aq->cq.cc & queue_size_mask; cq 461 drivers/infiniband/hw/efa/efa_com.c phase = aq->cq.phase; cq 463 drivers/infiniband/hw/efa/efa_com.c cqe = &aq->cq.entries[ci]; cq 482 drivers/infiniband/hw/efa/efa_com.c cqe = &aq->cq.entries[ci]; cq 485 drivers/infiniband/hw/efa/efa_com.c aq->cq.cc += comp_num; cq 486 drivers/infiniband/hw/efa/efa_com.c aq->cq.phase = phase; cq 520 drivers/infiniband/hw/efa/efa_com.c spin_lock_irqsave(&aq->cq.lock, flags); cq 522 drivers/infiniband/hw/efa/efa_com.c spin_unlock_irqrestore(&aq->cq.lock, flags); cq 564 drivers/infiniband/hw/efa/efa_com.c spin_lock_irqsave(&aq->cq.lock, flags); cq 566 drivers/infiniband/hw/efa/efa_com.c spin_unlock_irqrestore(&aq->cq.lock, flags); cq 576 drivers/infiniband/hw/efa/efa_com.c comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); cq 583 drivers/infiniband/hw/efa/efa_com.c comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); cq 677 drivers/infiniband/hw/efa/efa_com.c struct efa_com_admin_cq *cq = &aq->cq; cq 689 drivers/infiniband/hw/efa/efa_com.c size = aq->depth * sizeof(*cq->entries); cq 690 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr); cq 796 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries), cq 797 drivers/infiniband/hw/efa/efa_com.c aq->cq.entries, aq->cq.dma_addr); cq 820 drivers/infiniband/hw/efa/efa_com.c spin_lock_irqsave(&edev->aq.cq.lock, flags); cq 822 drivers/infiniband/hw/efa/efa_com.c spin_unlock_irqrestore(&edev->aq.cq.lock, flags); cq 65 drivers/infiniband/hw/efa/efa_com.h struct efa_com_admin_cq cq; cq 891 drivers/infiniband/hw/efa/efa_verbs.c struct efa_cq *cq = to_ecq(ibcq); cq 895 drivers/infiniband/hw/efa/efa_verbs.c cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); cq 897 drivers/infiniband/hw/efa/efa_verbs.c efa_destroy_cq_idx(dev, cq->cq_idx); cq 898 drivers/infiniband/hw/efa/efa_verbs.c dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size, cq 902 drivers/infiniband/hw/efa/efa_verbs.c static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, cq 905 drivers/infiniband/hw/efa/efa_verbs.c resp->q_mmap_size = cq->size; cq 906 drivers/infiniband/hw/efa/efa_verbs.c resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq, cq 907 drivers/infiniband/hw/efa/efa_verbs.c virt_to_phys(cq->cpu_addr), cq 908 drivers/infiniband/hw/efa/efa_verbs.c cq->size, EFA_MMAP_DMA_PAGE); cq 926 drivers/infiniband/hw/efa/efa_verbs.c struct efa_cq *cq = to_ecq(ibcq); cq 986 drivers/infiniband/hw/efa/efa_verbs.c cq->ucontext = ucontext; cq 987 drivers/infiniband/hw/efa/efa_verbs.c cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs); cq 988 drivers/infiniband/hw/efa/efa_verbs.c cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size, cq 990 drivers/infiniband/hw/efa/efa_verbs.c if (!cq->cpu_addr) { cq 995 drivers/infiniband/hw/efa/efa_verbs.c params.uarn = cq->ucontext->uarn; cq 997 drivers/infiniband/hw/efa/efa_verbs.c params.dma_addr = cq->dma_addr; cq 1005 drivers/infiniband/hw/efa/efa_verbs.c cq->cq_idx = result.cq_idx; cq 1006 drivers/infiniband/hw/efa/efa_verbs.c cq->ibcq.cqe = result.actual_depth; cq 1009 drivers/infiniband/hw/efa/efa_verbs.c err = cq_mmap_entries_setup(dev, cq, &resp); cq 1012 drivers/infiniband/hw/efa/efa_verbs.c cq->cq_idx); cq 1029 drivers/infiniband/hw/efa/efa_verbs.c 
cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr); cq 1034 drivers/infiniband/hw/efa/efa_verbs.c efa_destroy_cq_idx(dev, cq->cq_idx); cq 1036 drivers/infiniband/hw/efa/efa_verbs.c dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size, cq 1039 drivers/infiniband/hw/efa/efa_verbs.c free_pages_exact(cq->cpu_addr, cq->size); cq 306 drivers/infiniband/hw/hfi1/file_ops.c struct hfi1_user_sdma_comp_q *cq = fd->cq; cq 313 drivers/infiniband/hw/hfi1/file_ops.c if (!cq || !pq) { cq 547 drivers/infiniband/hw/hfi1/file_ops.c struct hfi1_user_sdma_comp_q *cq = fd->cq; cq 549 drivers/infiniband/hw/hfi1/file_ops.c if (!cq) { cq 553 drivers/infiniband/hw/hfi1/file_ops.c memaddr = (u64)cq->comps; cq 554 drivers/infiniband/hw/hfi1/file_ops.c memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries); cq 1171 drivers/infiniband/hw/hfi1/file_ops.c cinfo.sdma_ring_size = fd->cq->nentries; cq 1444 drivers/infiniband/hw/hfi1/hfi.h struct hfi1_user_sdma_comp_q *cq; cq 95 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_comp_q *cq, cq 170 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_comp_q *cq; cq 222 drivers/infiniband/hw/hfi1/user_sdma.c cq = kzalloc(sizeof(*cq), GFP_KERNEL); cq 223 drivers/infiniband/hw/hfi1/user_sdma.c if (!cq) cq 226 drivers/infiniband/hw/hfi1/user_sdma.c cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps) cq 228 drivers/infiniband/hw/hfi1/user_sdma.c if (!cq->comps) cq 231 drivers/infiniband/hw/hfi1/user_sdma.c cq->nentries = hfi1_sdma_comp_ring_size; cq 241 drivers/infiniband/hw/hfi1/user_sdma.c fd->cq = cq; cq 246 drivers/infiniband/hw/hfi1/user_sdma.c vfree(cq->comps); cq 248 drivers/infiniband/hw/hfi1/user_sdma.c kfree(cq); cq 306 drivers/infiniband/hw/hfi1/user_sdma.c if (fd->cq) { cq 307 drivers/infiniband/hw/hfi1/user_sdma.c vfree(fd->cq->comps); cq 308 drivers/infiniband/hw/hfi1/user_sdma.c kfree(fd->cq); cq 309 drivers/infiniband/hw/hfi1/user_sdma.c fd->cq = NULL; cq 350 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_comp_q *cq = fd->cq; cq 421 drivers/infiniband/hw/hfi1/user_sdma.c req->cq = cq; cq 590 drivers/infiniband/hw/hfi1/user_sdma.c set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); cq 626 drivers/infiniband/hw/hfi1/user_sdma.c set_comp_state(pq, cq, info.comp_idx, ERROR, ret); cq 1403 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_comp_q *cq; cq 1411 drivers/infiniband/hw/hfi1/user_sdma.c cq = req->cq; cq 1428 drivers/infiniband/hw/hfi1/user_sdma.c set_comp_state(pq, cq, req->info.comp_idx, state, status); cq 1474 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_comp_q *cq, cq 1479 drivers/infiniband/hw/hfi1/user_sdma.c cq->comps[idx].errcode = -ret; cq 1481 drivers/infiniband/hw/hfi1/user_sdma.c cq->comps[idx].status = state; cq 179 drivers/infiniband/hw/hfi1/user_sdma.h struct hfi1_user_sdma_comp_q *cq; cq 71 drivers/infiniband/hw/hns/hns_roce_cq.c event.element.cq = ibcq; cq 524 drivers/infiniband/hw/hns/hns_roce_cq.c struct hns_roce_cq *cq; cq 526 drivers/infiniband/hw/hns/hns_roce_cq.c cq = xa_load(&hr_dev->cq_table.array, cqn & (hr_dev->caps.num_cqs - 1)); cq 527 drivers/infiniband/hw/hns/hns_roce_cq.c if (!cq) { cq 532 drivers/infiniband/hw/hns/hns_roce_cq.c ++cq->arm_sn; cq 533 drivers/infiniband/hw/hns/hns_roce_cq.c cq->comp(cq); cq 540 drivers/infiniband/hw/hns/hns_roce_cq.c struct hns_roce_cq *cq; cq 542 drivers/infiniband/hw/hns/hns_roce_cq.c cq = xa_load(&cq_table->array, cqn & (hr_dev->caps.num_cqs - 1)); cq 543 drivers/infiniband/hw/hns/hns_roce_cq.c if (cq) cq 544 
drivers/infiniband/hw/hns/hns_roce_cq.c atomic_inc(&cq->refcount); cq 546 drivers/infiniband/hw/hns/hns_roce_cq.c if (!cq) { cq 551 drivers/infiniband/hw/hns/hns_roce_cq.c cq->event(cq, (enum hns_roce_event)event_type); cq 553 drivers/infiniband/hw/hns/hns_roce_cq.c if (atomic_dec_and_test(&cq->refcount)) cq 554 drivers/infiniband/hw/hns/hns_roce_cq.c complete(&cq->free); cq 497 drivers/infiniband/hw/hns/hns_roce_device.h void (*comp)(struct hns_roce_cq *cq); cq 498 drivers/infiniband/hw/hns/hns_roce_device.h void (*event)(struct hns_roce_cq *cq, enum hns_roce_event event_type); cq 733 drivers/infiniband/hw/hns/hns_roce_device.h __le32 cq; cq 978 drivers/infiniband/hw/hns/hns_roce_device.h int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); cq 708 drivers/infiniband/hw/hns/hns_roce_hw_v1.c struct ib_cq *cq; cq 728 drivers/infiniband/hw/hns/hns_roce_hw_v1.c cq = rdma_zalloc_drv_obj(ibdev, ib_cq); cq 729 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (!cq) cq 732 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL); cq 737 drivers/infiniband/hw/hns/hns_roce_hw_v1.c free_mr->mr_free_cq = to_hr_cq(cq); cq 811 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->ibqp.recv_cq = cq; cq 812 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->ibqp.send_cq = cq; cq 868 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hns_roce_ib_destroy_cq(cq, NULL); cq 870 drivers/infiniband/hw/hns/hns_roce_hw_v1.c kfree(cq); cq 2151 drivers/infiniband/hw/hns/hns_roce_hw_v1.c static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) cq 3811 drivers/infiniband/hw/hns/hns_roce_hw_v1.c cqn = roce_get_field(aeqe->event.cq_event.cq, cq 4793 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) cq 4795 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct hns_roce_dev *hr_dev = to_hr_dev(cq->device); cq 4797 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct hns_roce_cq *hr_cq = to_hr_cq(cq); cq 5050 drivers/infiniband/hw/hns/hns_roce_hw_v2.c cqn = roce_get_field(aeqe->event.cq_event.cq, cq 409 drivers/infiniband/hw/hns/hns_roce_srq.c to_hr_cq(srq_init_attr->ext.cq)->cqn : 0; cq 525 drivers/infiniband/hw/i40iw/i40iw.h void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq); cq 1636 drivers/infiniband/hw/i40iw/i40iw_ctrl.c struct i40iw_sc_cq *cq = NULL; cq 1643 drivers/infiniband/hw/i40iw/i40iw_ctrl.c return cq; cq 1645 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1); cq 1652 drivers/infiniband/hw/i40iw/i40iw_ctrl.c i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id); cq 1654 drivers/infiniband/hw/i40iw/i40iw_ctrl.c i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id); cq 1656 drivers/infiniband/hw/i40iw/i40iw_ctrl.c return cq; cq 1838 drivers/infiniband/hw/i40iw/i40iw_ctrl.c info->cq = true; cq 1855 drivers/infiniband/hw/i40iw/i40iw_ctrl.c info->cq = true; cq 1934 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq, cq 1950 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_pa = info->cq_pa; cq 1951 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.cq_base = info->cq_base; cq 1952 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->shadow_area_pa = info->shadow_area_pa; cq 1953 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.shadow_area = info->shadow_area; cq 1954 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->shadow_read_threshold = info->shadow_read_threshold; cq 
1955 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->dev = info->dev; cq 1956 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceq_id = info->ceq_id; cq 1957 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.cq_size = info->num_elem; cq 1958 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_type = I40IW_CQ_TYPE_CQP; cq 1959 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceqe_mask = info->ceqe_mask; cq 1960 drivers/infiniband/hw/i40iw/i40iw_ctrl.c I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); cq 1962 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.cq_id = 0; /* control cq is id 0 always */ cq 1963 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceq_id_valid = info->ceq_id_valid; cq 1964 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->tph_en = info->tph_en; cq 1965 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->tph_val = info->tph_val; cq 1966 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct; cq 1968 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->pbl_list = info->pbl_list; cq 1969 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->virtual_map = info->virtual_map; cq 1970 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->pbl_chunk_size = info->pbl_chunk_size; cq 1971 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->first_pm_pbl_idx = info->first_pm_pbl_idx; cq 1972 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.polarity = true; cq 1975 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.cqe_alloc_reg = NULL; cq 1976 drivers/infiniband/hw/i40iw/i40iw_ctrl.c info->dev->ccq = cq; cq 2109 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq, cq 2122 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_pa = info->cq_base_pa; cq 2123 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->dev = info->dev; cq 2124 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceq_id = info->ceq_id; cq 2126 drivers/infiniband/hw/i40iw/i40iw_ctrl.c if (i40iw_get_hw_addr(cq->dev)) cq 2127 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) + cq 2130 drivers/infiniband/hw/i40iw/i40iw_ctrl.c ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info); cq 2133 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->virtual_map = info->virtual_map; cq 2134 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->pbl_chunk_size = info->pbl_chunk_size; cq 2135 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceqe_mask = info->ceqe_mask; cq 2136 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_type = (info->type) ? 
info->type : I40IW_CQ_TYPE_IWARP; cq 2138 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->shadow_area_pa = info->shadow_area_pa; cq 2139 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->shadow_read_threshold = info->shadow_read_threshold; cq 2141 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceq_id_valid = info->ceq_id_valid; cq 2142 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->tph_en = info->tph_en; cq 2143 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->tph_val = info->tph_val; cq 2145 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->first_pm_pbl_idx = info->first_pm_pbl_idx; cq 2157 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq, cq 2166 drivers/infiniband/hw/i40iw/i40iw_ctrl.c if (cq->cq_uk.cq_id > I40IW_MAX_CQID) cq 2169 drivers/infiniband/hw/i40iw/i40iw_ctrl.c if (cq->ceq_id > I40IW_MAX_CEQID) cq 2172 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqp = cq->dev->cqp; cq 2177 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 0, cq->cq_uk.cq_size); cq 2178 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 8, RS_64_1(cq, 1)); cq 2181 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); cq 2183 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); cq 2185 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 40, cq->shadow_area_pa); cq 2186 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); cq 2187 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); cq 2189 drivers/infiniband/hw/i40iw/i40iw_ctrl.c header = cq->cq_uk.cq_id | cq 2190 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | cq 2192 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | cq 2194 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | cq 2195 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | cq 2196 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | cq 2197 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | cq 2198 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | cq 2217 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq, cq 2225 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqp = cq->dev->cqp; cq 2229 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 0, cq->cq_uk.cq_size); cq 2230 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 8, RS_64_1(cq, 1)); cq 2231 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 40, cq->shadow_area_pa); cq 2232 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); cq 2234 drivers/infiniband/hw/i40iw/i40iw_ctrl.c header = cq->cq_uk.cq_id | cq 2235 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64((cq->ceq_id_valid ? 
cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | cq 2237 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | cq 2238 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | cq 2239 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | cq 2240 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | cq 2241 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | cq 2242 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | cq 2262 drivers/infiniband/hw/i40iw/i40iw_ctrl.c static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq, cq 2278 drivers/infiniband/hw/i40iw/i40iw_ctrl.c pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; cq 2284 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqp = cq->dev->cqp; cq 2289 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->pbl_list = info->pbl_list; cq 2290 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_pa = info->cq_pa; cq 2291 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->first_pm_pbl_idx = info->first_pm_pbl_idx; cq 2293 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size; cq 2298 drivers/infiniband/hw/i40iw/i40iw_ctrl.c ceq_id_valid = cq->ceq_id_valid; cq 2299 drivers/infiniband/hw/i40iw/i40iw_ctrl.c ceq_id = ceq_id_valid ? cq->ceq_id : 0; cq 2301 drivers/infiniband/hw/i40iw/i40iw_ctrl.c virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map; cq 2304 drivers/infiniband/hw/i40iw/i40iw_ctrl.c (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); cq 2307 drivers/infiniband/hw/i40iw/i40iw_ctrl.c (cq->virtual_map ? cq->pbl_chunk_size : 0)); cq 2309 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->check_overflow; cq 2310 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->cq_uk.cq_size = cq_size; cq 2311 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceq_id_valid = ceq_id_valid; cq 2312 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->ceq_id = ceq_id; cq 2313 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->virtual_map = virtual_map; cq 2314 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->first_pm_pbl_idx = first_pm_pbl_idx; cq 2315 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->pbl_chunk_size = pbl_chunk_size; cq 2316 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cq->check_overflow = check_overflow; cq 2319 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 8, RS_64_1(cq, 1)); cq 2322 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); cq 2323 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 40, cq->shadow_area_pa); cq 2324 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 48, (cq->virtual_map ? 
first_pm_pbl_idx : 0)); cq 2325 drivers/infiniband/hw/i40iw/i40iw_ctrl.c set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); cq 2327 drivers/infiniband/hw/i40iw/i40iw_ctrl.c header = cq->cq_uk.cq_id | cq 2334 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | cq 2336 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | cq 2337 drivers/infiniband/hw/i40iw/i40iw_ctrl.c LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | cq 4124 drivers/infiniband/hw/i40iw/i40iw_ctrl.c pcmdinfo->in.u.cq_create.cq, cq 4131 drivers/infiniband/hw/i40iw/i40iw_ctrl.c pcmdinfo->in.u.cq_destroy.cq, cq 122 drivers/infiniband/hw/i40iw/i40iw_hw.c static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm) cq 132 drivers/infiniband/hw/i40iw/i40iw_hw.c ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info); cq 161 drivers/infiniband/hw/i40iw/i40iw_hw.c dev->ccq_ops->ccq_arm(cq); cq 186 drivers/infiniband/hw/i40iw/i40iw_hw.c struct i40iw_sc_cq *cq) cq 193 drivers/infiniband/hw/i40iw/i40iw_hw.c status = i40iw_puda_poll_completion(dev, cq, &compl_error); cq 206 drivers/infiniband/hw/i40iw/i40iw_hw.c dev->ccq_ops->ccq_arm(cq); cq 218 drivers/infiniband/hw/i40iw/i40iw_hw.c struct i40iw_sc_cq *cq; cq 223 drivers/infiniband/hw/i40iw/i40iw_hw.c cq = dev->ceq_ops->process_ceq(dev, sc_ceq); cq 224 drivers/infiniband/hw/i40iw/i40iw_hw.c if (!cq) cq 227 drivers/infiniband/hw/i40iw/i40iw_hw.c if (cq->cq_type == I40IW_CQ_TYPE_CQP) cq 228 drivers/infiniband/hw/i40iw/i40iw_hw.c i40iw_cqp_ce_handler(iwdev, cq, arm); cq 229 drivers/infiniband/hw/i40iw/i40iw_hw.c else if (cq->cq_type == I40IW_CQ_TYPE_IWARP) cq 230 drivers/infiniband/hw/i40iw/i40iw_hw.c i40iw_iwarp_ce_handler(iwdev, cq); cq 231 drivers/infiniband/hw/i40iw/i40iw_hw.c else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) || cq 232 drivers/infiniband/hw/i40iw/i40iw_hw.c (cq->cq_type == I40IW_CQ_TYPE_IEQ)) cq 233 drivers/infiniband/hw/i40iw/i40iw_hw.c i40iw_puda_ce_handler(iwdev, cq); cq 285 drivers/infiniband/hw/i40iw/i40iw_hw.c struct i40iw_sc_cq *cq = NULL; cq 378 drivers/infiniband/hw/i40iw/i40iw_hw.c cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx; cq 379 drivers/infiniband/hw/i40iw/i40iw_hw.c iwcq = (struct i40iw_cq *)cq->back_cq; cq 386 drivers/infiniband/hw/i40iw/i40iw_hw.c ibevent.element.cq = &iwcq->ibcq; cq 227 drivers/infiniband/hw/i40iw/i40iw_puda.c static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq, cq 237 drivers/infiniband/hw/i40iw/i40iw_puda.c cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk); cq 241 drivers/infiniband/hw/i40iw/i40iw_puda.c if (valid_bit != cq->cq_uk.polarity) cq 244 drivers/infiniband/hw/i40iw/i40iw_puda.c i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32); cq 247 drivers/infiniband/hw/i40iw/i40iw_puda.c i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__); cq 281 drivers/infiniband/hw/i40iw/i40iw_puda.c struct i40iw_sc_cq *cq, u32 *compl_err) cq 284 drivers/infiniband/hw/i40iw/i40iw_puda.c struct i40iw_cq_uk *cq_uk = &cq->cq_uk; cq 290 drivers/infiniband/hw/i40iw/i40iw_puda.c u8 cq_type = cq->cq_type; cq 294 drivers/infiniband/hw/i40iw/i40iw_puda.c rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? 
cq->vsi->ilq : cq->vsi->ieq; cq 300 drivers/infiniband/hw/i40iw/i40iw_puda.c ret = i40iw_puda_poll_info(cq, &info); cq 628 drivers/infiniband/hw/i40iw/i40iw_puda.c static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq) cq 641 drivers/infiniband/hw/i40iw/i40iw_puda.c set_64bit_val(wqe, 0, cq->cq_uk.cq_size); cq 642 drivers/infiniband/hw/i40iw/i40iw_puda.c set_64bit_val(wqe, 8, RS_64_1(cq, 1)); cq 644 drivers/infiniband/hw/i40iw/i40iw_puda.c LS_64(cq->shadow_read_threshold, cq 646 drivers/infiniband/hw/i40iw/i40iw_puda.c set_64bit_val(wqe, 32, cq->cq_pa); cq 648 drivers/infiniband/hw/i40iw/i40iw_puda.c set_64bit_val(wqe, 40, cq->shadow_area_pa); cq 650 drivers/infiniband/hw/i40iw/i40iw_puda.c header = cq->cq_uk.cq_id | cq 675 drivers/infiniband/hw/i40iw/i40iw_puda.c struct i40iw_sc_cq *cq = &rsrc->cq; cq 682 drivers/infiniband/hw/i40iw/i40iw_puda.c cq->vsi = rsrc->vsi; cq 705 drivers/infiniband/hw/i40iw/i40iw_puda.c ret = dev->iw_priv_cq_ops->cq_init(cq, &info); cq 709 drivers/infiniband/hw/i40iw/i40iw_puda.c ret = i40iw_cqp_cq_create_cmd(dev, cq); cq 711 drivers/infiniband/hw/i40iw/i40iw_puda.c ret = i40iw_puda_cq_wqe(dev, cq); cq 762 drivers/infiniband/hw/i40iw/i40iw_puda.c i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq); cq 765 drivers/infiniband/hw/i40iw/i40iw_puda.c ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true); cq 976 drivers/infiniband/hw/i40iw/i40iw_puda.c dev->ccq_ops->ccq_arm(&rsrc->cq); cq 115 drivers/infiniband/hw/i40iw/i40iw_puda.h struct i40iw_sc_cq cq; cq 170 drivers/infiniband/hw/i40iw/i40iw_puda.h struct i40iw_sc_cq *cq, u32 *compl_err); cq 184 drivers/infiniband/hw/i40iw/i40iw_puda.h enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq); cq 186 drivers/infiniband/hw/i40iw/i40iw_puda.h void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq); cq 763 drivers/infiniband/hw/i40iw/i40iw_type.h bool cq; cq 1183 drivers/infiniband/hw/i40iw/i40iw_type.h struct i40iw_sc_cq *cq; cq 1189 drivers/infiniband/hw/i40iw/i40iw_type.h struct i40iw_sc_cq *cq; cq 699 drivers/infiniband/hw/i40iw/i40iw_uk.c static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq, cq 708 drivers/infiniband/hw/i40iw/i40iw_uk.c get_64bit_val(cq->shadow_area, 32, &temp_val); cq 722 drivers/infiniband/hw/i40iw/i40iw_uk.c set_64bit_val(cq->shadow_area, 32, temp_val); cq 726 drivers/infiniband/hw/i40iw/i40iw_uk.c writel(cq->cq_id, cq->cqe_alloc_reg); cq 734 drivers/infiniband/hw/i40iw/i40iw_uk.c static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq, cq 737 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count); cq 738 drivers/infiniband/hw/i40iw/i40iw_uk.c set_64bit_val(cq->shadow_area, 0, cq 739 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); cq 749 drivers/infiniband/hw/i40iw/i40iw_uk.c static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, cq 762 drivers/infiniband/hw/i40iw/i40iw_uk.c if (cq->avoid_mem_cflct) cq 763 drivers/infiniband/hw/i40iw/i40iw_uk.c cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq); cq 765 drivers/infiniband/hw/i40iw/i40iw_uk.c cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq); cq 770 drivers/infiniband/hw/i40iw/i40iw_uk.c if (polarity != cq->polarity) cq 827 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); cq 828 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_MOVE_TAIL(cq->cq_ring); cq 829 
drivers/infiniband/hw/i40iw/i40iw_uk.c set_64bit_val(cq->shadow_area, 0, cq 830 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); cq 832 drivers/infiniband/hw/i40iw/i40iw_uk.c return i40iw_cq_poll_completion(cq, info); cq 877 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); cq 879 drivers/infiniband/hw/i40iw/i40iw_uk.c if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0) cq 880 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->polarity ^= 1; cq 882 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_MOVE_TAIL(cq->cq_ring); cq 883 drivers/infiniband/hw/i40iw/i40iw_uk.c set_64bit_val(cq->shadow_area, 0, cq 884 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); cq 1056 drivers/infiniband/hw/i40iw/i40iw_uk.c enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq, cq 1062 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->cq_base = (struct i40iw_cqe *)info->cq_base; cq 1063 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->cq_id = info->cq_id; cq 1064 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->cq_size = info->cq_size; cq 1065 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->cqe_alloc_reg = info->cqe_alloc_reg; cq 1066 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->shadow_area = info->shadow_area; cq 1067 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->avoid_mem_cflct = info->avoid_mem_cflct; cq 1069 drivers/infiniband/hw/i40iw/i40iw_uk.c I40IW_RING_INIT(cq->cq_ring, cq->cq_size); cq 1070 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->polarity = 1; cq 1071 drivers/infiniband/hw/i40iw/i40iw_uk.c cq->ops = iw_cq_ops; cq 1090 drivers/infiniband/hw/i40iw/i40iw_uk.c void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq) cq 1097 drivers/infiniband/hw/i40iw/i40iw_uk.c cq_head = cq->cq_ring.head; cq 1098 drivers/infiniband/hw/i40iw/i40iw_uk.c temp = cq->polarity; cq 1100 drivers/infiniband/hw/i40iw/i40iw_uk.c if (cq->avoid_mem_cflct) cq 1101 drivers/infiniband/hw/i40iw/i40iw_uk.c cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]); cq 1103 drivers/infiniband/hw/i40iw/i40iw_uk.c cqe = (u64 *)&cq->cq_base[cq_head]; cq 1114 drivers/infiniband/hw/i40iw/i40iw_uk.c cq_head = (cq_head + 1) % cq->cq_ring.size; cq 415 drivers/infiniband/hw/i40iw/i40iw_user.h enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq, cq 420 drivers/infiniband/hw/i40iw/i40iw_user.h void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq); cq 1189 drivers/infiniband/hw/i40iw/i40iw_utils.c struct i40iw_sc_cq *cq) cq 1204 drivers/infiniband/hw/i40iw/i40iw_utils.c cqp_info->in.u.cq_create.cq = cq; cq 1255 drivers/infiniband/hw/i40iw/i40iw_utils.c void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq) cq 1259 drivers/infiniband/hw/i40iw/i40iw_utils.c i40iw_cq_wq_destroy(iwdev, cq); cq 1020 drivers/infiniband/hw/i40iw/i40iw_verbs.c struct i40iw_sc_cq *cq = &iwcq->sc_cq; cq 1024 drivers/infiniband/hw/i40iw/i40iw_verbs.c i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id); cq 1032 drivers/infiniband/hw/i40iw/i40iw_verbs.c void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq) cq 1046 drivers/infiniband/hw/i40iw/i40iw_verbs.c cqp_info->in.u.cq_destroy.cq = cq; cq 1062 drivers/infiniband/hw/i40iw/i40iw_verbs.c struct i40iw_sc_cq *cq; cq 1066 drivers/infiniband/hw/i40iw/i40iw_verbs.c cq = &iwcq->sc_cq; cq 1067 drivers/infiniband/hw/i40iw/i40iw_verbs.c i40iw_cq_wq_destroy(iwdev, cq); cq 1087 drivers/infiniband/hw/i40iw/i40iw_verbs.c struct i40iw_sc_cq *cq; cq 1110 drivers/infiniband/hw/i40iw/i40iw_verbs.c 
cq = &iwcq->sc_cq; cq 1111 drivers/infiniband/hw/i40iw/i40iw_verbs.c cq->back_cq = (void *)iwcq; cq 1177 drivers/infiniband/hw/i40iw/i40iw_verbs.c if (dev->iw_priv_cq_ops->cq_init(cq, &info)) { cq 1192 drivers/infiniband/hw/i40iw/i40iw_verbs.c cqp_info->in.u.cq_create.cq = cq; cq 1218 drivers/infiniband/hw/i40iw/i40iw_verbs.c i40iw_cq_wq_destroy(iwdev, cq); cq 43 drivers/infiniband/hw/mlx4/cq.c static void mlx4_ib_cq_comp(struct mlx4_cq *cq) cq 45 drivers/infiniband/hw/mlx4/cq.c struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; cq 49 drivers/infiniband/hw/mlx4/cq.c static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) cq 56 drivers/infiniband/hw/mlx4/cq.c "on CQ %06x\n", type, cq->cqn); cq 60 drivers/infiniband/hw/mlx4/cq.c ibcq = &to_mibcq(cq)->ibcq; cq 64 drivers/infiniband/hw/mlx4/cq.c event.element.cq = ibcq; cq 74 drivers/infiniband/hw/mlx4/cq.c static void *get_cqe(struct mlx4_ib_cq *cq, int n) cq 76 drivers/infiniband/hw/mlx4/cq.c return get_cqe_from_buf(&cq->buf, n); cq 79 drivers/infiniband/hw/mlx4/cq.c static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) cq 81 drivers/infiniband/hw/mlx4/cq.c struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); cq 82 drivers/infiniband/hw/mlx4/cq.c struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); cq 85 drivers/infiniband/hw/mlx4/cq.c !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; cq 88 drivers/infiniband/hw/mlx4/cq.c static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq) cq 90 drivers/infiniband/hw/mlx4/cq.c return get_sw_cqe(cq, cq->mcq.cons_index); cq 93 drivers/infiniband/hw/mlx4/cq.c int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) cq 95 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *mcq = to_mcq(cq); cq 96 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_dev *dev = to_mdev(cq->device); cq 182 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *cq = to_mcq(ibcq); cq 196 drivers/infiniband/hw/mlx4/cq.c cq->ibcq.cqe = entries - 1; cq 197 drivers/infiniband/hw/mlx4/cq.c mutex_init(&cq->resize_mutex); cq 198 drivers/infiniband/hw/mlx4/cq.c spin_lock_init(&cq->lock); cq 199 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 200 drivers/infiniband/hw/mlx4/cq.c cq->resize_umem = NULL; cq 201 drivers/infiniband/hw/mlx4/cq.c cq->create_flags = attr->flags; cq 202 drivers/infiniband/hw/mlx4/cq.c INIT_LIST_HEAD(&cq->send_qp_list); cq 203 drivers/infiniband/hw/mlx4/cq.c INIT_LIST_HEAD(&cq->recv_qp_list); cq 214 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem, cq 219 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &cq->db); cq 224 drivers/infiniband/hw/mlx4/cq.c cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS; cq 226 drivers/infiniband/hw/mlx4/cq.c err = mlx4_db_alloc(dev->dev, &cq->db, 1); cq 230 drivers/infiniband/hw/mlx4/cq.c cq->mcq.set_ci_db = cq->db.db; cq 231 drivers/infiniband/hw/mlx4/cq.c cq->mcq.arm_db = cq->db.db + 1; cq 232 drivers/infiniband/hw/mlx4/cq.c *cq->mcq.set_ci_db = 0; cq 233 drivers/infiniband/hw/mlx4/cq.c *cq->mcq.arm_db = 0; cq 235 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries); cq 239 drivers/infiniband/hw/mlx4/cq.c buf_addr = &cq->buf.buf; cq 242 drivers/infiniband/hw/mlx4/cq.c cq->mcq.usage = MLX4_RES_USAGE_DRIVER; cq 248 drivers/infiniband/hw/mlx4/cq.c err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, cq->db.dma, cq 249 drivers/infiniband/hw/mlx4/cq.c &cq->mcq, vector, 0, cq 250 drivers/infiniband/hw/mlx4/cq.c !!(cq->create_flags & cq 257 
drivers/infiniband/hw/mlx4/cq.c cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp; cq 259 drivers/infiniband/hw/mlx4/cq.c cq->mcq.comp = mlx4_ib_cq_comp; cq 260 drivers/infiniband/hw/mlx4/cq.c cq->mcq.event = mlx4_ib_cq_event; cq 263 drivers/infiniband/hw/mlx4/cq.c if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { cq 271 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_free(dev->dev, &cq->mcq); cq 275 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_db_unmap_user(context, &cq->db); cq 278 drivers/infiniband/hw/mlx4/cq.c mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); cq 280 drivers/infiniband/hw/mlx4/cq.c ib_umem_release(cq->umem); cq 282 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); cq 286 drivers/infiniband/hw/mlx4/cq.c mlx4_db_free(dev->dev, &cq->db); cq 291 drivers/infiniband/hw/mlx4/cq.c static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, cq 296 drivers/infiniband/hw/mlx4/cq.c if (cq->resize_buf) cq 299 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL); cq 300 drivers/infiniband/hw/mlx4/cq.c if (!cq->resize_buf) cq 303 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); cq 305 drivers/infiniband/hw/mlx4/cq.c kfree(cq->resize_buf); cq 306 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 310 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf->cqe = entries - 1; cq 315 drivers/infiniband/hw/mlx4/cq.c static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq, cq 321 drivers/infiniband/hw/mlx4/cq.c if (cq->resize_umem) cq 327 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL); cq 328 drivers/infiniband/hw/mlx4/cq.c if (!cq->resize_buf) cq 331 drivers/infiniband/hw/mlx4/cq.c err = mlx4_ib_get_cq_umem(dev, udata, &cq->resize_buf->buf, cq 332 drivers/infiniband/hw/mlx4/cq.c &cq->resize_umem, ucmd.buf_addr, entries); cq 334 drivers/infiniband/hw/mlx4/cq.c kfree(cq->resize_buf); cq 335 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 339 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf->cqe = entries - 1; cq 344 drivers/infiniband/hw/mlx4/cq.c static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq) cq 348 drivers/infiniband/hw/mlx4/cq.c i = cq->mcq.cons_index; cq 349 drivers/infiniband/hw/mlx4/cq.c while (get_sw_cqe(cq, i)) cq 352 drivers/infiniband/hw/mlx4/cq.c return i - cq->mcq.cons_index; cq 355 drivers/infiniband/hw/mlx4/cq.c static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq) cq 359 drivers/infiniband/hw/mlx4/cq.c int cqe_size = cq->buf.entry_size; cq 362 drivers/infiniband/hw/mlx4/cq.c i = cq->mcq.cons_index; cq 363 drivers/infiniband/hw/mlx4/cq.c cqe = get_cqe(cq, i & cq->ibcq.cqe); cq 367 drivers/infiniband/hw/mlx4/cq.c new_cqe = get_cqe_from_buf(&cq->resize_buf->buf, cq 368 drivers/infiniband/hw/mlx4/cq.c (i + 1) & cq->resize_buf->cqe); cq 369 drivers/infiniband/hw/mlx4/cq.c memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size); cq 373 drivers/infiniband/hw/mlx4/cq.c (((i + 1) & (cq->resize_buf->cqe + 1)) ? 
MLX4_CQE_OWNER_MASK : 0); cq 374 drivers/infiniband/hw/mlx4/cq.c cqe = get_cqe(cq, ++i & cq->ibcq.cqe); cq 377 drivers/infiniband/hw/mlx4/cq.c ++cq->mcq.cons_index; cq 383 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *cq = to_mcq(ibcq); cq 388 drivers/infiniband/hw/mlx4/cq.c mutex_lock(&cq->resize_mutex); cq 406 drivers/infiniband/hw/mlx4/cq.c err = mlx4_alloc_resize_umem(dev, cq, entries, udata); cq 411 drivers/infiniband/hw/mlx4/cq.c outst_cqe = mlx4_ib_get_outstanding_cqes(cq); cq 417 drivers/infiniband/hw/mlx4/cq.c err = mlx4_alloc_resize_buf(dev, cq, entries); cq 422 drivers/infiniband/hw/mlx4/cq.c mtt = cq->buf.mtt; cq 424 drivers/infiniband/hw/mlx4/cq.c err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt); cq 430 drivers/infiniband/hw/mlx4/cq.c cq->buf = cq->resize_buf->buf; cq 431 drivers/infiniband/hw/mlx4/cq.c cq->ibcq.cqe = cq->resize_buf->cqe; cq 432 drivers/infiniband/hw/mlx4/cq.c ib_umem_release(cq->umem); cq 433 drivers/infiniband/hw/mlx4/cq.c cq->umem = cq->resize_umem; cq 435 drivers/infiniband/hw/mlx4/cq.c kfree(cq->resize_buf); cq 436 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 437 drivers/infiniband/hw/mlx4/cq.c cq->resize_umem = NULL; cq 442 drivers/infiniband/hw/mlx4/cq.c spin_lock_irq(&cq->lock); cq 443 drivers/infiniband/hw/mlx4/cq.c if (cq->resize_buf) { cq 444 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_cq_resize_copy_cqes(cq); cq 445 drivers/infiniband/hw/mlx4/cq.c tmp_buf = cq->buf; cq 446 drivers/infiniband/hw/mlx4/cq.c tmp_cqe = cq->ibcq.cqe; cq 447 drivers/infiniband/hw/mlx4/cq.c cq->buf = cq->resize_buf->buf; cq 448 drivers/infiniband/hw/mlx4/cq.c cq->ibcq.cqe = cq->resize_buf->cqe; cq 450 drivers/infiniband/hw/mlx4/cq.c kfree(cq->resize_buf); cq 451 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 453 drivers/infiniband/hw/mlx4/cq.c spin_unlock_irq(&cq->lock); cq 462 drivers/infiniband/hw/mlx4/cq.c mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt); cq 464 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf, cq 465 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf->cqe); cq 467 drivers/infiniband/hw/mlx4/cq.c kfree(cq->resize_buf); cq 468 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 470 drivers/infiniband/hw/mlx4/cq.c ib_umem_release(cq->resize_umem); cq 471 drivers/infiniband/hw/mlx4/cq.c cq->resize_umem = NULL; cq 473 drivers/infiniband/hw/mlx4/cq.c mutex_unlock(&cq->resize_mutex); cq 478 drivers/infiniband/hw/mlx4/cq.c void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) cq 480 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_dev *dev = to_mdev(cq->device); cq 481 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *mcq = to_mcq(cq); cq 494 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe); cq 585 drivers/infiniband/hw/mlx4/cq.c static void use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, cq 636 drivers/infiniband/hw/mlx4/cq.c static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, cq 645 drivers/infiniband/hw/mlx4/cq.c list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { cq 651 drivers/infiniband/hw/mlx4/cq.c list_for_each_entry(qp, &cq->recv_qp_list, cq_recv_list) { cq 661 drivers/infiniband/hw/mlx4/cq.c static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, cq 678 drivers/infiniband/hw/mlx4/cq.c cqe = next_cqe_sw(cq); cq 682 drivers/infiniband/hw/mlx4/cq.c if (cq->buf.entry_size == 64) cq 685 drivers/infiniband/hw/mlx4/cq.c ++cq->mcq.cons_index; cq 699 drivers/infiniband/hw/mlx4/cq.c 
if (cq->resize_buf) { cq 700 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device); cq 702 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); cq 703 drivers/infiniband/hw/mlx4/cq.c cq->buf = cq->resize_buf->buf; cq 704 drivers/infiniband/hw/mlx4/cq.c cq->ibcq.cqe = cq->resize_buf->cqe; cq 706 drivers/infiniband/hw/mlx4/cq.c kfree(cq->resize_buf); cq 707 drivers/infiniband/hw/mlx4/cq.c cq->resize_buf = NULL; cq 720 drivers/infiniband/hw/mlx4/cq.c mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, cq 732 drivers/infiniband/hw/mlx4/cq.c msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, cq 842 drivers/infiniband/hw/mlx4/cq.c if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) { cq 846 drivers/infiniband/hw/mlx4/cq.c use_tunnel_data(*cur_qp, cq, wc, tail, cqe, cq 883 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *cq = to_mcq(ibcq); cq 887 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device); cq 889 drivers/infiniband/hw/mlx4/cq.c spin_lock_irqsave(&cq->lock, flags); cq 891 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled); cq 896 drivers/infiniband/hw/mlx4/cq.c if (mlx4_ib_poll_one(cq, &cur_qp, wc + npolled)) cq 900 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_set_ci(&cq->mcq); cq 903 drivers/infiniband/hw/mlx4/cq.c spin_unlock_irqrestore(&cq->lock, flags); cq 919 drivers/infiniband/hw/mlx4/cq.c void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) cq 925 drivers/infiniband/hw/mlx4/cq.c int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0; cq 934 drivers/infiniband/hw/mlx4/cq.c for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index) cq 935 drivers/infiniband/hw/mlx4/cq.c if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) cq 942 drivers/infiniband/hw/mlx4/cq.c while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { cq 943 drivers/infiniband/hw/mlx4/cq.c cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); cq 951 drivers/infiniband/hw/mlx4/cq.c dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); cq 962 drivers/infiniband/hw/mlx4/cq.c cq->mcq.cons_index += nfreed; cq 968 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_set_ci(&cq->mcq); cq 972 drivers/infiniband/hw/mlx4/cq.c void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) cq 974 drivers/infiniband/hw/mlx4/cq.c spin_lock_irq(&cq->lock); cq 975 drivers/infiniband/hw/mlx4/cq.c __mlx4_ib_cq_clean(cq, qpn, srq); cq 976 drivers/infiniband/hw/mlx4/cq.c spin_unlock_irq(&cq->lock); cq 1299 drivers/infiniband/hw/mlx4/mad.c static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg) cq 1302 drivers/infiniband/hw/mlx4/mad.c struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; cq 1739 drivers/infiniband/hw/mlx4/mad.c ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); cq 1741 drivers/infiniband/hw/mlx4/mad.c while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { cq 1811 drivers/infiniband/hw/mlx4/mad.c qp_init_attr.init_attr.send_cq = ctx->cq; cq 1812 drivers/infiniband/hw/mlx4/mad.c qp_init_attr.init_attr.recv_cq = ctx->cq; cq 1905 drivers/infiniband/hw/mlx4/mad.c ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); cq 1907 drivers/infiniband/hw/mlx4/mad.c while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { cq 2012 drivers/infiniband/hw/mlx4/mad.c ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, cq 2014 drivers/infiniband/hw/mlx4/mad.c if (IS_ERR(ctx->cq)) { cq 2015 drivers/infiniband/hw/mlx4/mad.c ret = PTR_ERR(ctx->cq); cq 2050 drivers/infiniband/hw/mlx4/mad.c ret = 
ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); cq 2074 drivers/infiniband/hw/mlx4/mad.c ib_destroy_cq(ctx->cq); cq 2075 drivers/infiniband/hw/mlx4/mad.c ctx->cq = NULL; cq 2107 drivers/infiniband/hw/mlx4/mad.c ib_destroy_cq(ctx->cq); cq 2108 drivers/infiniband/hw/mlx4/mad.c ctx->cq = NULL; cq 2239 drivers/infiniband/hw/mlx4/mad.c ib_destroy_cq(sqp_ctx->cq); cq 2240 drivers/infiniband/hw/mlx4/mad.c sqp_ctx->cq = NULL; cq 537 drivers/infiniband/hw/mlx4/main.c props->max_cq = dev->dev->quotas.cq; cq 1241 drivers/infiniband/hw/mlx4/main.c xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr); cq 1242 drivers/infiniband/hw/mlx4/main.c if (IS_ERR(xrcd->cq)) { cq 1243 drivers/infiniband/hw/mlx4/main.c err = PTR_ERR(xrcd->cq); cq 1260 drivers/infiniband/hw/mlx4/main.c ib_destroy_cq(to_mxrcd(xrcd)->cq); cq 101 drivers/infiniband/hw/mlx4/mlx4_ib.h struct ib_cq *cq; cq 458 drivers/infiniband/hw/mlx4/mlx4_ib.h struct ib_cq *cq; cq 744 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); cq 748 drivers/infiniband/hw/mlx4/mlx4_ib.h void mlx4_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); cq 750 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); cq 751 drivers/infiniband/hw/mlx4/mlx4_ib.h void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); cq 752 drivers/infiniband/hw/mlx4/mlx4_ib.h void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq); cq 1370 drivers/infiniband/hw/mlx4/qp.c *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); cq 1379 drivers/infiniband/hw/mlx4/qp.c to_mcq(qp->ibwq.cq); cq 1580 drivers/infiniband/hw/mlx4/qp.c init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; cq 2381 drivers/infiniband/hw/mlx4/qp.c send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq); cq 3292 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_cq *cq; cq 3298 drivers/infiniband/hw/mlx4/qp.c cq = to_mcq(ib_cq); cq 3299 drivers/infiniband/hw/mlx4/qp.c spin_lock(&cq->lock); cq 3301 drivers/infiniband/hw/mlx4/qp.c spin_unlock(&cq->lock); cq 4182 drivers/infiniband/hw/mlx4/qp.c ib_qp_init_attr.recv_cq = init_attr->cq; cq 4418 drivers/infiniband/hw/mlx4/qp.c static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) cq 4428 drivers/infiniband/hw/mlx4/qp.c static void handle_drain_completion(struct ib_cq *cq, cq 4434 drivers/infiniband/hw/mlx4/qp.c if (cq->poll_ctx == IB_POLL_DIRECT) { cq 4436 drivers/infiniband/hw/mlx4/qp.c ib_process_cq_direct(cq, -1); cq 4441 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_cq *mcq = to_mcq(cq); cq 4455 drivers/infiniband/hw/mlx4/qp.c switch (cq->poll_ctx) { cq 4457 drivers/infiniband/hw/mlx4/qp.c irq_poll_disable(&cq->iop); cq 4458 drivers/infiniband/hw/mlx4/qp.c irq_poll_enable(&cq->iop); cq 4461 drivers/infiniband/hw/mlx4/qp.c cancel_work_sync(&cq->work); cq 4479 drivers/infiniband/hw/mlx4/qp.c struct ib_cq *cq = qp->send_cq; cq 4509 drivers/infiniband/hw/mlx4/qp.c handle_drain_completion(cq, &sdrain, dev); cq 4514 drivers/infiniband/hw/mlx4/qp.c struct ib_cq *cq = qp->recv_cq; cq 4539 drivers/infiniband/hw/mlx4/qp.c handle_drain_completion(cq, &rdrain, dev); cq 175 drivers/infiniband/hw/mlx4/srq.c to_mcq(init_attr->ext.cq)->mcq.cqn : 0; cq 40 drivers/infiniband/hw/mlx5/cq.c static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe) cq 42 drivers/infiniband/hw/mlx5/cq.c struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; cq 49 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = container_of(mcq, struct 
mlx5_ib_cq, mcq); cq 50 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); cq 51 drivers/infiniband/hw/mlx5/cq.c struct ib_cq *ibcq = &cq->ibcq; cq 63 drivers/infiniband/hw/mlx5/cq.c event.element.cq = ibcq; cq 68 drivers/infiniband/hw/mlx5/cq.c static void *get_cqe(struct mlx5_ib_cq *cq, int n) cq 70 drivers/infiniband/hw/mlx5/cq.c return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); cq 78 drivers/infiniband/hw/mlx5/cq.c static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) cq 80 drivers/infiniband/hw/mlx5/cq.c void *cqe = get_cqe(cq, n & cq->ibcq.cqe); cq 83 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; cq 86 drivers/infiniband/hw/mlx5/cq.c !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { cq 93 drivers/infiniband/hw/mlx5/cq.c static void *next_cqe_sw(struct mlx5_ib_cq *cq) cq 95 drivers/infiniband/hw/mlx5/cq.c return get_sw_cqe(cq, cq->mcq.cons_index); cq 419 drivers/infiniband/hw/mlx5/cq.c static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, cq 426 drivers/infiniband/hw/mlx5/cq.c list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { cq 432 drivers/infiniband/hw/mlx5/cq.c list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { cq 439 drivers/infiniband/hw/mlx5/cq.c static int mlx5_poll_one(struct mlx5_ib_cq *cq, cq 443 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); cq 458 drivers/infiniband/hw/mlx5/cq.c cqe = next_cqe_sw(cq); cq 462 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; cq 464 drivers/infiniband/hw/mlx5/cq.c ++cq->mcq.cons_index; cq 473 drivers/infiniband/hw/mlx5/cq.c if (likely(cq->resize_buf)) { cq 474 drivers/infiniband/hw/mlx5/cq.c free_cq_buf(dev, &cq->buf); cq 475 drivers/infiniband/hw/mlx5/cq.c cq->buf = *cq->resize_buf; cq 476 drivers/infiniband/hw/mlx5/cq.c kfree(cq->resize_buf); cq 477 drivers/infiniband/hw/mlx5/cq.c cq->resize_buf = NULL; cq 521 drivers/infiniband/hw/mlx5/cq.c "Requestor" : "Responder", cq->mcq.cqn); cq 557 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqn, mr->sig->err_item.key, cq 570 drivers/infiniband/hw/mlx5/cq.c static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, cq 573 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); cq 577 drivers/infiniband/hw/mlx5/cq.c list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) { cq 582 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqn); cq 598 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = to_mcq(ibcq); cq 600 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); cq 606 drivers/infiniband/hw/mlx5/cq.c spin_lock_irqsave(&cq->lock, flags); cq 609 drivers/infiniband/hw/mlx5/cq.c if (unlikely(!list_empty(&cq->wc_list))) cq 610 drivers/infiniband/hw/mlx5/cq.c soft_polled = poll_soft_wc(cq, num_entries, wc, true); cq 612 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled, cq 617 drivers/infiniband/hw/mlx5/cq.c if (unlikely(!list_empty(&cq->wc_list))) cq 618 drivers/infiniband/hw/mlx5/cq.c soft_polled = poll_soft_wc(cq, num_entries, wc, false); cq 621 drivers/infiniband/hw/mlx5/cq.c if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) cq 626 drivers/infiniband/hw/mlx5/cq.c mlx5_cq_set_ci(&cq->mcq); cq 628 drivers/infiniband/hw/mlx5/cq.c spin_unlock_irqrestore(&cq->lock, flags); cq 636 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = to_mcq(ibcq); cq 641 drivers/infiniband/hw/mlx5/cq.c spin_lock_irqsave(&cq->lock, 
irq_flags); cq 642 drivers/infiniband/hw/mlx5/cq.c if (cq->notify_flags != IB_CQ_NEXT_COMP) cq 643 drivers/infiniband/hw/mlx5/cq.c cq->notify_flags = flags & IB_CQ_SOLICITED_MASK; cq 645 drivers/infiniband/hw/mlx5/cq.c if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list)) cq 647 drivers/infiniband/hw/mlx5/cq.c spin_unlock_irqrestore(&cq->lock, irq_flags); cq 649 drivers/infiniband/hw/mlx5/cq.c mlx5_cq_arm(&cq->mcq, cq 705 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq, int entries, u32 **cqb, cq 734 drivers/infiniband/hw/mlx5/cq.c cq->buf.umem = cq 737 drivers/infiniband/hw/mlx5/cq.c if (IS_ERR(cq->buf.umem)) { cq 738 drivers/infiniband/hw/mlx5/cq.c err = PTR_ERR(cq->buf.umem); cq 742 drivers/infiniband/hw/mlx5/cq.c err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db); cq 746 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift, cq 760 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0); cq 805 drivers/infiniband/hw/mlx5/cq.c cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD; cq 815 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_db_unmap_user(context, &cq->db); cq 818 drivers/infiniband/hw/mlx5/cq.c ib_umem_release(cq->buf.umem); cq 822 drivers/infiniband/hw/mlx5/cq.c static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata) cq 827 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_db_unmap_user(context, &cq->db); cq 828 drivers/infiniband/hw/mlx5/cq.c ib_umem_release(cq->buf.umem); cq 831 drivers/infiniband/hw/mlx5/cq.c static void init_cq_frag_buf(struct mlx5_ib_cq *cq, cq 839 drivers/infiniband/hw/mlx5/cq.c cqe = get_cqe(cq, i); cq 845 drivers/infiniband/hw/mlx5/cq.c static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, cq 853 drivers/infiniband/hw/mlx5/cq.c err = mlx5_db_alloc(dev->mdev, &cq->db); cq 857 drivers/infiniband/hw/mlx5/cq.c cq->mcq.set_ci_db = cq->db.db; cq 858 drivers/infiniband/hw/mlx5/cq.c cq->mcq.arm_db = cq->db.db + 1; cq 859 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqe_sz = cqe_size; cq 861 drivers/infiniband/hw/mlx5/cq.c err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size); cq 865 drivers/infiniband/hw/mlx5/cq.c init_cq_frag_buf(cq, &cq->buf); cq 869 drivers/infiniband/hw/mlx5/cq.c cq->buf.frag_buf.npages; cq 877 drivers/infiniband/hw/mlx5/cq.c mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas); cq 881 drivers/infiniband/hw/mlx5/cq.c cq->buf.frag_buf.page_shift - cq 889 drivers/infiniband/hw/mlx5/cq.c free_cq_buf(dev, &cq->buf); cq 892 drivers/infiniband/hw/mlx5/cq.c mlx5_db_free(dev->mdev, &cq->db); cq 896 drivers/infiniband/hw/mlx5/cq.c static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) cq 898 drivers/infiniband/hw/mlx5/cq.c free_cq_buf(dev, &cq->buf); cq 899 drivers/infiniband/hw/mlx5/cq.c mlx5_db_free(dev->mdev, &cq->db); cq 904 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, cq 907 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); cq 917 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = to_mcq(ibcq); cq 939 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.cqe = entries - 1; cq 940 drivers/infiniband/hw/mlx5/cq.c mutex_init(&cq->resize_mutex); cq 941 drivers/infiniband/hw/mlx5/cq.c spin_lock_init(&cq->lock); cq 942 drivers/infiniband/hw/mlx5/cq.c cq->resize_buf = NULL; cq 943 drivers/infiniband/hw/mlx5/cq.c cq->resize_umem = NULL; cq 944 drivers/infiniband/hw/mlx5/cq.c cq->create_flags = attr->flags; cq 
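
Aside: the get_sw_cqe() fragments from drivers/infiniband/hw/mlx5/cq.c above test !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1))). With ibcq.cqe == nent - 1 and nent a power of two, n & nent is the lap parity of index n, and the hardware alternates the owner bit it writes on every pass, so a CQE belongs to software exactly when the two match. Below is a minimal standalone sketch of that convention; all model_* names are hypothetical, and the real driver additionally screens out slots holding the MLX5_CQE_INVALID opcode.

    #include <stdio.h>
    #include <stdint.h>

    #define NENT 8                    /* ring size; must be a power of two */
    #define OWNER_BIT 0x1

    struct model_cqe { uint8_t op_own; };

    static struct model_cqe ring[NENT];

    /* "hardware": the owner bit it writes flips on every lap */
    static void hw_post(unsigned int prod)
    {
        ring[prod & (NENT - 1)].op_own = (prod / NENT) & 1;
    }

    /* software owns index n when the owner bit equals n's lap parity */
    static int sw_owned(unsigned int n)
    {
        struct model_cqe *cqe = &ring[n & (NENT - 1)];

        return !((cqe->op_own & OWNER_BIT) ^ !!(n & NENT));
    }

    int main(void)
    {
        unsigned int cons = 0;

        /* mimic the buffer init: mark every slot hardware-owned */
        for (unsigned int i = 0; i < NENT; i++)
            ring[i].op_own = OWNER_BIT;

        for (unsigned int prod = 0; prod < 3 * NENT; prod++) {
            hw_post(prod);
            while (sw_owned(cons))    /* reap everything posted so far */
                printf("consumed CQE %u\n", cons++);
        }
        return 0;
    }

The parity trick is why no separate valid flag per CQE is needed: stale entries from the previous lap carry the wrong owner bit automatically.
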
945 drivers/infiniband/hw/mlx5/cq.c INIT_LIST_HEAD(&cq->list_send_qp); cq 946 drivers/infiniband/hw/mlx5/cq.c INIT_LIST_HEAD(&cq->list_recv_qp); cq 949 drivers/infiniband/hw/mlx5/cq.c err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, cq 955 drivers/infiniband/hw/mlx5/cq.c err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, cq 960 drivers/infiniband/hw/mlx5/cq.c INIT_WORK(&cq->notify_work, notify_soft_wc_handler); cq 967 drivers/infiniband/hw/mlx5/cq.c cq->cqe_size = cqe_size; cq 972 drivers/infiniband/hw/mlx5/cq.c cq->private_flags & cq 977 drivers/infiniband/hw/mlx5/cq.c MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma); cq 978 drivers/infiniband/hw/mlx5/cq.c if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) cq 981 drivers/infiniband/hw/mlx5/cq.c err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out)); cq 985 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); cq 986 drivers/infiniband/hw/mlx5/cq.c cq->mcq.irqn = irqn; cq 988 drivers/infiniband/hw/mlx5/cq.c cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; cq 990 drivers/infiniband/hw/mlx5/cq.c cq->mcq.comp = mlx5_ib_cq_comp; cq 991 drivers/infiniband/hw/mlx5/cq.c cq->mcq.event = mlx5_ib_cq_event; cq 993 drivers/infiniband/hw/mlx5/cq.c INIT_LIST_HEAD(&cq->wc_list); cq 996 drivers/infiniband/hw/mlx5/cq.c if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { cq 1006 drivers/infiniband/hw/mlx5/cq.c mlx5_core_destroy_cq(dev->mdev, &cq->mcq); cq 1011 drivers/infiniband/hw/mlx5/cq.c destroy_cq_user(cq, udata); cq 1013 drivers/infiniband/hw/mlx5/cq.c destroy_cq_kernel(dev, cq); cq 1017 drivers/infiniband/hw/mlx5/cq.c void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) cq 1019 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->device); cq 1020 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *mcq = to_mcq(cq); cq 1034 drivers/infiniband/hw/mlx5/cq.c void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) cq 1042 drivers/infiniband/hw/mlx5/cq.c if (!cq) cq 1051 drivers/infiniband/hw/mlx5/cq.c for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) cq 1052 drivers/infiniband/hw/mlx5/cq.c if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) cq 1058 drivers/infiniband/hw/mlx5/cq.c while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { cq 1059 drivers/infiniband/hw/mlx5/cq.c cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); cq 1060 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; cq 1066 drivers/infiniband/hw/mlx5/cq.c dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); cq 1067 drivers/infiniband/hw/mlx5/cq.c dest64 = (cq->mcq.cqe_sz == 64) ? 
dest : dest + 64; cq 1069 drivers/infiniband/hw/mlx5/cq.c memcpy(dest, cqe, cq->mcq.cqe_sz); cq 1076 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cons_index += nfreed; cq 1081 drivers/infiniband/hw/mlx5/cq.c mlx5_cq_set_ci(&cq->mcq); cq 1085 drivers/infiniband/hw/mlx5/cq.c void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) cq 1087 drivers/infiniband/hw/mlx5/cq.c if (!cq) cq 1090 drivers/infiniband/hw/mlx5/cq.c spin_lock_irq(&cq->lock); cq 1091 drivers/infiniband/hw/mlx5/cq.c __mlx5_ib_cq_clean(cq, qpn, srq); cq 1092 drivers/infiniband/hw/mlx5/cq.c spin_unlock_irq(&cq->lock); cq 1095 drivers/infiniband/hw/mlx5/cq.c int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) cq 1097 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->device); cq 1098 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *mcq = to_mcq(cq); cq 1115 drivers/infiniband/hw/mlx5/cq.c static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, cq 1146 drivers/infiniband/hw/mlx5/cq.c cq->resize_umem = umem; cq 1152 drivers/infiniband/hw/mlx5/cq.c static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, cq 1157 drivers/infiniband/hw/mlx5/cq.c cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); cq 1158 drivers/infiniband/hw/mlx5/cq.c if (!cq->resize_buf) cq 1161 drivers/infiniband/hw/mlx5/cq.c err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size); cq 1165 drivers/infiniband/hw/mlx5/cq.c init_cq_frag_buf(cq, cq->resize_buf); cq 1170 drivers/infiniband/hw/mlx5/cq.c kfree(cq->resize_buf); cq 1174 drivers/infiniband/hw/mlx5/cq.c static int copy_resize_cqes(struct mlx5_ib_cq *cq) cq 1176 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); cq 1187 drivers/infiniband/hw/mlx5/cq.c ssize = cq->buf.cqe_size; cq 1188 drivers/infiniband/hw/mlx5/cq.c dsize = cq->resize_buf->cqe_size; cq 1194 drivers/infiniband/hw/mlx5/cq.c i = cq->mcq.cons_index; cq 1195 drivers/infiniband/hw/mlx5/cq.c scqe = get_sw_cqe(cq, i); cq 1204 drivers/infiniband/hw/mlx5/cq.c dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc, cq 1205 drivers/infiniband/hw/mlx5/cq.c (i + 1) & cq->resize_buf->nent); cq 1207 drivers/infiniband/hw/mlx5/cq.c sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); cq 1212 drivers/infiniband/hw/mlx5/cq.c scqe = get_sw_cqe(cq, i); cq 1221 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqn); cq 1225 drivers/infiniband/hw/mlx5/cq.c ++cq->mcq.cons_index; cq 1232 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = to_mcq(ibcq); cq 1263 drivers/infiniband/hw/mlx5/cq.c mutex_lock(&cq->resize_mutex); cq 1265 drivers/infiniband/hw/mlx5/cq.c err = resize_user(dev, cq, entries, udata, &npas, &page_shift, cq 1269 drivers/infiniband/hw/mlx5/cq.c err = resize_kernel(dev, cq, entries, cqe_size); cq 1271 drivers/infiniband/hw/mlx5/cq.c struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf; cq 1292 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift, cq 1295 drivers/infiniband/hw/mlx5/cq.c mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas); cq 1309 drivers/infiniband/hw/mlx5/cq.c cq->private_flags & cq 1314 drivers/infiniband/hw/mlx5/cq.c MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn); cq 1316 drivers/infiniband/hw/mlx5/cq.c err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); cq 1321 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.cqe = entries - 1; cq 1322 drivers/infiniband/hw/mlx5/cq.c ib_umem_release(cq->buf.umem); cq 1323 drivers/infiniband/hw/mlx5/cq.c cq->buf.umem = 
cq->resize_umem; cq 1324 drivers/infiniband/hw/mlx5/cq.c cq->resize_umem = NULL; cq 1329 drivers/infiniband/hw/mlx5/cq.c spin_lock_irqsave(&cq->lock, flags); cq 1330 drivers/infiniband/hw/mlx5/cq.c if (cq->resize_buf) { cq 1331 drivers/infiniband/hw/mlx5/cq.c err = copy_resize_cqes(cq); cq 1333 drivers/infiniband/hw/mlx5/cq.c tbuf = cq->buf; cq 1334 drivers/infiniband/hw/mlx5/cq.c cq->buf = *cq->resize_buf; cq 1335 drivers/infiniband/hw/mlx5/cq.c kfree(cq->resize_buf); cq 1336 drivers/infiniband/hw/mlx5/cq.c cq->resize_buf = NULL; cq 1340 drivers/infiniband/hw/mlx5/cq.c cq->ibcq.cqe = entries - 1; cq 1341 drivers/infiniband/hw/mlx5/cq.c spin_unlock_irqrestore(&cq->lock, flags); cq 1345 drivers/infiniband/hw/mlx5/cq.c mutex_unlock(&cq->resize_mutex); cq 1354 drivers/infiniband/hw/mlx5/cq.c ib_umem_release(cq->resize_umem); cq 1356 drivers/infiniband/hw/mlx5/cq.c free_cq_buf(dev, cq->resize_buf); cq 1357 drivers/infiniband/hw/mlx5/cq.c cq->resize_buf = NULL; cq 1360 drivers/infiniband/hw/mlx5/cq.c mutex_unlock(&cq->resize_mutex); cq 1366 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq; cq 1371 drivers/infiniband/hw/mlx5/cq.c cq = to_mcq(ibcq); cq 1372 drivers/infiniband/hw/mlx5/cq.c return cq->cqe_size; cq 1379 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = to_mcq(ibcq); cq 1387 drivers/infiniband/hw/mlx5/cq.c spin_lock_irqsave(&cq->lock, flags); cq 1388 drivers/infiniband/hw/mlx5/cq.c list_add_tail(&soft_wc->list, &cq->wc_list); cq 1389 drivers/infiniband/hw/mlx5/cq.c if (cq->notify_flags == IB_CQ_NEXT_COMP || cq 1391 drivers/infiniband/hw/mlx5/cq.c cq->notify_flags = 0; cq 1392 drivers/infiniband/hw/mlx5/cq.c schedule_work(&cq->notify_work); cq 1394 drivers/infiniband/hw/mlx5/cq.c spin_unlock_irqrestore(&cq->lock, flags); cq 50 drivers/infiniband/hw/mlx5/gsi.c struct ib_cq *cq; cq 96 drivers/infiniband/hw/mlx5/gsi.c static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc) cq 98 drivers/infiniband/hw/mlx5/gsi.c struct mlx5_ib_gsi_qp *gsi = cq->cq_context; cq 171 drivers/infiniband/hw/mlx5/gsi.c gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0, cq 173 drivers/infiniband/hw/mlx5/gsi.c if (IS_ERR(gsi->cq)) { cq 175 drivers/infiniband/hw/mlx5/gsi.c PTR_ERR(gsi->cq)); cq 176 drivers/infiniband/hw/mlx5/gsi.c ret = PTR_ERR(gsi->cq); cq 181 drivers/infiniband/hw/mlx5/gsi.c hw_init_attr.send_cq = gsi->cq; cq 202 drivers/infiniband/hw/mlx5/gsi.c ib_free_cq(gsi->cq); cq 242 drivers/infiniband/hw/mlx5/gsi.c ib_free_cq(gsi->cq); cq 257 drivers/infiniband/hw/mlx5/gsi.c .send_cq = gsi->cq, cq 4794 drivers/infiniband/hw/mlx5/main.c if (dev->umrc.cq) cq 4795 drivers/infiniband/hw/mlx5/main.c ib_free_cq(dev->umrc.cq); cq 4809 drivers/infiniband/hw/mlx5/main.c struct ib_cq *cq; cq 4827 drivers/infiniband/hw/mlx5/main.c cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); cq 4828 drivers/infiniband/hw/mlx5/main.c if (IS_ERR(cq)) { cq 4830 drivers/infiniband/hw/mlx5/main.c ret = PTR_ERR(cq); cq 4834 drivers/infiniband/hw/mlx5/main.c init_attr->send_cq = cq; cq 4835 drivers/infiniband/hw/mlx5/main.c init_attr->recv_cq = cq; cq 4882 drivers/infiniband/hw/mlx5/main.c dev->umrc.cq = cq; cq 4902 drivers/infiniband/hw/mlx5/main.c ib_free_cq(cq); cq 4903 drivers/infiniband/hw/mlx5/main.c dev->umrc.cq = NULL; cq 4992 drivers/infiniband/hw/mlx5/main.c attr.ext.cq = devr->c0; cq 5005 drivers/infiniband/hw/mlx5/main.c devr->s0->ext.cq = devr->c0; cq 5011 drivers/infiniband/hw/mlx5/main.c atomic_inc(&devr->s0->ext.cq->usecnt); cq 5028 
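
Aside: the copy_resize_cqes() and mlx5_ib_resize_cq() fragments above show the kernel-side resize sequence: allocate a new buffer, let firmware switch over, copy still-unpolled CQEs into the new ring with their ownership recomputed for the new size, then swap cq->buf with *cq->resize_buf under cq->lock. A rough standalone model of the copy step follows; names are hypothetical, plain arrays stand in for frag buffers, and the destination indexing is simplified relative to the driver.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    struct model_cqe { uint8_t op_own; unsigned int payload; };

    /* owner bit a software-owned CQE carries at index n in a ring of nent */
    static uint8_t owner_for(unsigned int n, unsigned int nent)
    {
        return (n / nent) & 1;            /* role of sw_ownership_bit() */
    }

    int main(void)
    {
        struct model_cqe old_ring[8] = { 0 }, new_ring[16] = { 0 };
        unsigned int cons = 5, pending = 4;    /* 4 CQEs not yet polled */

        /* pretend hardware posted entries cons .. cons+pending-1 */
        for (unsigned int i = cons; i < cons + pending; i++) {
            old_ring[i % 8].payload = i;
            old_ring[i % 8].op_own = owner_for(i, 8);
        }

        /* the copy_resize_cqes() step: move CQEs, fix the owner bit */
        for (unsigned int i = cons; i < cons + pending; i++) {
            struct model_cqe *dst = &new_ring[i % 16];

            memcpy(dst, &old_ring[i % 8], sizeof(*dst));
            dst->op_own = owner_for(i, 16);
        }

        /* the driver swaps the buffers here, under cq->lock */
        for (unsigned int i = cons; i < cons + pending; i++)
            printf("cqe %u -> payload %u\n", i, new_ring[i % 16].payload);
        return 0;
    }
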
drivers/infiniband/hw/mlx5/main.c devr->s1->ext.cq = devr->c0; cq 653 drivers/infiniband/hw/mlx5/mlx5_ib.h struct ib_cq *cq; cq 1103 drivers/infiniband/hw/mlx5/mlx5_ib.h void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); cq 1104 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); cq 1142 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); cq 1145 drivers/infiniband/hw/mlx5/mlx5_ib.h int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); cq 806 drivers/infiniband/hw/mlx5/mr.c static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) cq 4072 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_cq *cq; cq 4079 drivers/infiniband/hw/mlx5/qp.c cq = to_mcq(ib_cq); cq 4080 drivers/infiniband/hw/mlx5/qp.c spin_lock(&cq->lock); cq 4082 drivers/infiniband/hw/mlx5/qp.c spin_unlock(&cq->lock); cq 5942 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn); cq 6380 drivers/infiniband/hw/mlx5/qp.c static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) cq 6390 drivers/infiniband/hw/mlx5/qp.c static void handle_drain_completion(struct ib_cq *cq, cq 6396 drivers/infiniband/hw/mlx5/qp.c if (cq->poll_ctx == IB_POLL_DIRECT) { cq 6398 drivers/infiniband/hw/mlx5/qp.c ib_process_cq_direct(cq, -1); cq 6403 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_cq *mcq = to_mcq(cq); cq 6417 drivers/infiniband/hw/mlx5/qp.c switch (cq->poll_ctx) { cq 6419 drivers/infiniband/hw/mlx5/qp.c irq_poll_disable(&cq->iop); cq 6420 drivers/infiniband/hw/mlx5/qp.c irq_poll_enable(&cq->iop); cq 6423 drivers/infiniband/hw/mlx5/qp.c cancel_work_sync(&cq->work); cq 6441 drivers/infiniband/hw/mlx5/qp.c struct ib_cq *cq = qp->send_cq; cq 6471 drivers/infiniband/hw/mlx5/qp.c handle_drain_completion(cq, &sdrain, dev); cq 6476 drivers/infiniband/hw/mlx5/qp.c struct ib_cq *cq = qp->recv_cq; cq 6501 drivers/infiniband/hw/mlx5/qp.c handle_drain_completion(cq, &rdrain, dev); cq 295 drivers/infiniband/hw/mlx5/srq.c in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn; cq 169 drivers/infiniband/hw/mthca/mthca_cq.c static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) cq 171 drivers/infiniband/hw/mthca/mthca_cq.c return get_cqe_from_buf(&cq->buf, entry); cq 179 drivers/infiniband/hw/mthca/mthca_cq.c static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) cq 181 drivers/infiniband/hw/mthca/mthca_cq.c return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); cq 204 drivers/infiniband/hw/mthca/mthca_cq.c static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, cq 208 drivers/infiniband/hw/mthca/mthca_cq.c *cq->set_ci_db = cpu_to_be32(cq->cons_index); cq 211 drivers/infiniband/hw/mthca/mthca_cq.c mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, cq 219 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq; cq 221 drivers/infiniband/hw/mthca/mthca_cq.c cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); cq 223 drivers/infiniband/hw/mthca/mthca_cq.c if (!cq) { cq 228 drivers/infiniband/hw/mthca/mthca_cq.c ++cq->arm_sn; cq 230 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); cq 236 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq; cq 241 drivers/infiniband/hw/mthca/mthca_cq.c cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); cq 242 drivers/infiniband/hw/mthca/mthca_cq.c if (cq) cq 243 
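
Aside: the handle_drain_completion() and mlx5_ib_drain_qp_done() fragments above implement the standard QP drain idiom: move the QP to the error state, post a marker work request whose completion callback fires a completion object, and wait. If the CQ is IB_POLL_DIRECT nobody else will poll it, so the drainer must call ib_process_cq_direct() itself until the marker completes. Below is a single-threaded model of that direct-poll branch; all names are hypothetical and this is not the kernel API.

    #include <stdio.h>

    struct model_wc;
    struct model_cqe_cb { void (*done)(struct model_wc *wc); };
    struct model_wc { struct model_cqe_cb *cb; };

    #define QUEUE_DEPTH 4

    static struct model_wc pending[QUEUE_DEPTH];
    static unsigned int head, tail;
    static int drained;                 /* stands in for struct completion */

    static void drain_done(struct model_wc *wc)
    {
        (void)wc;
        drained = 1;                    /* complete(&sdrain.done) */
    }

    static void ordinary_done(struct model_wc *wc)
    {
        (void)wc;
        printf("ordinary completion reaped\n");
    }

    /* stands in for ib_process_cq_direct(cq, -1) */
    static void process_cq_direct(void)
    {
        while (head != tail) {
            struct model_wc *wc = &pending[head++ % QUEUE_DEPTH];

            wc->cb->done(wc);
        }
    }

    int main(void)
    {
        static struct model_cqe_cb ord = { ordinary_done };
        static struct model_cqe_cb drn = { drain_done };

        /* two WRs already in flight, then the drain marker posted last */
        pending[tail++ % QUEUE_DEPTH].cb = &ord;
        pending[tail++ % QUEUE_DEPTH].cb = &ord;
        pending[tail++ % QUEUE_DEPTH].cb = &drn;

        while (!drained)                /* the IB_POLL_DIRECT branch */
            process_cq_direct();

        printf("send queue drained\n");
        return 0;
    }

For the softirq and workqueue contexts the fragments above instead quiesce the poller (irq_poll_disable()/irq_poll_enable() or cancel_work_sync()) and let the drainer reap the CQ directly.
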
drivers/infiniband/hw/mthca/mthca_cq.c ++cq->refcount; cq 247 drivers/infiniband/hw/mthca/mthca_cq.c if (!cq) { cq 254 drivers/infiniband/hw/mthca/mthca_cq.c event.element.cq = &cq->ibcq; cq 255 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->ibcq.event_handler) cq 256 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.event_handler(&event, cq->ibcq.cq_context); cq 259 drivers/infiniband/hw/mthca/mthca_cq.c if (!--cq->refcount) cq 260 drivers/infiniband/hw/mthca/mthca_cq.c wake_up(&cq->wait); cq 273 drivers/infiniband/hw/mthca/mthca_cq.c void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, cq 280 drivers/infiniband/hw/mthca/mthca_cq.c spin_lock_irq(&cq->lock); cq 289 drivers/infiniband/hw/mthca/mthca_cq.c for (prod_index = cq->cons_index; cq 290 drivers/infiniband/hw/mthca/mthca_cq.c cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe)); cq 292 drivers/infiniband/hw/mthca/mthca_cq.c if (prod_index == cq->cons_index + cq->ibcq.cqe) cq 297 drivers/infiniband/hw/mthca/mthca_cq.c qpn, cq->cqn, cq->cons_index, prod_index); cq 303 drivers/infiniband/hw/mthca/mthca_cq.c while ((int) --prod_index - (int) cq->cons_index >= 0) { cq 304 drivers/infiniband/hw/mthca/mthca_cq.c cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); cq 310 drivers/infiniband/hw/mthca/mthca_cq.c memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe), cq 316 drivers/infiniband/hw/mthca/mthca_cq.c set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); cq 318 drivers/infiniband/hw/mthca/mthca_cq.c cq->cons_index += nfreed; cq 319 drivers/infiniband/hw/mthca/mthca_cq.c update_cons_index(dev, cq, nfreed); cq 322 drivers/infiniband/hw/mthca/mthca_cq.c spin_unlock_irq(&cq->lock); cq 325 drivers/infiniband/hw/mthca/mthca_cq.c void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) cq 335 drivers/infiniband/hw/mthca/mthca_cq.c if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) && cq 336 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.cqe < cq->resize_buf->cqe) { cq 337 drivers/infiniband/hw/mthca/mthca_cq.c cq->cons_index &= cq->ibcq.cqe; cq 338 drivers/infiniband/hw/mthca/mthca_cq.c if (cqe_sw(get_cqe(cq, cq->ibcq.cqe))) cq 339 drivers/infiniband/hw/mthca/mthca_cq.c cq->cons_index -= cq->ibcq.cqe + 1; cq 342 drivers/infiniband/hw/mthca/mthca_cq.c for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i) cq 343 drivers/infiniband/hw/mthca/mthca_cq.c memcpy(get_cqe_from_buf(&cq->resize_buf->buf, cq 344 drivers/infiniband/hw/mthca/mthca_cq.c i & cq->resize_buf->cqe), cq 345 drivers/infiniband/hw/mthca/mthca_cq.c get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE); cq 372 drivers/infiniband/hw/mthca/mthca_cq.c static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq, cq 384 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn, cq->cons_index); cq 479 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq, cq 493 drivers/infiniband/hw/mthca/mthca_cq.c cqe = next_cqe_sw(cq); cq 505 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn), cq 571 drivers/infiniband/hw/mthca/mthca_cq.c handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, cq 648 drivers/infiniband/hw/mthca/mthca_cq.c ++cq->cons_index; cq 658 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq = to_mcq(ibcq); cq 665 drivers/infiniband/hw/mthca/mthca_cq.c spin_lock_irqsave(&cq->lock, flags); cq 670 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_poll_one(dev, cq, &qp, cq 679 drivers/infiniband/hw/mthca/mthca_cq.c update_cons_index(dev, cq, freed); cq 688 drivers/infiniband/hw/mthca/mthca_cq.c if 
(unlikely(err == -EAGAIN && cq->resize_buf && cq 689 drivers/infiniband/hw/mthca/mthca_cq.c cq->resize_buf->state == CQ_RESIZE_READY)) { cq 698 drivers/infiniband/hw/mthca/mthca_cq.c cq->cons_index &= cq->ibcq.cqe; cq 700 drivers/infiniband/hw/mthca/mthca_cq.c if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf, cq 701 drivers/infiniband/hw/mthca/mthca_cq.c cq->cons_index & cq->resize_buf->cqe))) { cq 705 drivers/infiniband/hw/mthca/mthca_cq.c tbuf = cq->buf; cq 706 drivers/infiniband/hw/mthca/mthca_cq.c tcqe = cq->ibcq.cqe; cq 707 drivers/infiniband/hw/mthca/mthca_cq.c cq->buf = cq->resize_buf->buf; cq 708 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.cqe = cq->resize_buf->cqe; cq 710 drivers/infiniband/hw/mthca/mthca_cq.c cq->resize_buf->buf = tbuf; cq 711 drivers/infiniband/hw/mthca/mthca_cq.c cq->resize_buf->cqe = tcqe; cq 712 drivers/infiniband/hw/mthca/mthca_cq.c cq->resize_buf->state = CQ_RESIZE_SWAPPED; cq 718 drivers/infiniband/hw/mthca/mthca_cq.c spin_unlock_irqrestore(&cq->lock, flags); cq 723 drivers/infiniband/hw/mthca/mthca_cq.c int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) cq 728 drivers/infiniband/hw/mthca/mthca_cq.c to_mcq(cq)->cqn; cq 730 drivers/infiniband/hw/mthca/mthca_cq.c mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, cq 731 drivers/infiniband/hw/mthca/mthca_cq.c MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock)); cq 738 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq = to_mcq(ibcq); cq 741 drivers/infiniband/hw/mthca/mthca_cq.c u32 sn = cq->arm_sn & 3; cq 743 drivers/infiniband/hw/mthca/mthca_cq.c db_rec[0] = cpu_to_be32(cq->cons_index); cq 744 drivers/infiniband/hw/mthca/mthca_cq.c db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | cq 748 drivers/infiniband/hw/mthca/mthca_cq.c mthca_write_db_rec(db_rec, cq->arm_db); cq 759 drivers/infiniband/hw/mthca/mthca_cq.c MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn; cq 761 drivers/infiniband/hw/mthca/mthca_cq.c mthca_write64(dbhi, cq->cons_index, cq 770 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq) cq 776 drivers/infiniband/hw/mthca/mthca_cq.c cq->ibcq.cqe = nent - 1; cq 777 drivers/infiniband/hw/mthca/mthca_cq.c cq->is_kernel = !ctx; cq 779 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn = mthca_alloc(&dev->cq_table.alloc); cq 780 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->cqn == -1) cq 784 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); cq 788 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->is_kernel) { cq 789 drivers/infiniband/hw/mthca/mthca_cq.c cq->arm_sn = 1; cq 793 drivers/infiniband/hw/mthca/mthca_cq.c cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq 794 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn, &cq->set_ci_db); cq 795 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->set_ci_db_index < 0) cq 798 drivers/infiniband/hw/mthca/mthca_cq.c cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq 799 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn, &cq->arm_db); cq 800 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->arm_db_index < 0) cq 811 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->is_kernel) { cq 812 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_alloc_cq_buf(dev, &cq->buf, nent); cq 817 drivers/infiniband/hw/mthca/mthca_cq.c spin_lock_init(&cq->lock); cq 818 drivers/infiniband/hw/mthca/mthca_cq.c cq->refcount = 1; cq 819 drivers/infiniband/hw/mthca/mthca_cq.c init_waitqueue_head(&cq->wait); cq 820 drivers/infiniband/hw/mthca/mthca_cq.c 
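
Aside: mthca_cq_clean() above, and the __mlx4_ib_cq_clean()/__mlx5_ib_cq_clean() entries earlier in this listing, share one compaction algorithm. When a QP dies with completions still queued, the driver walks backwards from the producer index to the consumer index, counts CQEs belonging to the dead QP, slides every surviving CQE up by the running count, and finally advances the consumer index by the total freed. A standalone sketch, with hypothetical names and no ownership bits:

    #include <stdio.h>

    #define NENT 8                          /* power of two */

    struct model_cqe { unsigned int qpn; };

    static struct model_cqe ring[NENT];

    int main(void)
    {
        unsigned int dead_qpn = 7, cons = 2, prod = 8; /* cons..prod-1 pending */
        int nfreed = 0;

        for (unsigned int i = cons; i < prod; i++)     /* alternate two QPs */
            ring[i & (NENT - 1)].qpn = (i & 1) ? 7 : 9;

        /* walk back from the newest pending CQE towards the consumer */
        for (int i = (int)prod - 1; i >= (int)cons; i--) {
            struct model_cqe *cqe = &ring[i & (NENT - 1)];

            if (cqe->qpn == dead_qpn)
                ++nfreed;                              /* drop it */
            else if (nfreed)                           /* slide survivor up */
                ring[(i + nfreed) & (NENT - 1)] = *cqe;
        }

        cons += nfreed;                     /* cq->cons_index += nfreed */
        printf("freed %d, consumer index now %u\n", nfreed, cons);
        for (unsigned int i = cons; i < prod; i++)
            printf("  cqe %u: qpn %u\n", i, ring[i & (NENT - 1)].qpn);
        return 0;
    }

Compacting towards the producer is what lets the drivers simply bump cons_index by nfreed and ring the doorbell once, as the update_cons_index()/mlx5_cq_set_ci() calls above show.
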
mutex_init(&cq->mutex); cq 834 drivers/infiniband/hw/mthca/mthca_cq.c cq_context->lkey = cpu_to_be32(cq->buf.mr.ibmr.lkey); cq 835 drivers/infiniband/hw/mthca/mthca_cq.c cq_context->cqn = cpu_to_be32(cq->cqn); cq 838 drivers/infiniband/hw/mthca/mthca_cq.c cq_context->ci_db = cpu_to_be32(cq->set_ci_db_index); cq 839 drivers/infiniband/hw/mthca/mthca_cq.c cq_context->state_db = cpu_to_be32(cq->arm_db_index); cq 842 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn); cq 849 drivers/infiniband/hw/mthca/mthca_cq.c if (mthca_array_set(&dev->cq_table.cq, cq 850 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn & (dev->limits.num_cqs - 1), cq 851 drivers/infiniband/hw/mthca/mthca_cq.c cq)) { cq 857 drivers/infiniband/hw/mthca/mthca_cq.c cq->cons_index = 0; cq 864 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->is_kernel) cq 865 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); cq 871 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->is_kernel && mthca_is_memfree(dev)) cq 872 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); cq 875 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->is_kernel && mthca_is_memfree(dev)) cq 876 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); cq 879 drivers/infiniband/hw/mthca/mthca_cq.c mthca_table_put(dev, dev->cq_table.table, cq->cqn); cq 882 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free(&dev->cq_table.alloc, cq->cqn); cq 887 drivers/infiniband/hw/mthca/mthca_cq.c static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) cq 892 drivers/infiniband/hw/mthca/mthca_cq.c c = cq->refcount; cq 899 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_cq *cq) cq 910 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn); cq 919 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn, cq->cons_index, cq 920 drivers/infiniband/hw/mthca/mthca_cq.c cq->is_kernel ? 
!!next_cqe_sw(cq) : 0); cq 926 drivers/infiniband/hw/mthca/mthca_cq.c mthca_array_clear(&dev->cq_table.cq, cq 927 drivers/infiniband/hw/mthca/mthca_cq.c cq->cqn & (dev->limits.num_cqs - 1)); cq 928 drivers/infiniband/hw/mthca/mthca_cq.c --cq->refcount; cq 936 drivers/infiniband/hw/mthca/mthca_cq.c wait_event(cq->wait, !get_cq_refcount(dev, cq)); cq 938 drivers/infiniband/hw/mthca/mthca_cq.c if (cq->is_kernel) { cq 939 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); cq 941 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); cq 942 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); cq 946 drivers/infiniband/hw/mthca/mthca_cq.c mthca_table_put(dev, dev->cq_table.table, cq->cqn); cq 947 drivers/infiniband/hw/mthca/mthca_cq.c mthca_free(&dev->cq_table.alloc, cq->cqn); cq 964 drivers/infiniband/hw/mthca/mthca_cq.c err = mthca_array_init(&dev->cq_table.cq, cq 974 drivers/infiniband/hw/mthca/mthca_cq.c mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); cq 242 drivers/infiniband/hw/mthca/mthca_dev.h struct mthca_array cq; cq 496 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); cq 497 drivers/infiniband/hw/mthca/mthca_dev.h int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags); cq 500 drivers/infiniband/hw/mthca/mthca_dev.h struct mthca_cq *cq); cq 502 drivers/infiniband/hw/mthca/mthca_dev.h struct mthca_cq *cq); cq 506 drivers/infiniband/hw/mthca/mthca_dev.h void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, cq 508 drivers/infiniband/hw/mthca/mthca_dev.h void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); cq 611 drivers/infiniband/hw/mthca/mthca_provider.c struct mthca_cq *cq; cq 640 drivers/infiniband/hw/mthca/mthca_provider.c cq = to_mcq(ibcq); cq 643 drivers/infiniband/hw/mthca/mthca_provider.c cq->buf.mr.ibmr.lkey = ucmd.lkey; cq 644 drivers/infiniband/hw/mthca/mthca_provider.c cq->set_ci_db_index = ucmd.set_db_index; cq 645 drivers/infiniband/hw/mthca/mthca_provider.c cq->arm_db_index = ucmd.arm_db_index; cq 653 drivers/infiniband/hw/mthca/mthca_provider.c cq); cq 657 drivers/infiniband/hw/mthca/mthca_provider.c if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) { cq 658 drivers/infiniband/hw/mthca/mthca_provider.c mthca_free_cq(to_mdev(ibdev), cq); cq 663 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf = NULL; cq 680 drivers/infiniband/hw/mthca/mthca_provider.c static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq, cq 685 drivers/infiniband/hw/mthca/mthca_provider.c spin_lock_irq(&cq->lock); cq 686 drivers/infiniband/hw/mthca/mthca_provider.c if (cq->resize_buf) { cq 691 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC); cq 692 drivers/infiniband/hw/mthca/mthca_provider.c if (!cq->resize_buf) { cq 697 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf->state = CQ_RESIZE_ALLOC; cq 702 drivers/infiniband/hw/mthca/mthca_provider.c spin_unlock_irq(&cq->lock); cq 707 drivers/infiniband/hw/mthca/mthca_provider.c ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries); cq 709 drivers/infiniband/hw/mthca/mthca_provider.c spin_lock_irq(&cq->lock); cq 710 drivers/infiniband/hw/mthca/mthca_provider.c kfree(cq->resize_buf); cq 711 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf = NULL; cq 712 
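
Aside: mthca_free_cq() above removes the CQ from the lookup array, drops its own reference, and then sleeps in wait_event() until every transient reference is gone; the ++cq->refcount / wake_up(&cq->wait) pair in mthca_cq_event() earlier in this listing is the other half of that handshake. A pthread model of the same discipline, with hypothetical names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
    static int refcount = 1;                /* the creator's reference */

    static void cq_get(void)
    {
        pthread_mutex_lock(&lock);
        ++refcount;                         /* mthca_cq_event() on entry */
        pthread_mutex_unlock(&lock);
    }

    static void cq_put(void)
    {
        pthread_mutex_lock(&lock);
        if (--refcount == 0)
            pthread_cond_broadcast(&waitq); /* wake_up(&cq->wait) */
        pthread_mutex_unlock(&lock);
    }

    static void *event_handler(void *arg)
    {
        (void)arg;
        cq_get();
        puts("event handler still using the CQ");
        cq_put();
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, event_handler, NULL);

        /*
         * mthca_free_cq(): the real driver first clears the lookup table
         * so no new handler can find the CQ, then drops its reference
         * and sleeps until transient holders are gone.
         */
        cq_put();
        pthread_mutex_lock(&lock);
        while (refcount != 0)               /* wait_event(cq->wait, ...) */
            pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(t, NULL);
        puts("CQ freed");
        return 0;
    }
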
drivers/infiniband/hw/mthca/mthca_provider.c spin_unlock_irq(&cq->lock); cq 716 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf->cqe = entries - 1; cq 718 drivers/infiniband/hw/mthca/mthca_provider.c spin_lock_irq(&cq->lock); cq 719 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf->state = CQ_RESIZE_READY; cq 720 drivers/infiniband/hw/mthca/mthca_provider.c spin_unlock_irq(&cq->lock); cq 728 drivers/infiniband/hw/mthca/mthca_provider.c struct mthca_cq *cq = to_mcq(ibcq); cq 736 drivers/infiniband/hw/mthca/mthca_provider.c mutex_lock(&cq->mutex); cq 744 drivers/infiniband/hw/mthca/mthca_provider.c if (cq->is_kernel) { cq 745 drivers/infiniband/hw/mthca/mthca_provider.c ret = mthca_alloc_resize_buf(dev, cq, entries); cq 748 drivers/infiniband/hw/mthca/mthca_provider.c lkey = cq->resize_buf->buf.mr.ibmr.lkey; cq 757 drivers/infiniband/hw/mthca/mthca_provider.c ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries)); cq 760 drivers/infiniband/hw/mthca/mthca_provider.c if (cq->resize_buf) { cq 761 drivers/infiniband/hw/mthca/mthca_provider.c mthca_free_cq_buf(dev, &cq->resize_buf->buf, cq 762 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf->cqe); cq 763 drivers/infiniband/hw/mthca/mthca_provider.c kfree(cq->resize_buf); cq 764 drivers/infiniband/hw/mthca/mthca_provider.c spin_lock_irq(&cq->lock); cq 765 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf = NULL; cq 766 drivers/infiniband/hw/mthca/mthca_provider.c spin_unlock_irq(&cq->lock); cq 771 drivers/infiniband/hw/mthca/mthca_provider.c if (cq->is_kernel) { cq 775 drivers/infiniband/hw/mthca/mthca_provider.c spin_lock_irq(&cq->lock); cq 776 drivers/infiniband/hw/mthca/mthca_provider.c if (cq->resize_buf->state == CQ_RESIZE_READY) { cq 777 drivers/infiniband/hw/mthca/mthca_provider.c mthca_cq_resize_copy_cqes(cq); cq 778 drivers/infiniband/hw/mthca/mthca_provider.c tbuf = cq->buf; cq 779 drivers/infiniband/hw/mthca/mthca_provider.c tcqe = cq->ibcq.cqe; cq 780 drivers/infiniband/hw/mthca/mthca_provider.c cq->buf = cq->resize_buf->buf; cq 781 drivers/infiniband/hw/mthca/mthca_provider.c cq->ibcq.cqe = cq->resize_buf->cqe; cq 783 drivers/infiniband/hw/mthca/mthca_provider.c tbuf = cq->resize_buf->buf; cq 784 drivers/infiniband/hw/mthca/mthca_provider.c tcqe = cq->resize_buf->cqe; cq 787 drivers/infiniband/hw/mthca/mthca_provider.c kfree(cq->resize_buf); cq 788 drivers/infiniband/hw/mthca/mthca_provider.c cq->resize_buf = NULL; cq 789 drivers/infiniband/hw/mthca/mthca_provider.c spin_unlock_irq(&cq->lock); cq 796 drivers/infiniband/hw/mthca/mthca_provider.c mutex_unlock(&cq->mutex); cq 801 drivers/infiniband/hw/mthca/mthca_provider.c static void mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) cq 810 drivers/infiniband/hw/mthca/mthca_provider.c mthca_unmap_user_db(to_mdev(cq->device), cq 813 drivers/infiniband/hw/mthca/mthca_provider.c to_mcq(cq)->arm_db_index); cq 814 drivers/infiniband/hw/mthca/mthca_provider.c mthca_unmap_user_db(to_mdev(cq->device), cq 817 drivers/infiniband/hw/mthca/mthca_provider.c to_mcq(cq)->set_ci_db_index); cq 819 drivers/infiniband/hw/mthca/mthca_provider.c mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); cq 1570 drivers/infiniband/hw/mthca/mthca_qp.c struct mthca_cq *cq; cq 1576 drivers/infiniband/hw/mthca/mthca_qp.c cq = to_mcq(ib_cq); cq 1577 drivers/infiniband/hw/mthca/mthca_qp.c spin_lock(&cq->lock); cq 1579 drivers/infiniband/hw/mthca/mthca_qp.c spin_unlock(&cq->lock); cq 159 drivers/infiniband/hw/ocrdma/ocrdma.h struct ocrdma_queue_info cq; cq 498 
drivers/infiniband/hw/ocrdma/ocrdma.h static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) cq 502 drivers/infiniband/hw/ocrdma/ocrdma.h return (cqe_valid == cq->phase); cq 124 drivers/infiniband/hw/ocrdma/ocrdma_hw.c (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe))); cq 133 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); cq 524 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_queue_info *cq, cq 536 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) << cq 538 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size); cq 542 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe); cq 544 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE, cq 545 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->dma, PAGE_SIZE_4K); cq 549 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); cq 550 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->created = true; cq 566 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_queue_info *cq) cq 580 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT); cq 588 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->async_cqid_ringsize = cq->id; cq 609 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN, cq 615 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q); cq 628 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq); cq 631 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0); cq 637 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ); cq 639 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_free_q(dev, &dev->mq.cq); cq 646 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_queue_info *mbxq, *cq; cq 657 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq = &dev->mq.cq; cq 658 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (cq->created) { cq 659 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ); cq 660 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_free_q(dev, cq); cq 679 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_cq *cq = NULL; cq 707 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq = dev->cq_tbl[cqid]; cq 708 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (cq == NULL) { cq 721 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.element.cq = &cq->ibcq; cq 727 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ib_evt.element.cq = &cq->ibcq; cq 787 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (cq->ibcq.event_handler) cq 788 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context); cq 898 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped); cq 903 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_cq *cq, bool sq) cq 908 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); cq 926 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (qp->sq_cq == cq) cq 936 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_cq *cq) cq 952 drivers/infiniband/hw/ocrdma/ocrdma_hw.c bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true); cq 954 
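
Aside: the ocrdma mailbox-CQ entries near the top of the ocrdma_hw.c block above address the ring as va + tail * sizeof(struct ocrdma_mcqe) and advance with (tail + 1) & (OCRDMA_MQ_CQ_LEN - 1); keeping the length a power of two turns the wrap into a mask instead of a compare. A minimal model, with hypothetical names:

    #include <stdio.h>

    #define MQ_CQ_LEN 16                    /* must stay a power of two */

    struct model_mcqe { unsigned int val; };

    static struct model_mcqe va[MQ_CQ_LEN];
    static unsigned int tail;

    static struct model_mcqe *current_mcqe(void)
    {
        return &va[tail];                   /* va + tail * sizeof(*cqe) */
    }

    static void consume_mcqe(void)
    {
        tail = (tail + 1) & (MQ_CQ_LEN - 1);   /* wraps with a mask */
    }

    int main(void)
    {
        for (unsigned int i = 0; i < MQ_CQ_LEN; i++)
            va[i].val = 100 + i;

        for (int i = 0; i < 20; i++) {      /* deliberately wraps once */
            printf("slot %u -> %u\n", tail, current_mcqe()->val);
            consume_mcqe();
        }
        return 0;
    }
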
drivers/infiniband/hw/ocrdma/ocrdma_hw.c bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false); cq 968 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_cq *cq; cq 973 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq = dev->cq_tbl[cq_idx]; cq 974 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (cq == NULL) cq 977 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (cq->ibcq.comp_handler) { cq 978 drivers/infiniband/hw/ocrdma/ocrdma_hw.c spin_lock_irqsave(&cq->comp_handler_lock, flags); cq 979 drivers/infiniband/hw/ocrdma/ocrdma_hw.c (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); cq 980 drivers/infiniband/hw/ocrdma/ocrdma_hw.c spin_unlock_irqrestore(&cq->comp_handler_lock, flags); cq 982 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_qp_buddy_cq_handler(dev, cq); cq 988 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (cq_id == dev->mq.cq.id) cq 1784 drivers/infiniband/hw/ocrdma/ocrdma_hw.c int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq, cq 1802 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->max_hw_cqe = 1; cq 1807 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->max_hw_cqe = dev->attr.max_cqe; cq 1813 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE); cq 1820 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); cq 1821 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (!cq->va) { cq 1825 drivers/infiniband/hw/ocrdma/ocrdma_hw.c page_size = cq->len / hw_pages; cq 1831 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->eqn = ocrdma_bind_eq(dev); cq 1833 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cqe_count = cq->len / cqe_size; cq 1834 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->cqe_cnt = cqe_count; cq 1856 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->cmd.eqn = cq->eqn; cq 1861 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->phase_change = false; cq 1862 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->cmd.pdid_cqecnt = (cq->len / cqe_size); cq 1864 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1; cq 1866 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->phase_change = true; cq 1872 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size); cq 1878 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK); cq 1882 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_unbind_eq(dev, cq->eqn); cq 1883 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa); cq 1889 drivers/infiniband/hw/ocrdma/ocrdma_hw.c void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq) cq 1900 drivers/infiniband/hw/ocrdma/ocrdma_hw.c (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) & cq 1904 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_unbind_eq(dev, cq->eqn); cq 1905 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa); cq 2082 drivers/infiniband/hw/ocrdma/ocrdma_hw.c bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) cq 2086 drivers/infiniband/hw/ocrdma/ocrdma_hw.c list_for_each_entry(tmp, &cq->sq_head, sq_entry) { cq 2095 drivers/infiniband/hw/ocrdma/ocrdma_hw.c bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp) cq 2099 drivers/infiniband/hw/ocrdma/ocrdma_hw.c list_for_each_entry(tmp, &cq->rq_head, rq_entry) { cq 2376 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_cq *cq; cq 2431 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 
cq = get_ocrdma_cq(attrs->send_cq); cq 2432 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) & cq 2434 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq_cq = cq; cq 2435 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cq = get_ocrdma_cq(attrs->recv_cq); cq 2436 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) & cq 2438 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->rq_cq = cq; cq 125 drivers/infiniband/hw/ocrdma/ocrdma_hw.h void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq); cq 939 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, cq 952 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.cq_id = cq->id; cq 953 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.page_size = PAGE_ALIGN(cq->len); cq 955 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.max_hw_cqe = cq->max_hw_cqe; cq 956 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.page_addr[0] = virt_to_phys(cq->va); cq 959 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.phase_change = cq->phase_change ? 1 : 0; cq 963 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c __func__, dev->id, cq->id); cq 974 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cq->ucontext = uctx; cq 984 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); cq 1001 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_lock_init(&cq->cq_lock); cq 1002 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_lock_init(&cq->comp_handler_lock); cq 1003 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c INIT_LIST_HEAD(&cq->sq_head); cq 1004 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c INIT_LIST_HEAD(&cq->rq_head); cq 1009 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id); cq 1014 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c status = ocrdma_copy_cq_uresp(dev, cq, udata); cq 1018 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cq->phase = OCRDMA_CQE_VALID; cq 1019 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c dev->cq_tbl[cq->id] = cq; cq 1023 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_mbx_destroy_cq(dev, cq); cq 1031 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); cq 1033 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) { cq 1041 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void ocrdma_flush_cq(struct ocrdma_cq *cq) cq 1047 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); cq 1050 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cqe = cq->va; cq 1051 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cqe_cnt = cq->cqe_cnt; cq 1056 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_lock_irqsave(&cq->cq_lock, flags); cq 1058 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (is_cqe_valid(cq, cqe)) cq 1063 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count); cq 1064 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags); cq 1069 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); cq 1075 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c dev->cq_tbl[cq->id] = NULL; cq 1076 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c indx = ocrdma_get_eq_table_index(dev, cq->eqn); cq 1081 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_flush_cq(cq); cq 1083 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 
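
Aside: is_cqe_valid() above compares a CQE's valid bit against cq->phase, and the poll path (ocrdma_poll_hwcq() and ocrdma_change_cq_phase(), next in this listing) flips cq->phase each time the get pointer wraps, so entries left over from the previous lap never look valid. A standalone sketch of that phase-bit scheme; all model_* names are hypothetical.

    #include <stdio.h>
    #include <stdint.h>

    #define CQ_DEPTH 4
    #define CQE_VALID 0x1

    struct model_cqe { uint8_t flags; unsigned int data; };

    static struct model_cqe ring[CQ_DEPTH];
    static unsigned int getp;
    static uint8_t phase = CQE_VALID;       /* consumer starts expecting 1 */

    static int cqe_valid(const struct model_cqe *cqe)
    {
        return (cqe->flags & CQE_VALID) == phase;   /* is_cqe_valid() */
    }

    /* "hardware": writes with its own phase, which flips on every lap */
    static void hw_post(unsigned int n, unsigned int data)
    {
        ring[n % CQ_DEPTH].data = data;
        ring[n % CQ_DEPTH].flags = ((n / CQ_DEPTH) & 1) ? 0 : CQE_VALID;
    }

    static void poll_cq(void)
    {
        while (cqe_valid(&ring[getp])) {
            printf("polled data %u\n", ring[getp].data);
            getp = (getp + 1) % CQ_DEPTH;
            if (getp == 0)                  /* ocrdma_change_cq_phase() */
                phase ^= CQE_VALID;
        }
    }

    int main(void)
    {
        for (unsigned int n = 0; n < 2 * CQ_DEPTH + 1; n++) {
            hw_post(n, 100 + n);
            poll_cq();
        }
        return 0;
    }
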
ocrdma_mbx_destroy_cq(dev, cq); cq 1084 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (cq->ucontext) { cq 1085 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c pdid = cq->ucontext->cntxt_pd->id; cq 1086 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq 1087 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c PAGE_ALIGN(cq->len)); cq 1088 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_del_mmap(cq->ucontext, cq 1601 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq) cq 1610 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_lock_irqsave(&cq->cq_lock, cq_flags); cq 1619 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cur_getp = cq->getp; cq 1626 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cqe = cq->va + cur_getp; cq 1660 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cur_getp = (cur_getp + 1) % cq->max_hw_cqe; cq 1662 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_unlock_irqrestore(&cq->cq_lock, cq_flags); cq 2755 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe, cq 2758 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (cq->phase_change) { cq 2760 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cq->phase = (~cq->phase & OCRDMA_CQE_VALID); cq 2767 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries, cq 2775 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); cq 2779 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cur_getp = cq->getp; cq 2781 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cqe = cq->va + cur_getp; cq 2783 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (!is_cqe_valid(cq, cqe)) cq 2807 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cur_getp = (cur_getp + 1) % cq->max_hw_cqe; cq 2808 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_change_cq_phase(cq, cqe, cur_getp); cq 2818 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cq->getp = cur_getp; cq 2821 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes); cq 2827 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries, cq 2835 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) { cq 2838 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) { cq 2856 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); cq 2863 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_lock_irqsave(&cq->cq_lock, flags); cq 2864 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc); cq 2865 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags); cq 2876 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c list_for_each_entry(qp, &cq->sq_head, sq_entry) { cq 2879 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc); cq 2891 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); cq 2897 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c cq_id = cq->id; cq 2899 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_lock_irqsave(&cq->cq_lock, flags); cq 2906 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags); cq 454 drivers/infiniband/hw/qedr/main.c struct qedr_cq *cq; cq 468 drivers/infiniband/hw/qedr/main.c cq = (struct 
qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
cq 471 drivers/infiniband/hw/qedr/main.c if (cq == NULL) {
cq 480 drivers/infiniband/hw/qedr/main.c if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
cq 483 drivers/infiniband/hw/qedr/main.c cq_handle->hi, cq_handle->lo, cq);
cq 487 drivers/infiniband/hw/qedr/main.c cq->arm_flags = 0;
cq 489 drivers/infiniband/hw/qedr/main.c if (!cq->destroyed && cq->ibcq.comp_handler)
cq 490 drivers/infiniband/hw/qedr/main.c (*cq->ibcq.comp_handler)
cq 491 drivers/infiniband/hw/qedr/main.c (&cq->ibcq, cq->ibcq.cq_context);
cq 499 drivers/infiniband/hw/qedr/main.c cq->cnq_notif++;
cq 674 drivers/infiniband/hw/qedr/main.c struct qedr_cq *cq;
cq 729 drivers/infiniband/hw/qedr/main.c cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
cq 730 drivers/infiniband/hw/qedr/main.c if (cq) {
cq 731 drivers/infiniband/hw/qedr/main.c ibcq = &cq->ibcq;
cq 734 drivers/infiniband/hw/qedr/main.c event.element.cq = ibcq;
cq 742 drivers/infiniband/hw/qedr/main.c DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
cq 75 drivers/infiniband/hw/qedr/qedr_roce_cm.c struct qedr_cq *cq = dev->gsi_sqcq;
cq 82 drivers/infiniband/hw/qedr/qedr_roce_cm.c cq->ibcq.comp_handler ? "Yes" : "No");
cq 92 drivers/infiniband/hw/qedr/qedr_roce_cm.c if (cq->ibcq.comp_handler)
cq 93 drivers/infiniband/hw/qedr/qedr_roce_cm.c (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
cq 100 drivers/infiniband/hw/qedr/qedr_roce_cm.c struct qedr_cq *cq = dev->gsi_rqcq;
cq 121 drivers/infiniband/hw/qedr/qedr_roce_cm.c if (cq->ibcq.comp_handler)
cq 122 drivers/infiniband/hw/qedr/qedr_roce_cm.c (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
cq 137 drivers/infiniband/hw/qedr/qedr_roce_cm.c struct qedr_cq *cq;
cq 139 drivers/infiniband/hw/qedr/qedr_roce_cm.c cq = get_qedr_cq(attrs->send_cq);
cq 140 drivers/infiniband/hw/qedr/qedr_roce_cm.c iparams.icid = cq->icid;
cq 142 drivers/infiniband/hw/qedr/qedr_roce_cm.c dev->ops->common->chain_free(dev->cdev, &cq->pbl);
cq 144 drivers/infiniband/hw/qedr/qedr_roce_cm.c cq = get_qedr_cq(attrs->recv_cq);
cq 146 drivers/infiniband/hw/qedr/qedr_roce_cm.c if (iparams.icid != cq->icid) {
cq 147 drivers/infiniband/hw/qedr/qedr_roce_cm.c iparams.icid = cq->icid;
cq 149 drivers/infiniband/hw/qedr/qedr_roce_cm.c dev->ops->common->chain_free(dev->cdev, &cq->pbl);
cq 675 drivers/infiniband/hw/qedr/qedr_roce_cm.c struct qedr_cq *cq = get_qedr_cq(ibcq);
cq 681 drivers/infiniband/hw/qedr/qedr_roce_cm.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 723 drivers/infiniband/hw/qedr/qedr_roce_cm.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 662 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, struct ib_udata *udata)
cq 670 drivers/infiniband/hw/qedr/verbs.c uresp.icid = cq->icid;
cq 674 drivers/infiniband/hw/qedr/verbs.c DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
cq 679 drivers/infiniband/hw/qedr/verbs.c static void consume_cqe(struct qedr_cq *cq)
cq 681 drivers/infiniband/hw/qedr/verbs.c if (cq->latest_cqe == cq->toggle_cqe)
cq 682 drivers/infiniband/hw/qedr/verbs.c cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
cq 684 drivers/infiniband/hw/qedr/verbs.c cq->latest_cqe = qed_chain_consume(&cq->pbl);
cq 748 drivers/infiniband/hw/qedr/verbs.c static inline void qedr_init_cq_params(struct qedr_cq *cq,
cq 757 drivers/infiniband/hw/qedr/verbs.c params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
cq 758 drivers/infiniband/hw/qedr/verbs.c params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
cq 767 drivers/infiniband/hw/qedr/verbs.c static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
cq 769 drivers/infiniband/hw/qedr/verbs.c cq->db.data.agg_flags = flags;
cq 770 drivers/infiniband/hw/qedr/verbs.c cq->db.data.value = cpu_to_le32(cons);
cq 771 drivers/infiniband/hw/qedr/verbs.c writeq(cq->db.raw, cq->db_addr);
cq 776 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq = get_qedr_cq(ibcq);
cq 782 drivers/infiniband/hw/qedr/verbs.c if (cq->destroyed) {
cq 785 drivers/infiniband/hw/qedr/verbs.c cq, cq->icid);
cq 790 drivers/infiniband/hw/qedr/verbs.c if (cq->cq_type == QEDR_CQ_TYPE_GSI)
cq 793 drivers/infiniband/hw/qedr/verbs.c spin_lock_irqsave(&cq->cq_lock, sflags);
cq 795 drivers/infiniband/hw/qedr/verbs.c cq->arm_flags = 0;
cq 798 drivers/infiniband/hw/qedr/verbs.c cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
cq 801 drivers/infiniband/hw/qedr/verbs.c cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
cq 803 drivers/infiniband/hw/qedr/verbs.c doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
cq 805 drivers/infiniband/hw/qedr/verbs.c spin_unlock_irqrestore(&cq->cq_lock, sflags);
cq 823 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq = get_qedr_cq(ibcq);
cq 857 drivers/infiniband/hw/qedr/verbs.c cq->cq_type = QEDR_CQ_TYPE_USER;
cq 859 drivers/infiniband/hw/qedr/verbs.c rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr,
cq 865 drivers/infiniband/hw/qedr/verbs.c pbl_ptr = cq->q.pbl_tbl->pa;
cq 866 drivers/infiniband/hw/qedr/verbs.c page_cnt = cq->q.pbl_info.num_pbes;
cq 868 drivers/infiniband/hw/qedr/verbs.c cq->ibcq.cqe = chain_entries;
cq 870 drivers/infiniband/hw/qedr/verbs.c cq->cq_type = QEDR_CQ_TYPE_KERNEL;
cq 878 drivers/infiniband/hw/qedr/verbs.c &cq->pbl, NULL);
cq 882 drivers/infiniband/hw/qedr/verbs.c page_cnt = qed_chain_get_page_cnt(&cq->pbl);
cq 883 drivers/infiniband/hw/qedr/verbs.c pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
cq 884 drivers/infiniband/hw/qedr/verbs.c cq->ibcq.cqe = cq->pbl.capacity;
cq 887 drivers/infiniband/hw/qedr/verbs.c qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
cq 894 drivers/infiniband/hw/qedr/verbs.c cq->icid = icid;
cq 895 drivers/infiniband/hw/qedr/verbs.c cq->sig = QEDR_CQ_MAGIC_NUMBER;
cq 896 drivers/infiniband/hw/qedr/verbs.c spin_lock_init(&cq->cq_lock);
cq 899 drivers/infiniband/hw/qedr/verbs.c rc = qedr_copy_cq_uresp(dev, cq, udata);
cq 904 drivers/infiniband/hw/qedr/verbs.c cq->db_addr = dev->db_addr +
cq 906 drivers/infiniband/hw/qedr/verbs.c cq->db.data.icid = cq->icid;
cq 907 drivers/infiniband/hw/qedr/verbs.c cq->db.data.params = DB_AGG_CMD_SET <<
cq 911 drivers/infiniband/hw/qedr/verbs.c cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
cq 912 drivers/infiniband/hw/qedr/verbs.c cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
cq 913 drivers/infiniband/hw/qedr/verbs.c cq->latest_cqe = NULL;
cq 914 drivers/infiniband/hw/qedr/verbs.c consume_cqe(cq);
cq 915 drivers/infiniband/hw/qedr/verbs.c cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
cq 920 drivers/infiniband/hw/qedr/verbs.c cq->icid, cq, params.cq_size);
cq 925 drivers/infiniband/hw/qedr/verbs.c destroy_iparams.icid = cq->icid;
cq 930 drivers/infiniband/hw/qedr/verbs.c qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
cq 932 drivers/infiniband/hw/qedr/verbs.c dev->ops->common->chain_free(dev->cdev, &cq->pbl);
cq 935 drivers/infiniband/hw/qedr/verbs.c ib_umem_release(cq->q.umem);
cq 943 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq = get_qedr_cq(ibcq);
cq 945 drivers/infiniband/hw/qedr/verbs.c DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
cq 958 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq = get_qedr_cq(ibcq);
cq 961 drivers/infiniband/hw/qedr/verbs.c DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
cq 963 drivers/infiniband/hw/qedr/verbs.c cq->destroyed = 1;
cq 966 drivers/infiniband/hw/qedr/verbs.c if (cq->cq_type == QEDR_CQ_TYPE_GSI)
cq 969 drivers/infiniband/hw/qedr/verbs.c iparams.icid = cq->icid;
cq 971 drivers/infiniband/hw/qedr/verbs.c dev->ops->common->chain_free(dev->cdev, &cq->pbl);
cq 974 drivers/infiniband/hw/qedr/verbs.c qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
cq 975 drivers/infiniband/hw/qedr/verbs.c ib_umem_release(cq->q.umem);
cq 990 drivers/infiniband/hw/qedr/verbs.c while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
cq 996 drivers/infiniband/hw/qedr/verbs.c while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
cq 3652 drivers/infiniband/hw/qedr/verbs.c static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
cq 3657 drivers/infiniband/hw/qedr/verbs.c cq->pbl_toggle;
cq 3679 drivers/infiniband/hw/qedr/verbs.c static union rdma_cqe *get_cqe(struct qedr_cq *cq)
cq 3681 drivers/infiniband/hw/qedr/verbs.c return cq->latest_cqe;
cq 3698 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, int num_entries,
cq 3753 drivers/infiniband/hw/qedr/verbs.c struct qedr_qp *qp, struct qedr_cq *cq,
cq 3761 drivers/infiniband/hw/qedr/verbs.c cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
cq 3768 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3769 drivers/infiniband/hw/qedr/verbs.c cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
cq 3775 drivers/infiniband/hw/qedr/verbs.c cnt = process_req(dev, qp, cq, num_entries, wc,
cq 3786 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3792 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3798 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3804 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3810 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3816 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3822 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3828 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3834 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3840 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3846 drivers/infiniband/hw/qedr/verbs.c cq->icid, qp->icid);
cq 3849 drivers/infiniband/hw/qedr/verbs.c cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
cq 3910 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, struct ib_wc *wc,
cq 3921 drivers/infiniband/hw/qedr/verbs.c cq, cq->icid, resp->flags);
cq 3928 drivers/infiniband/hw/qedr/verbs.c cq, cq->icid, resp->status);
cq 3939 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, struct ib_wc *wc,
cq 3957 drivers/infiniband/hw/qedr/verbs.c __process_resp_one(dev, qp, cq, wc, resp, wr_id);
cq 3964 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, struct ib_wc *wc,
cq 3969 drivers/infiniband/hw/qedr/verbs.c __process_resp_one(dev, qp, cq, wc, resp, wr_id);
cq 3978 drivers/infiniband/hw/qedr/verbs.c static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
cq 4003 drivers/infiniband/hw/qedr/verbs.c static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
cq 4007 drivers/infiniband/hw/qedr/verbs.c consume_cqe(cq);
cq 4013 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, int num_entries,
cq 4019 drivers/infiniband/hw/qedr/verbs.c cnt = process_resp_one_srq(dev, qp, cq, wc, resp);
cq 4020 drivers/infiniband/hw/qedr/verbs.c consume_cqe(cq);
cq 4026 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq, int num_entries,
cq 4033 drivers/infiniband/hw/qedr/verbs.c cnt = process_resp_flush(qp, cq, num_entries, wc,
cq 4035 drivers/infiniband/hw/qedr/verbs.c try_consume_resp_cqe(cq, qp, resp, update);
cq 4037 drivers/infiniband/hw/qedr/verbs.c cnt = process_resp_one(dev, qp, cq, wc, resp);
cq 4038 drivers/infiniband/hw/qedr/verbs.c consume_cqe(cq);
cq 4045 drivers/infiniband/hw/qedr/verbs.c static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
cq 4049 drivers/infiniband/hw/qedr/verbs.c consume_cqe(cq);
cq 4057 drivers/infiniband/hw/qedr/verbs.c struct qedr_cq *cq = get_qedr_cq(ibcq);
cq 4064 drivers/infiniband/hw/qedr/verbs.c if (cq->destroyed) {
cq 4067 drivers/infiniband/hw/qedr/verbs.c cq, cq->icid);
cq 4071 drivers/infiniband/hw/qedr/verbs.c if (cq->cq_type == QEDR_CQ_TYPE_GSI)
cq 4074 drivers/infiniband/hw/qedr/verbs.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 4075 drivers/infiniband/hw/qedr/verbs.c cqe = cq->latest_cqe;
cq 4076 drivers/infiniband/hw/qedr/verbs.c old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
cq 4077 drivers/infiniband/hw/qedr/verbs.c while (num_entries && is_valid_cqe(cq, cqe)) {
cq 4094 drivers/infiniband/hw/qedr/verbs.c cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
cq 4096 drivers/infiniband/hw/qedr/verbs.c try_consume_req_cqe(cq, qp, &cqe->req, &update);
cq 4099 drivers/infiniband/hw/qedr/verbs.c cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
cq 4103 drivers/infiniband/hw/qedr/verbs.c cnt = qedr_poll_cq_resp_srq(dev, qp, cq, num_entries,
cq 4116 drivers/infiniband/hw/qedr/verbs.c cqe = get_cqe(cq);
cq 4118 drivers/infiniband/hw/qedr/verbs.c new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
cq 4120 drivers/infiniband/hw/qedr/verbs.c cq->cq_cons += new_cons - old_cons;
cq 4126 drivers/infiniband/hw/qedr/verbs.c doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
cq 4128 drivers/infiniband/hw/qedr/verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags);
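The qedr entries above show the driver's CQE consumption and doorbell path only in fragments. A minimal sketch of the two helpers, reassembled from the lines listed (the qed_chain_* helpers, the RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK constant and the db layout come from the qed/qedr headers; the surrounding poll loop is elided):

/* Advance to the next CQE; crossing the chain's last element flips
 * the toggle bit that is_valid_cqe() compares against. */
static void consume_cqe(struct qedr_cq *cq)
{
	if (cq->latest_cqe == cq->toggle_cqe)
		cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;

	cq->latest_cqe = qed_chain_consume(&cq->pbl);
}

/* Publish the consumer index (plus any arm flags) to the hardware
 * doorbell mapped at cq->db_addr. */
static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
{
	cq->db.data.agg_flags = flags;
	cq->db.data.value = cpu_to_le32(cons);
	writeq(cq->db.raw, cq->db_addr);
}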
cq 600 drivers/infiniband/hw/usnic/usnic_ib_verbs.c void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
cq 63 drivers/infiniband/hw/usnic/usnic_ib_verbs.h void usnic_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
cq 534 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq);
cq 67 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c struct pvrdma_cq *cq = to_vcq(ibcq);
cq 68 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c u32 val = cq->cq_handle;
cq 75 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 82 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
cq 83 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe, &head);
cq 88 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 107 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c struct pvrdma_cq *cq = to_vcq(ibcq);
cq 129 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe = entries;
cq 130 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->is_kernel = !udata;
cq 132 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (!cq->is_kernel) {
cq 138 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->umem = ib_umem_get(udata, ucmd.buf_addr, ucmd.buf_size,
cq 140 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (IS_ERR(cq->umem)) {
cq 141 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c ret = PTR_ERR(cq->umem);
cq 145 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c npages = ib_umem_page_count(cq->umem);
cq 152 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->offset = PAGE_SIZE;
cq 162 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel);
cq 170 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (cq->is_kernel)
cq 171 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ring_state = cq->pdir.pages[0];
cq 173 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
cq 175 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c refcount_set(&cq->refcnt, 1);
cq 176 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c init_completion(&cq->free);
cq 177 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c spin_lock_init(&cq->cq_lock);
cq 184 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cmd->pdir_dma = cq->pdir.dir_dma;
cq 192 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe = resp->cqe;
cq 193 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->cq_handle = resp->cq_handle;
cq 196 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
cq 199 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (!cq->is_kernel) {
cq 200 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->uar = &context->uar;
cq 206 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c pvrdma_destroy_cq(&cq->ibcq, udata);
cq 214 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c pvrdma_page_dir_cleanup(dev, &cq->pdir);
cq 216 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c ib_umem_release(cq->umem);
cq 222 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
cq 224 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (refcount_dec_and_test(&cq->refcnt))
cq 225 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c complete(&cq->free);
cq 226 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c wait_for_completion(&cq->free);
cq 228 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c ib_umem_release(cq->umem);
cq 230 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c pvrdma_page_dir_cleanup(dev, &cq->pdir);
cq 238 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
cq 240 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c struct pvrdma_cq *vcq = to_vcq(cq);
cq 243 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c struct pvrdma_dev *dev = to_vdev(cq->device);
cq 266 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c static inline struct pvrdma_cqe *get_cqe(struct pvrdma_cq *cq, int i)
cq 269 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c &cq->pdir,
cq 270 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->offset +
cq 274 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c void _pvrdma_flush_cqe(struct pvrdma_qp *qp, struct pvrdma_cq *cq)
cq 279 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (!cq->is_kernel)
cq 283 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
cq 284 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe, &head);
cq 288 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c int tail = pvrdma_idx(&cq->ring_state->rx.prod_tail,
cq 289 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe);
cq 294 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c (cq->ibcq.cqe - head + tail);
cq 298 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c curr = cq->ibcq.cqe - 1;
cq 300 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c tail = cq->ibcq.cqe - 1;
cq 301 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c curr_cqe = get_cqe(cq, curr);
cq 304 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cqe = get_cqe(cq, tail);
cq 310 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c &cq->ring_state->rx.cons_head,
cq 311 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe);
cq 318 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c static int pvrdma_poll_one(struct pvrdma_cq *cq, struct pvrdma_qp **cur_qp,
cq 321 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c struct pvrdma_dev *dev = to_vdev(cq->ibcq.device);
cq 328 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
cq 329 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cq->ibcq.cqe, &head);
cq 334 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c pvrdma_write_uar_cq(dev, cq->cq_handle | PVRDMA_UAR_CQ_POLL);
cq 343 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c cqe = get_cqe(cq, head);
cq 369 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
cq 384 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c struct pvrdma_cq *cq = to_vcq(ibcq);
cq 392 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 394 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c if (pvrdma_poll_one(cq, &cur_qp, wc + npolled))
cq 398 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 339 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c struct pvrdma_cq *cq;
cq 343 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
cq 344 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (cq)
cq 345 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c refcount_inc(&cq->refcnt);
cq 348 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (cq && cq->ibcq.event_handler) {
cq 349 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c struct ib_cq *ibcq = &cq->ibcq;
cq 353 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c e.element.cq = ibcq;
cq 357 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (cq) {
cq 358 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (refcount_dec_and_test(&cq->refcnt))
cq 359 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c complete(&cq->free);
cq 511 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c struct pvrdma_cq *cq;
cq 515 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
cq 516 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (cq)
cq 517 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c refcount_inc(&cq->refcnt);
cq 520 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (cq && cq->ibcq.comp_handler)
cq 521 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
cq 522 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (cq) {
cq 523 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (refcount_dec_and_test(&cq->refcnt))
cq 524 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c complete(&cq->free);
cq 414 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h void pvrdma_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
cq 416 drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h int pvrdma_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
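The pvrdma entries above combine a refcount with a completion to keep a CQ alive while interrupt handlers may still be using it. A hedged sketch of that lifetime idiom (locking around the cq_tbl lookup is elided; the _sketch names are illustrative, not the driver's):

static void pvrdma_cq_comp_sketch(struct pvrdma_dev *dev, u32 cqn)
{
	struct pvrdma_cq *cq;

	/* Look up the CQ and pin it before calling into the consumer. */
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (!cq)
		return;
	refcount_inc(&cq->refcnt);

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

	/* Whoever drops the last reference wakes the destroyer. */
	if (refcount_dec_and_test(&cq->refcnt))
		complete(&cq->free);
}

static void pvrdma_free_cq_sketch(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
{
	/* Drop the reference taken at create time (refcount_set(..., 1))... */
	if (refcount_dec_and_test(&cq->refcnt))
		complete(&cq->free);
	/* ...then wait until any in-flight handler has dropped its pin. */
	wait_for_completion(&cq->free);
}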
cq 67 drivers/infiniband/sw/rdmavt/cq.c bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
cq 78 drivers/infiniband/sw/rdmavt/cq.c spin_lock_irqsave(&cq->lock, flags);
cq 80 drivers/infiniband/sw/rdmavt/cq.c if (cq->ip) {
cq 81 drivers/infiniband/sw/rdmavt/cq.c u_wc = cq->queue;
cq 86 drivers/infiniband/sw/rdmavt/cq.c k_wc = cq->kqueue;
cq 96 drivers/infiniband/sw/rdmavt/cq.c if (head >= (unsigned)cq->ibcq.cqe) {
cq 97 drivers/infiniband/sw/rdmavt/cq.c head = cq->ibcq.cqe;
cq 103 drivers/infiniband/sw/rdmavt/cq.c if (unlikely(next == tail || cq->cq_full)) {
cq 104 drivers/infiniband/sw/rdmavt/cq.c struct rvt_dev_info *rdi = cq->rdi;
cq 106 drivers/infiniband/sw/rdmavt/cq.c if (!cq->cq_full)
cq 108 drivers/infiniband/sw/rdmavt/cq.c cq->cq_full = true;
cq 109 drivers/infiniband/sw/rdmavt/cq.c spin_unlock_irqrestore(&cq->lock, flags);
cq 110 drivers/infiniband/sw/rdmavt/cq.c if (cq->ibcq.event_handler) {
cq 113 drivers/infiniband/sw/rdmavt/cq.c ev.device = cq->ibcq.device;
cq 114 drivers/infiniband/sw/rdmavt/cq.c ev.element.cq = &cq->ibcq;
cq 116 drivers/infiniband/sw/rdmavt/cq.c cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
cq 120 drivers/infiniband/sw/rdmavt/cq.c trace_rvt_cq_enter(cq, entry, head);
cq 143 drivers/infiniband/sw/rdmavt/cq.c if (cq->notify == IB_CQ_NEXT_COMP ||
cq 144 drivers/infiniband/sw/rdmavt/cq.c (cq->notify == IB_CQ_SOLICITED &&
cq 150 drivers/infiniband/sw/rdmavt/cq.c cq->notify = RVT_CQ_NONE;
cq 151 drivers/infiniband/sw/rdmavt/cq.c cq->triggered++;
cq 152 drivers/infiniband/sw/rdmavt/cq.c queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
cq 153 drivers/infiniband/sw/rdmavt/cq.c &cq->comptask);
cq 156 drivers/infiniband/sw/rdmavt/cq.c spin_unlock_irqrestore(&cq->lock, flags);
cq 163 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
cq 173 drivers/infiniband/sw/rdmavt/cq.c u8 triggered = cq->triggered;
cq 182 drivers/infiniband/sw/rdmavt/cq.c cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
cq 185 drivers/infiniband/sw/rdmavt/cq.c if (cq->triggered == triggered)
cq 205 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
cq 250 drivers/infiniband/sw/rdmavt/cq.c cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
cq 251 drivers/infiniband/sw/rdmavt/cq.c if (IS_ERR(cq->ip)) {
cq 252 drivers/infiniband/sw/rdmavt/cq.c err = PTR_ERR(cq->ip);
cq 256 drivers/infiniband/sw/rdmavt/cq.c err = ib_copy_to_udata(udata, &cq->ip->offset,
cq 257 drivers/infiniband/sw/rdmavt/cq.c sizeof(cq->ip->offset));
cq 272 drivers/infiniband/sw/rdmavt/cq.c if (cq->ip) {
cq 274 drivers/infiniband/sw/rdmavt/cq.c list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
cq 283 drivers/infiniband/sw/rdmavt/cq.c cq->rdi = rdi;
cq 285 drivers/infiniband/sw/rdmavt/cq.c cq->comp_vector_cpu =
cq 288 drivers/infiniband/sw/rdmavt/cq.c cq->comp_vector_cpu =
cq 291 drivers/infiniband/sw/rdmavt/cq.c cq->ibcq.cqe = entries;
cq 292 drivers/infiniband/sw/rdmavt/cq.c cq->notify = RVT_CQ_NONE;
cq 293 drivers/infiniband/sw/rdmavt/cq.c spin_lock_init(&cq->lock);
cq 294 drivers/infiniband/sw/rdmavt/cq.c INIT_WORK(&cq->comptask, send_complete);
cq 296 drivers/infiniband/sw/rdmavt/cq.c cq->queue = u_wc;
cq 298 drivers/infiniband/sw/rdmavt/cq.c cq->kqueue = k_wc;
cq 300 drivers/infiniband/sw/rdmavt/cq.c trace_rvt_create_cq(cq, attr);
cq 304 drivers/infiniband/sw/rdmavt/cq.c kfree(cq->ip);
cq 320 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
cq 321 drivers/infiniband/sw/rdmavt/cq.c struct rvt_dev_info *rdi = cq->rdi;
cq 323 drivers/infiniband/sw/rdmavt/cq.c flush_work(&cq->comptask);
cq 327 drivers/infiniband/sw/rdmavt/cq.c if (cq->ip)
cq 328 drivers/infiniband/sw/rdmavt/cq.c kref_put(&cq->ip->ref, rvt_release_mmap_info);
cq 330 drivers/infiniband/sw/rdmavt/cq.c vfree(cq->kqueue);
cq 345 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
cq 349 drivers/infiniband/sw/rdmavt/cq.c spin_lock_irqsave(&cq->lock, flags);
cq 354 drivers/infiniband/sw/rdmavt/cq.c if (cq->notify != IB_CQ_NEXT_COMP)
cq 355 drivers/infiniband/sw/rdmavt/cq.c cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
cq 358 drivers/infiniband/sw/rdmavt/cq.c if (cq->queue) {
cq 359 drivers/infiniband/sw/rdmavt/cq.c if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
cq 360 drivers/infiniband/sw/rdmavt/cq.c RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
cq 363 drivers/infiniband/sw/rdmavt/cq.c if (cq->kqueue->head != cq->kqueue->tail)
cq 368 drivers/infiniband/sw/rdmavt/cq.c spin_unlock_irqrestore(&cq->lock, flags);
cq 381 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
cq 385 drivers/infiniband/sw/rdmavt/cq.c struct rvt_dev_info *rdi = cq->rdi;
cq 419 drivers/infiniband/sw/rdmavt/cq.c spin_lock_irq(&cq->lock);
cq 425 drivers/infiniband/sw/rdmavt/cq.c old_u_wc = cq->queue;
cq 429 drivers/infiniband/sw/rdmavt/cq.c old_k_wc = cq->kqueue;
cq 434 drivers/infiniband/sw/rdmavt/cq.c if (head > (u32)cq->ibcq.cqe)
cq 435 drivers/infiniband/sw/rdmavt/cq.c head = (u32)cq->ibcq.cqe;
cq 436 drivers/infiniband/sw/rdmavt/cq.c if (tail > (u32)cq->ibcq.cqe)
cq 437 drivers/infiniband/sw/rdmavt/cq.c tail = (u32)cq->ibcq.cqe;
cq 439 drivers/infiniband/sw/rdmavt/cq.c n = cq->ibcq.cqe + 1 + head - tail;
cq 451 drivers/infiniband/sw/rdmavt/cq.c if (tail == (u32)cq->ibcq.cqe)
cq 456 drivers/infiniband/sw/rdmavt/cq.c cq->ibcq.cqe = cqe;
cq 460 drivers/infiniband/sw/rdmavt/cq.c cq->queue = u_wc;
cq 464 drivers/infiniband/sw/rdmavt/cq.c cq->kqueue = k_wc;
cq 466 drivers/infiniband/sw/rdmavt/cq.c spin_unlock_irq(&cq->lock);
cq 473 drivers/infiniband/sw/rdmavt/cq.c if (cq->ip) {
cq 474 drivers/infiniband/sw/rdmavt/cq.c struct rvt_mmap_info *ip = cq->ip;
cq 498 drivers/infiniband/sw/rdmavt/cq.c spin_unlock_irq(&cq->lock);
cq 519 drivers/infiniband/sw/rdmavt/cq.c struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
cq 526 drivers/infiniband/sw/rdmavt/cq.c if (cq->ip)
cq 529 drivers/infiniband/sw/rdmavt/cq.c spin_lock_irqsave(&cq->lock, flags);
cq 531 drivers/infiniband/sw/rdmavt/cq.c wc = cq->kqueue;
cq 533 drivers/infiniband/sw/rdmavt/cq.c if (tail > (u32)cq->ibcq.cqe)
cq 534 drivers/infiniband/sw/rdmavt/cq.c tail = (u32)cq->ibcq.cqe;
cq 539 drivers/infiniband/sw/rdmavt/cq.c trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
cq 541 drivers/infiniband/sw/rdmavt/cq.c if (tail >= cq->ibcq.cqe)
cq 548 drivers/infiniband/sw/rdmavt/cq.c spin_unlock_irqrestore(&cq->lock, flags);
cq 78 drivers/infiniband/sw/rdmavt/trace_cq.h TP_PROTO(struct rvt_cq *cq,
cq 80 drivers/infiniband/sw/rdmavt/trace_cq.h TP_ARGS(cq, attr),
cq 81 drivers/infiniband/sw/rdmavt/trace_cq.h TP_STRUCT__entry(RDI_DEV_ENTRY(cq->rdi)
cq 88 drivers/infiniband/sw/rdmavt/trace_cq.h TP_fast_assign(RDI_DEV_ASSIGN(cq->rdi)
cq 89 drivers/infiniband/sw/rdmavt/trace_cq.h __entry->ip = cq->ip;
cq 93 drivers/infiniband/sw/rdmavt/trace_cq.h cq->comp_vector_cpu;
cq 104 drivers/infiniband/sw/rdmavt/trace_cq.h TP_PROTO(struct rvt_cq *cq, const struct ib_cq_init_attr *attr),
cq 105 drivers/infiniband/sw/rdmavt/trace_cq.h TP_ARGS(cq, attr));
cq 112 drivers/infiniband/sw/rdmavt/trace_cq.h TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
cq 113 drivers/infiniband/sw/rdmavt/trace_cq.h TP_ARGS(cq, wc, idx),
cq 115 drivers/infiniband/sw/rdmavt/trace_cq.h RDI_DEV_ENTRY(cq->rdi)
cq 126 drivers/infiniband/sw/rdmavt/trace_cq.h RDI_DEV_ASSIGN(cq->rdi)
cq 152 drivers/infiniband/sw/rdmavt/trace_cq.h TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
cq 153 drivers/infiniband/sw/rdmavt/trace_cq.h TP_ARGS(cq, wc, idx));
cq 157 drivers/infiniband/sw/rdmavt/trace_cq.h TP_PROTO(struct rvt_cq *cq, struct ib_wc *wc, u32 idx),
cq 158 drivers/infiniband/sw/rdmavt/trace_cq.h TP_ARGS(cq, wc, idx));
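The rdmavt entries above split CQ notification into a requested level (cq->notify) and a missed-event check against the ring indices. A minimal sketch of the req_notify logic, assuming the kernel-mapped ring (the user-mmapped cq->queue variant uses the RDMA_READ_UAPI_ATOMIC accessors instead; the _sketch name is illustrative):

static int rvt_req_notify_sketch(struct rvt_cq *cq,
				 enum ib_cq_notify_flags notify_flags)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/* Never downgrade a pending "notify on next completion" request. */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	/* Report work already queued so the caller can poll immediately. */
	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->kqueue->head != cq->kqueue->tail)
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}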
cq 38 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
cq 54 drivers/infiniband/sw/rxe/rxe_cq.c if (cq) {
cq 55 drivers/infiniband/sw/rxe/rxe_cq.c count = queue_count(cq->queue);
cq 71 drivers/infiniband/sw/rxe/rxe_cq.c struct rxe_cq *cq = (struct rxe_cq *)data;
cq 74 drivers/infiniband/sw/rxe/rxe_cq.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 75 drivers/infiniband/sw/rxe/rxe_cq.c if (cq->is_dying) {
cq 76 drivers/infiniband/sw/rxe/rxe_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 79 drivers/infiniband/sw/rxe/rxe_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 81 drivers/infiniband/sw/rxe/rxe_cq.c cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
cq 84 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
cq 90 drivers/infiniband/sw/rxe/rxe_cq.c cq->queue = rxe_queue_init(rxe, &cqe,
cq 92 drivers/infiniband/sw/rxe/rxe_cq.c if (!cq->queue) {
cq 98 drivers/infiniband/sw/rxe/rxe_cq.c cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
cq 100 drivers/infiniband/sw/rxe/rxe_cq.c vfree(cq->queue->buf);
cq 101 drivers/infiniband/sw/rxe/rxe_cq.c kfree(cq->queue);
cq 106 drivers/infiniband/sw/rxe/rxe_cq.c cq->is_user = 1;
cq 108 drivers/infiniband/sw/rxe/rxe_cq.c cq->is_dying = false;
cq 110 drivers/infiniband/sw/rxe/rxe_cq.c tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
cq 112 drivers/infiniband/sw/rxe/rxe_cq.c spin_lock_init(&cq->cq_lock);
cq 113 drivers/infiniband/sw/rxe/rxe_cq.c cq->ibcq.cqe = cqe;
cq 117 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
cq 123 drivers/infiniband/sw/rxe/rxe_cq.c err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
cq 125 drivers/infiniband/sw/rxe/rxe_cq.c uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
cq 127 drivers/infiniband/sw/rxe/rxe_cq.c cq->ibcq.cqe = cqe;
cq 132 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
cq 137 drivers/infiniband/sw/rxe/rxe_cq.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 139 drivers/infiniband/sw/rxe/rxe_cq.c if (unlikely(queue_full(cq->queue))) {
cq 140 drivers/infiniband/sw/rxe/rxe_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 141 drivers/infiniband/sw/rxe/rxe_cq.c if (cq->ibcq.event_handler) {
cq 142 drivers/infiniband/sw/rxe/rxe_cq.c ev.device = cq->ibcq.device;
cq 143 drivers/infiniband/sw/rxe/rxe_cq.c ev.element.cq = &cq->ibcq;
cq 145 drivers/infiniband/sw/rxe/rxe_cq.c cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
cq 151 drivers/infiniband/sw/rxe/rxe_cq.c memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
cq 158 drivers/infiniband/sw/rxe/rxe_cq.c advance_producer(cq->queue);
cq 159 drivers/infiniband/sw/rxe/rxe_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 161 drivers/infiniband/sw/rxe/rxe_cq.c if ((cq->notify == IB_CQ_NEXT_COMP) ||
cq 162 drivers/infiniband/sw/rxe/rxe_cq.c (cq->notify == IB_CQ_SOLICITED && solicited)) {
cq 163 drivers/infiniband/sw/rxe/rxe_cq.c cq->notify = 0;
cq 164 drivers/infiniband/sw/rxe/rxe_cq.c tasklet_schedule(&cq->comp_task);
cq 170 drivers/infiniband/sw/rxe/rxe_cq.c void rxe_cq_disable(struct rxe_cq *cq)
cq 174 drivers/infiniband/sw/rxe/rxe_cq.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 175 drivers/infiniband/sw/rxe/rxe_cq.c cq->is_dying = true;
cq 176 drivers/infiniband/sw/rxe/rxe_cq.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 181 drivers/infiniband/sw/rxe/rxe_cq.c struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
cq 183 drivers/infiniband/sw/rxe/rxe_cq.c if (cq->queue)
cq 184 drivers/infiniband/sw/rxe/rxe_cq.c rxe_queue_cleanup(cq->queue);
cq 52 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
cq 55 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
cq 59 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe,
cq 63 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
cq 65 drivers/infiniband/sw/rxe/rxe_loc.h void rxe_cq_disable(struct rxe_cq *cq);
cq 787 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_cq *cq = to_rcq(ibcq);
cq 803 drivers/infiniband/sw/rxe/rxe_verbs.c err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
cq 808 drivers/infiniband/sw/rxe/rxe_verbs.c return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
cq 813 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_cq *cq = to_rcq(ibcq);
cq 815 drivers/infiniband/sw/rxe/rxe_verbs.c rxe_cq_disable(cq);
cq 817 drivers/infiniband/sw/rxe/rxe_verbs.c rxe_drop_ref(cq);
cq 823 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_cq *cq = to_rcq(ibcq);
cq 833 drivers/infiniband/sw/rxe/rxe_verbs.c err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
cq 837 drivers/infiniband/sw/rxe/rxe_verbs.c err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
cq 850 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_cq *cq = to_rcq(ibcq);
cq 854 drivers/infiniband/sw/rxe/rxe_verbs.c spin_lock_irqsave(&cq->cq_lock, flags);
cq 856 drivers/infiniband/sw/rxe/rxe_verbs.c cqe = queue_head(cq->queue);
cq 861 drivers/infiniband/sw/rxe/rxe_verbs.c advance_consumer(cq->queue);
cq 863 drivers/infiniband/sw/rxe/rxe_verbs.c spin_unlock_irqrestore(&cq->cq_lock, flags);
cq 870 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_cq *cq = to_rcq(ibcq);
cq 871 drivers/infiniband/sw/rxe/rxe_verbs.c int count = queue_count(cq->queue);
cq 878 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_cq *cq = to_rcq(ibcq);
cq 882 drivers/infiniband/sw/rxe/rxe_verbs.c spin_lock_irqsave(&cq->cq_lock, irq_flags);
cq 883 drivers/infiniband/sw/rxe/rxe_verbs.c if (cq->notify != IB_CQ_NEXT_COMP)
cq 884 drivers/infiniband/sw/rxe/rxe_verbs.c cq->notify = flags & IB_CQ_SOLICITED_MASK;
cq 886 drivers/infiniband/sw/rxe/rxe_verbs.c if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
cq 889 drivers/infiniband/sw/rxe/rxe_verbs.c spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
cq 453 drivers/infiniband/sw/rxe/rxe_verbs.h static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
cq 455 drivers/infiniband/sw/rxe/rxe_verbs.h return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
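The rxe entries above show the producer side of a software CQ. Reconstructed from those fragments, a sketch of rxe_cq_post(): overflow raises an affiliated CQ error event, otherwise the CQE is copied in, the producer index advances, and the completion tasklet is kicked only if notification was armed (the IB_EVENT_CQ_ERR event type and the -EBUSY return are assumptions consistent with the fragments):

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;	/* assumed event type */
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	/* One-shot notification: consume the armed state before scheduling. */
	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}
	return 0;
}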
cq 724 drivers/infiniband/sw/siw/siw.h #define siw_dbg_cq(cq, fmt, ...) \
cq 725 drivers/infiniband/sw/siw/siw.h ibdev_dbg(cq->base_cq.device, "CQ[%u] %s: " fmt, cq->id, __func__, \
cq 740 drivers/infiniband/sw/siw/siw.h void siw_cq_flush(struct siw_cq *cq);
cq 743 drivers/infiniband/sw/siw/siw.h int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc);
cq 48 drivers/infiniband/sw/siw/siw_cq.c int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
cq 53 drivers/infiniband/sw/siw/siw_cq.c spin_lock_irqsave(&cq->lock, flags);
cq 55 drivers/infiniband/sw/siw/siw_cq.c cqe = &cq->queue[cq->cq_get % cq->num_cqe];
cq 68 drivers/infiniband/sw/siw/siw_cq.c if (likely(cq->kernel_verbs)) {
cq 74 drivers/infiniband/sw/siw/siw_cq.c siw_dbg_cq(cq,
cq 76 drivers/infiniband/sw/siw/siw_cq.c cq->cq_get % cq->num_cqe, cqe->opcode,
cq 80 drivers/infiniband/sw/siw/siw_cq.c cq->cq_get++;
cq 82 drivers/infiniband/sw/siw/siw_cq.c spin_unlock_irqrestore(&cq->lock, flags);
cq 86 drivers/infiniband/sw/siw/siw_cq.c spin_unlock_irqrestore(&cq->lock, flags);
cq 96 drivers/infiniband/sw/siw/siw_cq.c void siw_cq_flush(struct siw_cq *cq)
cq 100 drivers/infiniband/sw/siw/siw_cq.c while (siw_reap_cqe(cq, &wc))
cq 1021 drivers/infiniband/sw/siw/siw_qp.c static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
cq 1025 drivers/infiniband/sw/siw/siw_qp.c if (!cq->base_cq.comp_handler)
cq 1029 drivers/infiniband/sw/siw/siw_qp.c cq_notify = READ_ONCE(cq->notify->flags);
cq 1040 drivers/infiniband/sw/siw/siw_qp.c WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
cq 1050 drivers/infiniband/sw/siw/siw_qp.c struct siw_cq *cq = qp->scq;
cq 1053 drivers/infiniband/sw/siw/siw_qp.c if (cq) {
cq 1059 drivers/infiniband/sw/siw/siw_qp.c spin_lock_irqsave(&cq->lock, flags);
cq 1061 drivers/infiniband/sw/siw/siw_qp.c idx = cq->cq_put % cq->num_cqe;
cq 1062 drivers/infiniband/sw/siw/siw_qp.c cqe = &cq->queue[idx];
cq 1073 drivers/infiniband/sw/siw/siw_qp.c if (cq->kernel_verbs)
cq 1083 drivers/infiniband/sw/siw/siw_qp.c cq->cq_put++;
cq 1084 drivers/infiniband/sw/siw/siw_qp.c notify = siw_cq_notify_now(cq, sqe_flags);
cq 1086 drivers/infiniband/sw/siw/siw_qp.c spin_unlock_irqrestore(&cq->lock, flags);
cq 1089 drivers/infiniband/sw/siw/siw_qp.c siw_dbg_cq(cq, "Call completion handler\n");
cq 1090 drivers/infiniband/sw/siw/siw_qp.c cq->base_cq.comp_handler(&cq->base_cq,
cq 1091 drivers/infiniband/sw/siw/siw_qp.c cq->base_cq.cq_context);
cq 1094 drivers/infiniband/sw/siw/siw_qp.c spin_unlock_irqrestore(&cq->lock, flags);
cq 1096 drivers/infiniband/sw/siw/siw_qp.c siw_cq_event(cq, IB_EVENT_CQ_ERR);
cq 1108 drivers/infiniband/sw/siw/siw_qp.c struct siw_cq *cq = qp->rcq;
cq 1111 drivers/infiniband/sw/siw/siw_qp.c if (cq) {
cq 1116 drivers/infiniband/sw/siw/siw_qp.c spin_lock_irqsave(&cq->lock, flags);
cq 1118 drivers/infiniband/sw/siw/siw_qp.c idx = cq->cq_put % cq->num_cqe;
cq 1119 drivers/infiniband/sw/siw/siw_qp.c cqe = &cq->queue[idx];
cq 1131 drivers/infiniband/sw/siw/siw_qp.c if (cq->kernel_verbs) {
cq 1145 drivers/infiniband/sw/siw/siw_qp.c cq->cq_put++;
cq 1146 drivers/infiniband/sw/siw/siw_qp.c notify = siw_cq_notify_now(cq, SIW_WQE_SIGNALLED);
cq 1148 drivers/infiniband/sw/siw/siw_qp.c spin_unlock_irqrestore(&cq->lock, flags);
cq 1151 drivers/infiniband/sw/siw/siw_qp.c siw_dbg_cq(cq, "Call completion handler\n");
cq 1152 drivers/infiniband/sw/siw/siw_qp.c cq->base_cq.comp_handler(&cq->base_cq,
cq 1153 drivers/infiniband/sw/siw/siw_qp.c cq->base_cq.cq_context);
cq 1156 drivers/infiniband/sw/siw/siw_qp.c spin_unlock_irqrestore(&cq->lock, flags);
cq 1158 drivers/infiniband/sw/siw/siw_qp.c siw_cq_event(cq, IB_EVENT_CQ_ERR);
cq 1083 drivers/infiniband/sw/siw/siw_verbs.c struct siw_cq *cq = to_siw_cq(base_cq);
cq 1089 drivers/infiniband/sw/siw/siw_verbs.c siw_dbg_cq(cq, "free CQ resources\n");
cq 1091 drivers/infiniband/sw/siw/siw_verbs.c siw_cq_flush(cq);
cq 1093 drivers/infiniband/sw/siw/siw_verbs.c if (ctx && cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
cq 1094 drivers/infiniband/sw/siw/siw_verbs.c kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
cq 1098 drivers/infiniband/sw/siw/siw_verbs.c vfree(cq->queue);
cq 1115 drivers/infiniband/sw/siw/siw_verbs.c struct siw_cq *cq = to_siw_cq(base_cq);
cq 1129 drivers/infiniband/sw/siw/siw_verbs.c cq->base_cq.cqe = size;
cq 1130 drivers/infiniband/sw/siw/siw_verbs.c cq->num_cqe = size;
cq 1131 drivers/infiniband/sw/siw/siw_verbs.c cq->xa_cq_index = SIW_INVAL_UOBJ_KEY;
cq 1134 drivers/infiniband/sw/siw/siw_verbs.c cq->kernel_verbs = 1;
cq 1135 drivers/infiniband/sw/siw/siw_verbs.c cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
cq 1138 drivers/infiniband/sw/siw/siw_verbs.c cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
cq 1141 drivers/infiniband/sw/siw/siw_verbs.c if (cq->queue == NULL) {
cq 1145 drivers/infiniband/sw/siw/siw_verbs.c get_random_bytes(&cq->id, 4);
cq 1146 drivers/infiniband/sw/siw/siw_verbs.c siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);
cq 1148 drivers/infiniband/sw/siw/siw_verbs.c spin_lock_init(&cq->lock);
cq 1150 drivers/infiniband/sw/siw/siw_verbs.c cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
cq 1158 drivers/infiniband/sw/siw/siw_verbs.c cq->xa_cq_index =
cq 1159 drivers/infiniband/sw/siw/siw_verbs.c siw_create_uobj(ctx, cq->queue,
cq 1162 drivers/infiniband/sw/siw/siw_verbs.c if (cq->xa_cq_index == SIW_INVAL_UOBJ_KEY) {
cq 1166 drivers/infiniband/sw/siw/siw_verbs.c uresp.cq_key = cq->xa_cq_index << PAGE_SHIFT;
cq 1167 drivers/infiniband/sw/siw/siw_verbs.c uresp.cq_id = cq->id;
cq 1183 drivers/infiniband/sw/siw/siw_verbs.c if (cq && cq->queue) {
cq 1187 drivers/infiniband/sw/siw/siw_verbs.c if (cq->xa_cq_index != SIW_INVAL_UOBJ_KEY)
cq 1188 drivers/infiniband/sw/siw/siw_verbs.c kfree(xa_erase(&ctx->xa, cq->xa_cq_index));
cq 1189 drivers/infiniband/sw/siw/siw_verbs.c vfree(cq->queue);
cq 1208 drivers/infiniband/sw/siw/siw_verbs.c struct siw_cq *cq = to_siw_cq(base_cq);
cq 1212 drivers/infiniband/sw/siw/siw_verbs.c if (!siw_reap_cqe(cq, wc))
cq 1237 drivers/infiniband/sw/siw/siw_verbs.c struct siw_cq *cq = to_siw_cq(base_cq);
cq 1239 drivers/infiniband/sw/siw/siw_verbs.c siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
cq 1246 drivers/infiniband/sw/siw/siw_verbs.c smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
cq 1252 drivers/infiniband/sw/siw/siw_verbs.c smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
cq 1255 drivers/infiniband/sw/siw/siw_verbs.c return cq->cq_put - cq->cq_get;
cq 1822 drivers/infiniband/sw/siw/siw_verbs.c void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
cq 1825 drivers/infiniband/sw/siw/siw_verbs.c struct ib_cq *base_cq = &cq->base_cq;
cq 1829 drivers/infiniband/sw/siw/siw_verbs.c event.element.cq = base_cq;
cq 1832 drivers/infiniband/sw/siw/siw_verbs.c siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
cq 87 drivers/infiniband/sw/siw/siw_verbs.h void siw_cq_event(struct siw_cq *cq, enum ib_event_type type);
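The siw entries above drive notification through a flags word shared with user space (cq->notify->flags) and track ring occupancy with free-running cq_put/cq_get counters. A sketch of the two sides; the mask names SIW_NOTIFY_*/SIW_WQE_SOLICITED are taken on the assumption they match siw's uapi header, and the _sketch names are illustrative:

/* Consumer side: arm notification, optionally report queued CQEs. */
static int siw_req_notify_sketch(struct siw_cq *cq,
				 enum ib_cq_notify_flags flags)
{
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
	else
		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);

	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		/* Counter difference = CQEs posted but not yet reaped. */
		return cq->cq_put - cq->cq_get;
	return 0;
}

/* Producer side: decide whether this completion should upcall. */
static bool siw_notify_now_sketch(struct siw_cq *cq, u32 wqe_flags)
{
	u32 armed;

	if (!cq->base_cq.comp_handler)
		return false;

	armed = READ_ONCE(cq->notify->flags);
	if ((armed & SIW_NOTIFY_ALL) ||
	    ((armed & SIW_NOTIFY_SOLICITED) &&
	     (wqe_flags & SIW_WQE_SOLICITED))) {
		/* Disarm; re-arming happens via a later req_notify call. */
		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
		return true;
	}
	return false;
}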
cq 483 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr);
cq 484 drivers/infiniband/ulp/ipoib/ipoib.h void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr);
cq 527 drivers/infiniband/ulp/ipoib/ipoib_ib.c void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
cq 534 drivers/infiniband/ulp/ipoib/ipoib_ib.c void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
cq 313 drivers/infiniband/ulp/iser/iscsi_iser.h struct ib_cq *cq;
cq 577 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc);
cq 578 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc);
cq 579 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
cq 580 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
cq 581 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
cq 582 drivers/infiniband/ulp/iser/iscsi_iser.h void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
cq 558 drivers/infiniband/ulp/iser/iser_initiator.c void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
cq 650 drivers/infiniband/ulp/iser/iser_initiator.c void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
cq 702 drivers/infiniband/ulp/iser/iser_initiator.c void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
cq 708 drivers/infiniband/ulp/iser/iser_initiator.c void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
cq 724 drivers/infiniband/ulp/iser/iser_initiator.c void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
cq 70 drivers/infiniband/ulp/iser/iser_memory.c void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
cq 99 drivers/infiniband/ulp/iser/iser_verbs.c comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
cq 101 drivers/infiniband/ulp/iser/iser_verbs.c if (IS_ERR(comp->cq)) {
cq 102 drivers/infiniband/ulp/iser/iser_verbs.c comp->cq = NULL;
cq 116 drivers/infiniband/ulp/iser/iser_verbs.c if (comp->cq)
cq 117 drivers/infiniband/ulp/iser/iser_verbs.c ib_free_cq(comp->cq);
cq 138 drivers/infiniband/ulp/iser/iser_verbs.c ib_free_cq(comp->cq);
cq 139 drivers/infiniband/ulp/iser/iser_verbs.c comp->cq = NULL;
cq 393 drivers/infiniband/ulp/iser/iser_verbs.c init_attr.send_cq = ib_conn->comp->cq;
cq 394 drivers/infiniband/ulp/iser/iser_verbs.c init_attr.recv_cq = ib_conn->comp->cq;
cq 51 drivers/infiniband/ulp/isert/ib_isert.c static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
cq 52 drivers/infiniband/ulp/isert/ib_isert.c static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
cq 53 drivers/infiniband/ulp/isert/ib_isert.c static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
cq 54 drivers/infiniband/ulp/isert/ib_isert.c static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
cq 126 drivers/infiniband/ulp/isert/ib_isert.c attr.send_cq = comp->cq;
cq 127 drivers/infiniband/ulp/isert/ib_isert.c attr.recv_cq = comp->cq;
cq 241 drivers/infiniband/ulp/isert/ib_isert.c if (comp->cq)
cq 242 drivers/infiniband/ulp/isert/ib_isert.c ib_free_cq(comp->cq);
cq 272 drivers/infiniband/ulp/isert/ib_isert.c comp->cq = ib_alloc_cq(device->ib_device, comp, max_cqe, i,
cq 274 drivers/infiniband/ulp/isert/ib_isert.c if (IS_ERR(comp->cq)) {
cq 276 drivers/infiniband/ulp/isert/ib_isert.c ret = PTR_ERR(comp->cq);
cq 277 drivers/infiniband/ulp/isert/ib_isert.c comp->cq = NULL;
cq 1396 drivers/infiniband/ulp/isert/ib_isert.c isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1455 drivers/infiniband/ulp/isert/ib_isert.c isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1653 drivers/infiniband/ulp/isert/ib_isert.c isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1695 drivers/infiniband/ulp/isert/ib_isert.c isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1771 drivers/infiniband/ulp/isert/ib_isert.c isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1787 drivers/infiniband/ulp/isert/ib_isert.c isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 180 drivers/infiniband/ulp/isert/ib_isert.h struct ib_cq *cq;
cq 152 drivers/infiniband/ulp/srp/ib_srp.c static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
cq 153 drivers/infiniband/ulp/srp/ib_srp.c static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
cq 1221 drivers/infiniband/ulp/srp/ib_srp.c static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1223 drivers/infiniband/ulp/srp/ib_srp.c srp_handle_qp_err(cq, wc, "INV RKEY");
cq 1509 drivers/infiniband/ulp/srp/ib_srp.c static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1511 drivers/infiniband/ulp/srp/ib_srp.c srp_handle_qp_err(cq, wc, "FAST REG");
cq 2050 drivers/infiniband/ulp/srp/ib_srp.c static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2053 drivers/infiniband/ulp/srp/ib_srp.c struct srp_rdma_ch *ch = cq->cq_context;
cq 2056 drivers/infiniband/ulp/srp/ib_srp.c srp_handle_qp_err(cq, wc, "SEND");
cq 2248 drivers/infiniband/ulp/srp/ib_srp.c static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2251 drivers/infiniband/ulp/srp/ib_srp.c struct srp_rdma_ch *ch = cq->cq_context;
cq 2258 drivers/infiniband/ulp/srp/ib_srp.c srp_handle_qp_err(cq, wc, "RECV");
cq 2324 drivers/infiniband/ulp/srp/ib_srp.c static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
cq 2327 drivers/infiniband/ulp/srp/ib_srp.c struct srp_rdma_ch *ch = cq->cq_context;
cq 100 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
cq 101 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
cq 870 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
cq 872 drivers/infiniband/ulp/srpt/ib_srpt.c struct srpt_rdma_ch *ch = cq->cq_context;
cq 1323 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1325 drivers/infiniband/ulp/srpt/ib_srpt.c struct srpt_rdma_ch *ch = cq->cq_context;
cq 1684 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1686 drivers/infiniband/ulp/srpt/ib_srpt.c struct srpt_rdma_ch *ch = cq->cq_context;
cq 1745 drivers/infiniband/ulp/srpt/ib_srpt.c static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 1747 drivers/infiniband/ulp/srpt/ib_srpt.c struct srpt_rdma_ch *ch = cq->cq_context;
cq 1794 drivers/infiniband/ulp/srpt/ib_srpt.c ch->cq = ib_alloc_cq_any(sdev->device, ch, ch->rq_size + sq_size,
cq 1796 drivers/infiniband/ulp/srpt/ib_srpt.c if (IS_ERR(ch->cq)) {
cq 1797 drivers/infiniband/ulp/srpt/ib_srpt.c ret = PTR_ERR(ch->cq);
cq 1806 drivers/infiniband/ulp/srpt/ib_srpt.c qp_init->send_cq = ch->cq;
cq 1807 drivers/infiniband/ulp/srpt/ib_srpt.c qp_init->recv_cq = ch->cq;
cq 1851 drivers/infiniband/ulp/srpt/ib_srpt.c ib_free_cq(ch->cq);
cq 1864 drivers/infiniband/ulp/srpt/ib_srpt.c __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
cq 1877 drivers/infiniband/ulp/srpt/ib_srpt.c ib_free_cq(ch->cq);
cq 1884 drivers/infiniband/ulp/srpt/ib_srpt.c ib_free_cq(ch->cq);
cq 305 drivers/infiniband/ulp/srpt/ib_srpt.h struct ib_cq *cq;
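The ULP entries above (iser, isert, srp, srpt) all follow the same client pattern: allocate a CQ with ib_alloc_cq()/ib_alloc_cq_any(), wire it into the QP's send_cq/recv_cq, and recover the per-connection context from cq->cq_context inside the completion handler. A hedged sketch under an invented demo_ch type (the channel layout and error print are placeholders; handlers like this one are referenced from each work request's struct ib_cqe .done member):

struct demo_ch {
	struct ib_cq *cq;
};

static void demo_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* The private pointer passed at allocation time comes back here. */
	struct demo_ch *ch = cq->cq_context;

	if (wc->status != IB_WC_SUCCESS)
		pr_err("SEND failed on ch %p: status %d\n", ch, wc->status);
}

static int demo_alloc_cq(struct ib_device *dev, struct demo_ch *ch, int nr_cqe)
{
	/* ib_alloc_cq_any() picks a completion vector for us. */
	ch->cq = ib_alloc_cq_any(dev, ch, nr_cqe, IB_POLL_WORKQUEUE);
	if (IS_ERR(ch->cq))
		return PTR_ERR(ch->cq);
	return 0;
}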
cq 830 drivers/isdn/hardware/mISDN/avmfritz.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 832 drivers/isdn/hardware/mISDN/avmfritz.c return mISDN_ctrl_bchannel(bch, cq);
cq 867 drivers/isdn/hardware/mISDN/avmfritz.c channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
cq 871 drivers/isdn/hardware/mISDN/avmfritz.c switch (cq->op) {
cq 873 drivers/isdn/hardware/mISDN/avmfritz.c cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
cq 877 drivers/isdn/hardware/mISDN/avmfritz.c if (cq->channel < 0 || cq->channel > 3) {
cq 881 drivers/isdn/hardware/mISDN/avmfritz.c ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
cq 884 drivers/isdn/hardware/mISDN/avmfritz.c ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1);
cq 887 drivers/isdn/hardware/mISDN/avmfritz.c pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
cq 3551 drivers/isdn/hardware/mISDN/hfcmulti.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 3555 drivers/isdn/hardware/mISDN/hfcmulti.c (struct dsp_features *)(*((u_long *)&cq->p1));
cq 3563 drivers/isdn/hardware/mISDN/hfcmulti.c switch (cq->op) {
cq 3565 drivers/isdn/hardware/mISDN/hfcmulti.c ret = mISDN_ctrl_bchannel(bch, cq);
cq 3566 drivers/isdn/hardware/mISDN/hfcmulti.c cq->op |= MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP;
cq 3569 drivers/isdn/hardware/mISDN/hfcmulti.c ret = mISDN_ctrl_bchannel(bch, cq);
cq 3570 drivers/isdn/hardware/mISDN/hfcmulti.c hc->chan[bch->slot].rx_off = !!cq->p1;
cq 3583 drivers/isdn/hardware/mISDN/hfcmulti.c ret = mISDN_ctrl_bchannel(bch, cq);
cq 3607 drivers/isdn/hardware/mISDN/hfcmulti.c slot_tx = cq->p1 & 0xff;
cq 3608 drivers/isdn/hardware/mISDN/hfcmulti.c bank_tx = cq->p1 >> 8;
cq 3609 drivers/isdn/hardware/mISDN/hfcmulti.c slot_rx = cq->p2 & 0xff;
cq 3610 drivers/isdn/hardware/mISDN/hfcmulti.c bank_rx = cq->p2 >> 8;
cq 3637 drivers/isdn/hardware/mISDN/hfcmulti.c num = cq->p1 & 0xff;
cq 3659 drivers/isdn/hardware/mISDN/hfcmulti.c vpm_echocan_on(hc, bch->slot, cq->p1);
cq 3674 drivers/isdn/hardware/mISDN/hfcmulti.c ret = mISDN_ctrl_bchannel(bch, cq);
cq 4116 drivers/isdn/hardware/mISDN/hfcmulti.c channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
cq 4122 drivers/isdn/hardware/mISDN/hfcmulti.c switch (cq->op) {
cq 4124 drivers/isdn/hardware/mISDN/hfcmulti.c cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3;
cq 4127 drivers/isdn/hardware/mISDN/hfcmulti.c wd_cnt = cq->p1 & 0xf;
cq 4128 drivers/isdn/hardware/mISDN/hfcmulti.c wd_mode = !!(cq->p1 >> 4);
cq 4155 drivers/isdn/hardware/mISDN/hfcmulti.c ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
cq 4159 drivers/isdn/hardware/mISDN/hfcmulti.c __func__, cq->op);
cq 1520 drivers/isdn/hardware/mISDN/hfcpci.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 1522 drivers/isdn/hardware/mISDN/hfcpci.c return mISDN_ctrl_bchannel(bch, cq);
cq 1775 drivers/isdn/hardware/mISDN/hfcpci.c channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
cq 1780 drivers/isdn/hardware/mISDN/hfcpci.c switch (cq->op) {
cq 1782 drivers/isdn/hardware/mISDN/hfcpci.c cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
cq 1787 drivers/isdn/hardware/mISDN/hfcpci.c if (cq->channel < 0 || cq->channel > 2) {
cq 1791 drivers/isdn/hardware/mISDN/hfcpci.c if (cq->channel & 1) {
cq 1803 drivers/isdn/hardware/mISDN/hfcpci.c if (cq->channel & 2) {
cq 1815 drivers/isdn/hardware/mISDN/hfcpci.c if (cq->channel & 3)
cq 1825 drivers/isdn/hardware/mISDN/hfcpci.c if (cq->channel == cq->p1) {
cq 1829 drivers/isdn/hardware/mISDN/hfcpci.c if (cq->channel < 1 || cq->channel > 2 ||
cq 1830 drivers/isdn/hardware/mISDN/hfcpci.c cq->p1 < 1 || cq->p1 > 2) {
cq 1861 drivers/isdn/hardware/mISDN/hfcpci.c ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
cq 1865 drivers/isdn/hardware/mISDN/hfcpci.c __func__, cq->op);
cq 492 drivers/isdn/hardware/mISDN/hfcsusb.c channel_ctrl(struct hfcsusb *hw, struct mISDN_ctrl_req *cq)
cq 498 drivers/isdn/hardware/mISDN/hfcsusb.c hw->name, __func__, (cq->op), (cq->channel));
cq 500 drivers/isdn/hardware/mISDN/hfcsusb.c switch (cq->op) {
cq 502 drivers/isdn/hardware/mISDN/hfcsusb.c cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
cq 507 drivers/isdn/hardware/mISDN/hfcsusb.c hw->name, __func__, cq->op);
cq 794 drivers/isdn/hardware/mISDN/hfcsusb.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 796 drivers/isdn/hardware/mISDN/hfcsusb.c return mISDN_ctrl_bchannel(bch, cq);
cq 1380 drivers/isdn/hardware/mISDN/mISDNipac.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 1382 drivers/isdn/hardware/mISDN/mISDNipac.c return mISDN_ctrl_bchannel(bch, cq);
cq 1497 drivers/isdn/hardware/mISDN/mISDNipac.c channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
cq 1501 drivers/isdn/hardware/mISDN/mISDNipac.c switch (cq->op) {
cq 1503 drivers/isdn/hardware/mISDN/mISDNipac.c cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
cq 1507 drivers/isdn/hardware/mISDN/mISDNipac.c if (cq->channel < 0 || cq->channel > 3) {
cq 1511 drivers/isdn/hardware/mISDN/mISDNipac.c ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel);
cq 1514 drivers/isdn/hardware/mISDN/mISDNipac.c ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1);
cq 1517 drivers/isdn/hardware/mISDN/mISDNipac.c pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op);
cq 1564 drivers/isdn/hardware/mISDN/mISDNisar.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 1566 drivers/isdn/hardware/mISDN/mISDNisar.c return mISDN_ctrl_bchannel(bch, cq);
cq 783 drivers/isdn/hardware/mISDN/netjet.c channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
cq 785 drivers/isdn/hardware/mISDN/netjet.c return mISDN_ctrl_bchannel(&bc->bch, cq);
cq 821 drivers/isdn/hardware/mISDN/netjet.c channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
cq 825 drivers/isdn/hardware/mISDN/netjet.c switch (cq->op) {
cq 827 drivers/isdn/hardware/mISDN/netjet.c cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
cq 831 drivers/isdn/hardware/mISDN/netjet.c if (cq->channel < 0 || cq->channel > 3) {
cq 835 drivers/isdn/hardware/mISDN/netjet.c ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
cq 838 drivers/isdn/hardware/mISDN/netjet.c ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
cq 841 drivers/isdn/hardware/mISDN/netjet.c pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
cq 208 drivers/isdn/hardware/mISDN/speedfax.c channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
cq 212 drivers/isdn/hardware/mISDN/speedfax.c switch (cq->op) {
cq 214 drivers/isdn/hardware/mISDN/speedfax.c cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
cq 218 drivers/isdn/hardware/mISDN/speedfax.c if (cq->channel < 0 || cq->channel > 3) {
cq 222 drivers/isdn/hardware/mISDN/speedfax.c ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel);
cq 225 drivers/isdn/hardware/mISDN/speedfax.c ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1);
cq 228 drivers/isdn/hardware/mISDN/speedfax.c pr_info("%s: unknown Op %x\n", sf->name, cq->op);
cq 981 drivers/isdn/hardware/mISDN/w6692.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 983 drivers/isdn/hardware/mISDN/w6692.c return mISDN_ctrl_bchannel(bch, cq);
cq 1004 drivers/isdn/hardware/mISDN/w6692.c channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq)
cq 1008 drivers/isdn/hardware/mISDN/w6692.c switch (cq->op) {
cq 1010 drivers/isdn/hardware/mISDN/w6692.c cq->op = MISDN_CTRL_L1_TIMER3;
cq 1013 drivers/isdn/hardware/mISDN/w6692.c ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
cq 1016 drivers/isdn/hardware/mISDN/w6692.c pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op);
cq 364 drivers/isdn/mISDN/dsp_cmx.c struct mISDN_ctrl_req cq;
cq 366 drivers/isdn/mISDN/dsp_cmx.c memset(&cq, 0, sizeof(cq));
cq 367 drivers/isdn/mISDN/dsp_cmx.c cq.op = message;
cq 368 drivers/isdn/mISDN/dsp_cmx.c cq.p1 = param1 | (param2 << 8);
cq 369 drivers/isdn/mISDN/dsp_cmx.c cq.p2 = param3 | (param4 << 8);
cq 371 drivers/isdn/mISDN/dsp_cmx.c dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq);
cq 190 drivers/isdn/mISDN/dsp_core.c struct mISDN_ctrl_req cq;
cq 193 drivers/isdn/mISDN/dsp_core.c memset(&cq, 0, sizeof(cq));
cq 222 drivers/isdn/mISDN/dsp_core.c cq.op = MISDN_CTRL_RX_OFF;
cq 223 drivers/isdn/mISDN/dsp_core.c cq.p1 = rx_off;
cq 224 drivers/isdn/mISDN/dsp_core.c if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
cq 257 drivers/isdn/mISDN/dsp_core.c struct mISDN_ctrl_req cq;
cq 259 drivers/isdn/mISDN/dsp_core.c memset(&cq, 0, sizeof(cq));
cq 267 drivers/isdn/mISDN/dsp_core.c cq.op = MISDN_CTRL_FILL_EMPTY;
cq 268 drivers/isdn/mISDN/dsp_core.c cq.p1 = 1;
cq 269 drivers/isdn/mISDN/dsp_core.c cq.p2 = dsp_silence;
cq 270 drivers/isdn/mISDN/dsp_core.c if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
cq 629 drivers/isdn/mISDN/dsp_core.c struct mISDN_ctrl_req cq;
cq 637 drivers/isdn/mISDN/dsp_core.c memset(&cq, 0, sizeof(cq));
cq 638 drivers/isdn/mISDN/dsp_core.c cq.op = MISDN_CTRL_GETOP;
cq 639 drivers/isdn/mISDN/dsp_core.c if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) {
cq 644 drivers/isdn/mISDN/dsp_core.c if (cq.op & MISDN_CTRL_RX_OFF)
cq 646 drivers/isdn/mISDN/dsp_core.c if (cq.op & MISDN_CTRL_FILL_EMPTY)
cq 650 drivers/isdn/mISDN/dsp_core.c if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) {
cq 651 drivers/isdn/mISDN/dsp_core.c cq.op = MISDN_CTRL_HW_FEATURES;
cq 652 drivers/isdn/mISDN/dsp_core.c *((u_long *)&cq.p1) = (u_long)&dsp->features;
cq 653 drivers/isdn/mISDN/dsp_core.c if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) {
cq 38 drivers/isdn/mISDN/dsp_hwec.c struct mISDN_ctrl_req cq;
cq 82 drivers/isdn/mISDN/dsp_hwec.c memset(&cq, 0, sizeof(cq));
cq 83 drivers/isdn/mISDN/dsp_hwec.c cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
cq 84 drivers/isdn/mISDN/dsp_hwec.c cq.p1 = deftaps;
cq 85 drivers/isdn/mISDN/dsp_hwec.c if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
cq 94 drivers/isdn/mISDN/dsp_hwec.c struct mISDN_ctrl_req cq;
cq 103 drivers/isdn/mISDN/dsp_hwec.c memset(&cq, 0, sizeof(cq));
cq 104 drivers/isdn/mISDN/dsp_hwec.c cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;
cq 105 drivers/isdn/mISDN/dsp_hwec.c if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
cq 156 drivers/isdn/mISDN/hwchannel.c mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 160 drivers/isdn/mISDN/hwchannel.c switch (cq->op) {
cq 162 drivers/isdn/mISDN/hwchannel.c cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
cq 166 drivers/isdn/mISDN/hwchannel.c if (cq->p1) {
cq 167 drivers/isdn/mISDN/hwchannel.c memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
cq 175 drivers/isdn/mISDN/hwchannel.c cq->p2 = bch->dropcnt;
cq 176 drivers/isdn/mISDN/hwchannel.c if (cq->p1)
cq 183 drivers/isdn/mISDN/hwchannel.c if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
cq 184 drivers/isdn/mISDN/hwchannel.c bch->next_maxlen = cq->p2;
cq 185 drivers/isdn/mISDN/hwchannel.c if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
cq 186 drivers/isdn/mISDN/hwchannel.c bch->next_minlen = cq->p1;
cq 188 drivers/isdn/mISDN/hwchannel.c cq->p1 = bch->minlen;
cq 189 drivers/isdn/mISDN/hwchannel.c cq->p2 = bch->maxlen;
cq 192 drivers/isdn/mISDN/hwchannel.c pr_info("mISDN unhandled control %x operation\n", cq->op);
cq 931 drivers/isdn/mISDN/l1oip_core.c channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
cq 936 drivers/isdn/mISDN/l1oip_core.c switch (cq->op) {
cq 938 drivers/isdn/mISDN/l1oip_core.c cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER
cq 942 drivers/isdn/mISDN/l1oip_core.c hc->remoteip = (u32)cq->p1;
cq 943 drivers/isdn/mISDN/l1oip_core.c hc->remoteport = cq->p2 & 0xffff;
cq 944 drivers/isdn/mISDN/l1oip_core.c hc->localport = cq->p2 >> 16;
cq 963 drivers/isdn/mISDN/l1oip_core.c cq->p1 = hc->remoteip;
cq 964 drivers/isdn/mISDN/l1oip_core.c cq->p2 = hc->remoteport | (hc->localport << 16);
cq 968 drivers/isdn/mISDN/l1oip_core.c __func__, cq->op);
cq 1171 drivers/isdn/mISDN/l1oip_core.c channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
cq 1175 drivers/isdn/mISDN/l1oip_core.c (struct dsp_features *)(*((u_long *)&cq->p1));
cq 1177 drivers/isdn/mISDN/l1oip_core.c switch (cq->op) {
cq 1179 drivers/isdn/mISDN/l1oip_core.c cq->op = MISDN_CTRL_HW_FEATURES_OP;
cq 1191 drivers/isdn/mISDN/l1oip_core.c __func__, cq->op);
cq 281 drivers/isdn/mISDN/socket.c struct mISDN_ctrl_req cq;
cq 292 drivers/isdn/mISDN/socket.c if (copy_from_user(&cq, p, sizeof(cq))) {
cq 299 drivers/isdn/mISDN/socket.c if (bchan->nr == cq.channel) {
cq 301 drivers/isdn/mISDN/socket.c CONTROL_CHANNEL, &cq);
cq 307 drivers/isdn/mISDN/socket.c CONTROL_CHANNEL, &cq);
cq 310 drivers/isdn/mISDN/socket.c if (copy_to_user(p, &cq, sizeof(cq)))
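Throughout the isdn entries above, control requests travel as a small mISDN_ctrl_req struct handed to a channel's ctrl() hook with CONTROL_CHANNEL, and MISDN_CTRL_GETOP first asks which operations the peer implements. A sketch of a caller (demo_set_rx_off is illustrative, not a driver function):

static int demo_set_rx_off(struct mISDNchannel *peer, int rx_off)
{
	struct mISDN_ctrl_req cq;

	/* Ask the peer which control operations it supports. */
	memset(&cq, 0, sizeof(cq));
	cq.op = MISDN_CTRL_GETOP;
	if (peer->ctrl(peer, CONTROL_CHANNEL, &cq) < 0)
		return -EOPNOTSUPP;
	if (!(cq.op & MISDN_CTRL_RX_OFF))
		return -EOPNOTSUPP;	/* RX disable not implemented */

	/* Issue the actual request; parameters ride in p1/p2. */
	memset(&cq, 0, sizeof(cq));
	cq.op = MISDN_CTRL_RX_OFF;
	cq.p1 = rx_off;
	return peer->ctrl(peer, CONTROL_CHANNEL, &cq);
}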
(uintptr_t) cq->kernel_address; cq 95 drivers/misc/habanalabs/irq.c bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) & cq 102 drivers/misc/habanalabs/irq.c cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci]; cq 117 drivers/misc/habanalabs/irq.c queue = &hdev->kernel_queues[cq->hw_queue_id]; cq 135 drivers/misc/habanalabs/irq.c cq->ci = hl_cq_inc_ptr(cq->ci); cq 138 drivers/misc/habanalabs/irq.c atomic_inc(&cq->free_slots_cnt); cq 133 drivers/net/ethernet/amazon/ena/ena_com.c struct ena_com_admin_cq *cq = &queue->cq; cq 136 drivers/net/ethernet/amazon/ena/ena_com.c cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr, cq 139 drivers/net/ethernet/amazon/ena/ena_com.c if (!cq->entries) { cq 144 drivers/net/ethernet/amazon/ena/ena_com.c cq->head = 0; cq 145 drivers/net/ethernet/amazon/ena/ena_com.c cq->phase = 1; cq 491 drivers/net/ethernet/amazon/ena/ena_com.c head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1); cq 492 drivers/net/ethernet/amazon/ena/ena_com.c phase = admin_queue->cq.phase; cq 494 drivers/net/ethernet/amazon/ena/ena_com.c cqe = &admin_queue->cq.entries[head_masked]; cq 512 drivers/net/ethernet/amazon/ena/ena_com.c cqe = &admin_queue->cq.entries[head_masked]; cq 515 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->cq.head += comp_num; cq 516 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->cq.phase = phase; cq 1634 drivers/net/ethernet/amazon/ena/ena_com.c struct ena_com_admin_cq *cq = &admin_queue->cq; cq 1649 drivers/net/ethernet/amazon/ena/ena_com.c if (cq->entries) cq 1650 drivers/net/ethernet/amazon/ena/ena_com.c dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq 1651 drivers/net/ethernet/amazon/ena/ena_com.c cq->dma_addr); cq 1652 drivers/net/ethernet/amazon/ena/ena_com.c cq->entries = NULL; cq 1785 drivers/net/ethernet/amazon/ena/ena_com.c addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr); cq 1786 drivers/net/ethernet/amazon/ena/ena_com.c addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr); cq 245 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_com_admin_cq cq; cq 1799 drivers/net/ethernet/broadcom/cnic.c ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN; cq 1800 drivers/net/ethernet/broadcom/cnic.c ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi; cq 1801 drivers/net/ethernet/broadcom/cnic.c ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo; cq 1814 drivers/net/ethernet/broadcom/cnic.c ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN; cq 1815 drivers/net/ethernet/broadcom/cnic.c ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo = cq 1817 drivers/net/ethernet/broadcom/cnic.c ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi = cq 3893 drivers/net/ethernet/broadcom/cnic_defs.h struct ustorm_iscsi_cq_db cq[8]; cq 495 drivers/net/ethernet/brocade/bna/bfi_enet.h struct bfi_enet_cq cq; cq 567 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxf->rit[offset] = rxp->cq.ccb->id; cq 1475 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); cq 1664 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, cq 1665 drivers/net/ethernet/brocade/bna/bna_tx_rx.c &rxp->cq.qpt); cq 1668 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.ib_seg_host_addr.lsb; cq 1670 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.ib_seg_host_addr.msb; cq 1672 drivers/net/ethernet/brocade/bna/bna_tx_rx.c htons((u16)rxp->cq.ib.intr_vector); cq 1679 
cq 1683 drivers/net/ethernet/brocade/bna/bna_tx_rx.c htonl((u32)rxp->cq.ib.coalescing_timeo);
cq 1685 drivers/net/ethernet/brocade/bna/bna_tx_rx.c htonl((u32)rxp->cq.ib.interpkt_timeo);
cq 1686 drivers/net/ethernet/brocade/bna/bna_tx_rx.c cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
cq 1735 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_ib_stop(rx->bna, &rxp->cq.ib);
cq 1904 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
cq 1905 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
cq 1906 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
cq 1907 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.qpt.page_count = page_count;
cq 1908 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.qpt.page_size = page_size;
cq 1910 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
cq 1911 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->sw_q = page_mem->kva;
cq 1916 drivers/net/ethernet/brocade/bna/bna_tx_rx.c for (i = 0; i < rxp->cq.qpt.page_count; i++) {
cq 1917 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->sw_qpt[i] = kva;
cq 1921 drivers/net/ethernet/brocade/bna/bna_tx_rx.c ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
cq 1923 drivers/net/ethernet/brocade/bna/bna_tx_rx.c ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
cq 2103 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->i_dbell->doorbell_addr =
cq 2119 drivers/net/ethernet/brocade/bna/bna_tx_rx.c (*rxp->cq.ccb->hw_producer_index) = 0;
cq 2120 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->producer_index = 0;
cq 2345 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.rx = rx;
cq 2360 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.ib_seg_host_addr.lsb =
cq 2362 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.ib_seg_host_addr.msb =
cq 2364 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.ib_seg_host_addr_kva =
cq 2366 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.intr_type = intr_info->intr_type;
cq 2368 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.intr_vector = rxp->vector;
cq 2370 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.intr_vector = BIT(rxp->vector);
cq 2371 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
cq 2372 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
cq 2373 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
cq 2436 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
cq 2444 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->q_depth = cq_depth;
cq 2445 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->cq = &rxp->cq;
cq 2446 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->rcb[0] = q0->rcb;
cq 2447 drivers/net/ethernet/brocade/bna/bna_tx_rx.c q0->rcb->ccb = rxp->cq.ccb;
cq 2449 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->rcb[1] = q1->rcb;
cq 2450 drivers/net/ethernet/brocade/bna/bna_tx_rx.c q1->rcb->ccb = rxp->cq.ccb;
cq 2452 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->hw_producer_index =
cq 2453 drivers/net/ethernet/brocade/bna/bna_tx_rx.c (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
cq 2454 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
cq 2455 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
cq 2456 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
cq 2457 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->rx_coalescing_timeo =
cq 2458 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ib.coalescing_timeo;
cq 2459 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
cq 2460 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
cq 2461 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->bnad = bna->bnad;
cq 2462 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->id = i;
cq 2468 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
cq 2516 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
cq 2517 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb = NULL;
cq 2679 drivers/net/ethernet/brocade/bna/bna_tx_rx.c rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
cq 2680 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
cq 2697 drivers/net/ethernet/brocade/bna/bna_tx_rx.c struct bna *bna = ccb->cq->rx->bna;
cq 2742 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
cq 623 drivers/net/ethernet/brocade/bna/bna_types.h struct bna_cq *cq;
cq 691 drivers/net/ethernet/brocade/bna/bna_types.h struct bna_cq cq;
cq 524 drivers/net/ethernet/brocade/bna/bnad.c struct bna_cq_entry *cq, *cmpl;
cq 527 drivers/net/ethernet/brocade/bna/bnad.c cq = ccb->sw_q;
cq 529 drivers/net/ethernet/brocade/bna/bnad.c cmpl = &cq[pi];
cq 562 drivers/net/ethernet/brocade/bna/bnad.c cmpl = &cq[pi];
cq 589 drivers/net/ethernet/brocade/bna/bnad.c struct bna_cq_entry *cq, *cmpl, *next_cmpl;
cq 602 drivers/net/ethernet/brocade/bna/bnad.c cq = ccb->sw_q;
cq 605 drivers/net/ethernet/brocade/bna/bnad.c cmpl = &cq[ccb->producer_index];
cq 652 drivers/net/ethernet/brocade/bna/bnad.c next_cmpl = &cq[pi];
cq 722 drivers/net/ethernet/brocade/bna/bnad.c cmpl = &cq[ccb->producer_index];
cq 1022 drivers/net/ethernet/brocade/bna/bnad.c (struct bnad_rx_info *)ccb->cq->rx->priv;
cq 1032 drivers/net/ethernet/brocade/bna/bnad.c (struct bnad_rx_info *)ccb->cq->rx->priv;
cq 856 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct cmp_queue *cq = &qs->cq[cq_idx];
cq 863 drivers/net/ethernet/cavium/thunder/nicvf_main.c spin_lock_bh(&cq->lock);
cq 878 drivers/net/ethernet/cavium/thunder/nicvf_main.c cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
cq 880 drivers/net/ethernet/cavium/thunder/nicvf_main.c cqe_head &= (cq->dmem.q_len - 1);
cq 882 drivers/net/ethernet/cavium/thunder/nicvf_main.c prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
cq 955 drivers/net/ethernet/cavium/thunder/nicvf_main.c spin_unlock_bh(&cq->lock);
cq 965 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct nicvf_cq_poll *cq;
cq 967 drivers/net/ethernet/cavium/thunder/nicvf_main.c cq = container_of(napi, struct nicvf_cq_poll, napi);
cq 968 drivers/net/ethernet/cavium/thunder/nicvf_main.c work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
cq 975 drivers/net/ethernet/cavium/thunder/nicvf_main.c cq->cq_idx);
cq 976 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
cq 978 drivers/net/ethernet/cavium/thunder/nicvf_main.c cq->cq_idx, cq_head);
cq 979 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
cq 476 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct cmp_queue *cq, int q_len)
cq 480 drivers/net/ethernet/cavium/thunder/nicvf_queues.c err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
cq 485 drivers/net/ethernet/cavium/thunder/nicvf_queues.c cq->desc = cq->dmem.base;
cq 486 drivers/net/ethernet/cavium/thunder/nicvf_queues.c cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
cq 492 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
cq 494 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!cq)
cq 496 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!cq->dmem.base)
cq 499 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_free_q_desc_mem(nic, &cq->dmem);
cq 820 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct cmp_queue *cq;
cq 823 drivers/net/ethernet/cavium/thunder/nicvf_queues.c cq = &qs->cq[qidx];
cq 824 drivers/net/ethernet/cavium/thunder/nicvf_queues.c cq->enable = enable;
cq 826 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!cq->enable) {
cq 834 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!cq->enable)
cq 837 drivers/net/ethernet/cavium/thunder/nicvf_queues.c spin_lock_init(&cq->lock);
cq 840 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qidx, (u64)(cq->dmem.phys_base));
cq 852 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
cq 999 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
cq 1026 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
cq 300 drivers/net/ethernet/cavium/thunder/nicvf_queues.h struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
cq 4778 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.cq.start = val[2];
cq 4779 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->vres.cq.size = val[3] - val[2] + 1;
cq 325 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
cq 291 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h struct cxgb4_range cq;
cq 4046 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
cq 4048 drivers/net/ethernet/chelsio/cxgb4/sge.c if (cq->q.desc) {
cq 4049 drivers/net/ethernet/chelsio/cxgb4/sge.c tasklet_kill(&cq->qresume_tsk);
cq 4051 drivers/net/ethernet/chelsio/cxgb4/sge.c cq->q.cntxt_id);
cq 4052 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_queue_purge(&cq->sendq);
cq 4053 drivers/net/ethernet/chelsio/cxgb4/sge.c free_txq(adap, &cq->q);
cq 4138 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_ctrl_txq *cq = &s->ctrlq[i];
cq 4140 drivers/net/ethernet/chelsio/cxgb4/sge.c if (cq->q.desc)
cq 4141 drivers/net/ethernet/chelsio/cxgb4/sge.c tasklet_kill(&cq->qresume_tsk);
cq 197 drivers/net/ethernet/cisco/enic/enic.h ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
cq 261 drivers/net/ethernet/cisco/enic/enic.h return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
cq 267 drivers/net/ethernet/cisco/enic/enic.h return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
cq 1335 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
cq 1452 drivers/net/ethernet/cisco/enic/enic_main.c enic_intr_update_pkt_size(&cq->pkt_size_counter,
cq 1481 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
cq 1482 drivers/net/ethernet/cisco/enic/enic_main.c u32 timer = cq->tobe_rx_coal_timeval;
cq 1484 drivers/net/ethernet/cisco/enic/enic_main.c if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
cq 1486 drivers/net/ethernet/cisco/enic/enic_main.c cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
cq 1493 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
cq 1494 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
cq 1502 drivers/net/ethernet/cisco/enic/enic_main.c delta = ktime_us_delta(now, cq->prev_ts);
cq 1505 drivers/net/ethernet/cisco/enic/enic_main.c cq->prev_ts = now;
cq 1530 drivers/net/ethernet/cisco/enic/enic_main.c cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
cq 1548 drivers/net/ethernet/cisco/enic/enic_main.c wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
cq 1552 drivers/net/ethernet/cisco/enic/enic_main.c rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
cq 1640 drivers/net/ethernet/cisco/enic/enic_main.c unsigned int cq;
cq 1647 drivers/net/ethernet/cisco/enic/enic_main.c cq = enic_cq_wq(enic, wq_irq);
cq 1649 drivers/net/ethernet/cisco/enic/enic_main.c wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
cq 1669 drivers/net/ethernet/cisco/enic/enic_main.c unsigned int cq = enic_cq_rq(enic, rq);
cq 1679 drivers/net/ethernet/cisco/enic/enic_main.c work_done = vnic_cq_service(&enic->cq[cq],
cq 1878 drivers/net/ethernet/cisco/enic/enic_main.c enic->cq[index].cur_rx_coal_timeval =
cq 2044 drivers/net/ethernet/cisco/enic/enic_main.c vnic_cq_clean(&enic->cq[i]);
cq 196 drivers/net/ethernet/cisco/enic/enic_res.c vnic_cq_free(&enic->cq[i]);
cq 280 drivers/net/ethernet/cisco/enic/enic_res.c vnic_cq_init(&enic->cq[i],
cq 355 drivers/net/ethernet/cisco/enic/enic_res.c err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
cq 359 drivers/net/ethernet/cisco/enic/enic_res.c err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
cq 29 drivers/net/ethernet/cisco/enic/vnic_cq.c void vnic_cq_free(struct vnic_cq *cq)
cq 31 drivers/net/ethernet/cisco/enic/vnic_cq.c vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq 33 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->ctrl = NULL;
cq 36 drivers/net/ethernet/cisco/enic/vnic_cq.c int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq 41 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->index = index;
cq 42 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->vdev = vdev;
cq 44 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
cq 45 drivers/net/ethernet/cisco/enic/vnic_cq.c if (!cq->ctrl) {
cq 50 drivers/net/ethernet/cisco/enic/vnic_cq.c err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
cq 57 drivers/net/ethernet/cisco/enic/vnic_cq.c void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
cq 65 drivers/net/ethernet/cisco/enic/vnic_cq.c paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
cq 66 drivers/net/ethernet/cisco/enic/vnic_cq.c writeq(paddr, &cq->ctrl->ring_base);
cq 67 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
cq 68 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
cq 69 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(color_enable, &cq->ctrl->color_enable);
cq 70 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq_head, &cq->ctrl->cq_head);
cq 71 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq_tail, &cq->ctrl->cq_tail);
cq 72 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
cq 73 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
cq 74 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
cq 75 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
cq 76 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
cq 77 drivers/net/ethernet/cisco/enic/vnic_cq.c writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
cq 79 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->interrupt_offset = interrupt_offset;
cq 82 drivers/net/ethernet/cisco/enic/vnic_cq.c void vnic_cq_clean(struct vnic_cq *cq)
cq 84 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->to_clean = 0;
cq 85 drivers/net/ethernet/cisco/enic/vnic_cq.c cq->last_color = 0;
cq 87 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_head);
cq 88 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_tail);
cq 89 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(1, &cq->ctrl->cq_tail_color);
cq 91 drivers/net/ethernet/cisco/enic/vnic_cq.c vnic_dev_clear_desc_ring(&cq->ring);
cq 72 drivers/net/ethernet/cisco/enic/vnic_cq.h static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
cq 83 drivers/net/ethernet/cisco/enic/vnic_cq.h cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq 84 drivers/net/ethernet/cisco/enic/vnic_cq.h cq->ring.desc_size * cq->to_clean);
cq 88 drivers/net/ethernet/cisco/enic/vnic_cq.h while (color != cq->last_color) {
cq 90 drivers/net/ethernet/cisco/enic/vnic_cq.h if ((*q_service)(cq->vdev, cq_desc, type,
cq 94 drivers/net/ethernet/cisco/enic/vnic_cq.h cq->to_clean++;
cq 95 drivers/net/ethernet/cisco/enic/vnic_cq.h if (cq->to_clean == cq->ring.desc_count) {
cq 96 drivers/net/ethernet/cisco/enic/vnic_cq.h cq->to_clean = 0;
cq 97 drivers/net/ethernet/cisco/enic/vnic_cq.h cq->last_color = cq->last_color ? 0 : 1;
cq 100 drivers/net/ethernet/cisco/enic/vnic_cq.h cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq 101 drivers/net/ethernet/cisco/enic/vnic_cq.h cq->ring.desc_size * cq->to_clean);
cq 113 drivers/net/ethernet/cisco/enic/vnic_cq.h void vnic_cq_free(struct vnic_cq *cq);
cq 114 drivers/net/ethernet/cisco/enic/vnic_cq.h int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq 116 drivers/net/ethernet/cisco/enic/vnic_cq.h void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
cq 121 drivers/net/ethernet/cisco/enic/vnic_cq.h void vnic_cq_clean(struct vnic_cq *cq);
cq 206 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info cq;
cq 240 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info cq;
cq 294 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info cq;
cq 517 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
cq 531 drivers/net/ethernet/emulex/benet/be_cmds.c be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
cq 542 drivers/net/ethernet/emulex/benet/be_cmds.c be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
cq 567 drivers/net/ethernet/emulex/benet/be_cmds.c be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
cq 1156 drivers/net/ethernet/emulex/benet/be_cmds.c int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
cq 1161 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_dma_mem *q_mem = &cq->dma_mem;
cq 1184 drivers/net/ethernet/emulex/benet/be_cmds.c __ilog2_u32(cq->len / 256));
cq 1201 drivers/net/ethernet/emulex/benet/be_cmds.c __ilog2_u32(cq->len / 256));
cq 1215 drivers/net/ethernet/emulex/benet/be_cmds.c cq->id = le16_to_cpu(resp->cq_id);
cq 1216 drivers/net/ethernet/emulex/benet/be_cmds.c cq->created = true;
cq 1235 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *cq)
cq 1259 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
cq 1262 drivers/net/ethernet/emulex/benet/be_cmds.c req->cq_id = cpu_to_le16(cq->id);
cq 1268 drivers/net/ethernet/emulex/benet/be_cmds.c ctxt, cq->id);
cq 1300 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *cq)
cq 1324 drivers/net/ethernet/emulex/benet/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
cq 1343 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *mccq, struct be_queue_info *cq)
cq 1347 drivers/net/ethernet/emulex/benet/be_cmds.c status = be_cmd_mccq_ext_create(adapter, mccq, cq);
cq 1352 drivers/net/ethernet/emulex/benet/be_cmds.c status = be_cmd_mccq_org_create(adapter, mccq, cq);
cq 1362 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *cq = &txo->cq;
cq 1384 drivers/net/ethernet/emulex/benet/be_cmds.c req->cq_id = cpu_to_le16(cq->id);
cq 2396 drivers/net/ethernet/emulex/benet/be_cmds.h int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
cq 2400 drivers/net/ethernet/emulex/benet/be_cmds.h struct be_queue_info *cq);
cq 1447 drivers/net/ethernet/emulex/benet/be_main.c entry = txo->cq.dma_mem.va;
cq 1449 drivers/net/ethernet/emulex/benet/be_main.c i, txo->cq.head, txo->cq.tail,
cq 1450 drivers/net/ethernet/emulex/benet/be_main.c atomic_read(&txo->cq.used));
cq 2533 drivers/net/ethernet/emulex/benet/be_main.c struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
cq 2572 drivers/net/ethernet/emulex/benet/be_main.c queue_tail_inc(&rxo->cq);
cq 2711 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *tx_cq = &txo->cq;
cq 2829 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *rx_cq = &rxo->cq;
cq 2892 drivers/net/ethernet/emulex/benet/be_main.c be_cq_notify(adapter, txo->cq.id, false, cmpl);
cq 3001 drivers/net/ethernet/emulex/benet/be_main.c q = &adapter->mcc_obj.cq;
cq 3010 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *q, *cq;
cq 3012 drivers/net/ethernet/emulex/benet/be_main.c cq = &adapter->mcc_obj.cq;
cq 3013 drivers/net/ethernet/emulex/benet/be_main.c if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
cq 3018 drivers/net/ethernet/emulex/benet/be_main.c if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
cq 3025 drivers/net/ethernet/emulex/benet/be_main.c if (be_cmd_mccq_create(adapter, q, cq))
cq 3033 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
cq 3035 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, cq);
cq 3052 drivers/net/ethernet/emulex/benet/be_main.c q = &txo->cq;
cq 3061 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *cq;
cq 3069 drivers/net/ethernet/emulex/benet/be_main.c cq = &txo->cq;
cq 3070 drivers/net/ethernet/emulex/benet/be_main.c status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
cq 3082 drivers/net/ethernet/emulex/benet/be_main.c status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
cq 3111 drivers/net/ethernet/emulex/benet/be_main.c q = &rxo->cq;
cq 3120 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *eq, *cq;
cq 3142 drivers/net/ethernet/emulex/benet/be_main.c cq = &rxo->cq;
cq 3143 drivers/net/ethernet/emulex/benet/be_main.c rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
cq 3150 drivers/net/ethernet/emulex/benet/be_main.c rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
cq 3210 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *rx_cq = &rxo->cq;
cq 3278 drivers/net/ethernet/emulex/benet/be_main.c be_cq_notify(adapter, txo->cq.id, true, work_done);
cq 3698 drivers/net/ethernet/emulex/benet/be_main.c rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
cq 3706 drivers/net/ethernet/emulex/benet/be_main.c rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
cq 3815 drivers/net/ethernet/emulex/benet/be_main.c be_cq_notify(adapter, rxo->cq.id, true, 0);
cq 3818 drivers/net/ethernet/emulex/benet/be_main.c be_cq_notify(adapter, txo->cq.id, true, 0);
cq 232 drivers/net/ethernet/ibm/ehea/ehea_hw.h static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
cq 234 drivers/net/ethernet/ibm/ehea/ehea_hw.h struct h_epa epa = cq->epas.kernel;
cq 239 drivers/net/ethernet/ibm/ehea/ehea_hw.h static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
cq 241 drivers/net/ethernet/ibm/ehea/ehea_hw.h struct h_epa epa = cq->epas.kernel;
cq 111 drivers/net/ethernet/ibm/ehea/ehea_qmr.c struct ehea_cq *cq;
cq 117 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq = kzalloc(sizeof(*cq), GFP_KERNEL);
cq 118 drivers/net/ethernet/ibm/ehea/ehea_qmr.c if (!cq)
cq 121 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq->attr.max_nr_of_cqes = nr_of_cqe;
cq 122 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq->attr.cq_token = cq_token;
cq 123 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq->attr.eq_handle = eq_handle;
cq 125 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq->adapter = adapter;
cq 127 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
cq 128 drivers/net/ethernet/ibm/ehea/ehea_qmr.c &cq->fw_handle, &cq->epas);
cq 134 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
cq 139 drivers/net/ethernet/ibm/ehea/ehea_qmr.c for (counter = 0; counter < cq->attr.nr_pages; counter++) {
cq 140 drivers/net/ethernet/ibm/ehea/ehea_qmr.c vpage = hw_qpageit_get_inc(&cq->hw_queue);
cq 149 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq->fw_handle, rpage, 1);
cq 152 drivers/net/ethernet/ibm/ehea/ehea_qmr.c cq, hret, counter, cq->attr.nr_pages);
cq 156 drivers/net/ethernet/ibm/ehea/ehea_qmr.c if (counter == (cq->attr.nr_pages - 1)) {
cq 157 drivers/net/ethernet/ibm/ehea/ehea_qmr.c vpage = hw_qpageit_get_inc(&cq->hw_queue);
cq 173 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hw_qeit_reset(&cq->hw_queue);
cq 174 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_reset_cq_ep(cq);
cq 175 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_reset_cq_n1(cq);
cq 177 drivers/net/ethernet/ibm/ehea/ehea_qmr.c return cq;
cq 180 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hw_queue_dtor(&cq->hw_queue);
cq 183 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);
cq 186 drivers/net/ethernet/ibm/ehea/ehea_qmr.c kfree(cq);
cq 192 drivers/net/ethernet/ibm/ehea/ehea_qmr.c static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
cq 195 drivers/net/ethernet/ibm/ehea/ehea_qmr.c u64 adapter_handle = cq->adapter->handle;
cq 198 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
cq 202 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hw_queue_dtor(&cq->hw_queue);
cq 203 drivers/net/ethernet/ibm/ehea/ehea_qmr.c kfree(cq);
cq 208 drivers/net/ethernet/ibm/ehea/ehea_qmr.c int ehea_destroy_cq(struct ehea_cq *cq)
cq 211 drivers/net/ethernet/ibm/ehea/ehea_qmr.c if (!cq)
cq 214 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hcp_epas_dtor(&cq->epas);
cq 215 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
cq 217 drivers/net/ethernet/ibm/ehea/ehea_qmr.c ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
cq 218 drivers/net/ethernet/ibm/ehea/ehea_qmr.c hret = ehea_destroy_cq_res(cq, FORCE_FREE);
cq 333 drivers/net/ethernet/ibm/ehea/ehea_qmr.h static inline void ehea_inc_cq(struct ehea_cq *cq)
cq 335 drivers/net/ethernet/ibm/ehea/ehea_qmr.h hw_qeit_inc(&cq->hw_queue);
cq 367 drivers/net/ethernet/ibm/ehea/ehea_qmr.h int ehea_destroy_cq(struct ehea_cq *cq);
cq 26 drivers/net/ethernet/intel/ice/ice_common.h ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq 39 drivers/net/ethernet/intel/ice/ice_common.h ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq 66 drivers/net/ethernet/intel/ice/ice_common.h bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
cq 34 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_info *cq = &hw->adminq;
cq 36 drivers/net/ethernet/intel/ice/ice_controlq.c ICE_CQ_INIT_REGS(cq, PF_FW);
cq 47 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_info *cq = &hw->mailboxq;
cq 49 drivers/net/ethernet/intel/ice/ice_controlq.c ICE_CQ_INIT_REGS(cq, PF_MBX);
cq 59 drivers/net/ethernet/intel/ice/ice_controlq.c bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 62 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
cq 63 drivers/net/ethernet/intel/ice/ice_controlq.c return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
cq 64 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.len_ena_mask)) ==
cq 65 drivers/net/ethernet/intel/ice/ice_controlq.c (cq->num_sq_entries | cq->sq.len_ena_mask);
cq 76 drivers/net/ethernet/intel/ice/ice_controlq.c ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 78 drivers/net/ethernet/intel/ice/ice_controlq.c size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
cq 80 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
cq 81 drivers/net/ethernet/intel/ice/ice_controlq.c &cq->sq.desc_buf.pa,
cq 83 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.desc_buf.va)
cq 85 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.size = size;
cq 87 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
cq 89 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.cmd_buf) {
cq 90 drivers/net/ethernet/intel/ice/ice_controlq.c dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
cq 91 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
cq 92 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.va = NULL;
cq 93 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.pa = 0;
cq 94 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.size = 0;
cq 107 drivers/net/ethernet/intel/ice/ice_controlq.c ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 109 drivers/net/ethernet/intel/ice/ice_controlq.c size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
cq 111 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
cq 112 drivers/net/ethernet/intel/ice/ice_controlq.c &cq->rq.desc_buf.pa,
cq 114 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->rq.desc_buf.va)
cq 116 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.desc_buf.size = size;
cq 143 drivers/net/ethernet/intel/ice/ice_controlq.c ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 150 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
cq 151 drivers/net/ethernet/intel/ice/ice_controlq.c sizeof(cq->rq.desc_buf), GFP_KERNEL);
cq 152 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->rq.dma_head)
cq 154 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
cq 157 drivers/net/ethernet/intel/ice/ice_controlq.c for (i = 0; i < cq->num_rq_entries; i++) {
cq 161 drivers/net/ethernet/intel/ice/ice_controlq.c bi = &cq->rq.r.rq_bi[i];
cq 163 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq_buf_size, &bi->pa,
cq 167 drivers/net/ethernet/intel/ice/ice_controlq.c bi->size = cq->rq_buf_size;
cq 170 drivers/net/ethernet/intel/ice/ice_controlq.c desc = ICE_CTL_Q_DESC(cq->rq, i);
cq 173 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->rq_buf_size > ICE_AQ_LG_BUF)
cq 196 drivers/net/ethernet/intel/ice/ice_controlq.c dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
cq 197 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
cq 198 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.r.rq_bi[i].va = NULL;
cq 199 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.r.rq_bi[i].pa = 0;
cq 200 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.r.rq_bi[i].size = 0;
cq 202 drivers/net/ethernet/intel/ice/ice_controlq.c devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq 213 drivers/net/ethernet/intel/ice/ice_controlq.c ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 218 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
cq 219 drivers/net/ethernet/intel/ice/ice_controlq.c sizeof(cq->sq.desc_buf), GFP_KERNEL);
cq 220 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.dma_head)
cq 222 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
cq 225 drivers/net/ethernet/intel/ice/ice_controlq.c for (i = 0; i < cq->num_sq_entries; i++) {
cq 228 drivers/net/ethernet/intel/ice/ice_controlq.c bi = &cq->sq.r.sq_bi[i];
cq 230 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq_buf_size, &bi->pa,
cq 234 drivers/net/ethernet/intel/ice/ice_controlq.c bi->size = cq->sq_buf_size;
cq 242 drivers/net/ethernet/intel/ice/ice_controlq.c dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
cq 243 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
cq 244 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].va = NULL;
cq 245 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].pa = 0;
cq 246 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].size = 0;
cq 248 drivers/net/ethernet/intel/ice/ice_controlq.c devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq 280 drivers/net/ethernet/intel/ice/ice_controlq.c ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 282 drivers/net/ethernet/intel/ice/ice_controlq.c return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
cq 293 drivers/net/ethernet/intel/ice/ice_controlq.c ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 297 drivers/net/ethernet/intel/ice/ice_controlq.c status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
cq 302 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
cq 321 drivers/net/ethernet/intel/ice/ice_controlq.c static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 325 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->sq.count > 0) {
cq 332 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->num_sq_entries || !cq->sq_buf_size) {
cq 337 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.next_to_use = 0;
cq 338 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.next_to_clean = 0;
cq 341 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
cq 346 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_alloc_sq_bufs(hw, cq);
cq 351 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_cfg_sq_regs(hw, cq);
cq 356 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.count = cq->num_sq_entries;
cq 360 drivers/net/ethernet/intel/ice/ice_controlq.c ice_free_cq_ring(hw, &cq->sq);
cq 380 drivers/net/ethernet/intel/ice/ice_controlq.c static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 384 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->rq.count > 0) {
cq 391 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->num_rq_entries || !cq->rq_buf_size) {
cq 396 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.next_to_use = 0;
cq 397 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.next_to_clean = 0;
cq 400 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
cq 405 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_alloc_rq_bufs(hw, cq);
cq 410 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_cfg_rq_regs(hw, cq);
cq 415 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.count = cq->num_rq_entries;
cq 419 drivers/net/ethernet/intel/ice/ice_controlq.c ice_free_cq_ring(hw, &cq->rq);
cq 454 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 458 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_lock(&cq->sq_lock);
cq 460 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.count) {
cq 466 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.head, 0);
cq 467 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.tail, 0);
cq 468 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.len, 0);
cq 469 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.bal, 0);
cq 470 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.bah, 0);
cq 472 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.count = 0; /* to indicate uninitialized queue */
cq 475 drivers/net/ethernet/intel/ice/ice_controlq.c ICE_FREE_CQ_BUFS(hw, cq, sq);
cq 476 drivers/net/ethernet/intel/ice/ice_controlq.c ice_free_cq_ring(hw, &cq->sq);
cq 479 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_unlock(&cq->sq_lock);
cq 521 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 525 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_lock(&cq->rq_lock);
cq 527 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->rq.count) {
cq 533 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.head, 0);
cq 534 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.tail, 0);
cq 535 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.len, 0);
cq 536 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.bal, 0);
cq 537 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.bah, 0);
cq 540 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.count = 0;
cq 543 drivers/net/ethernet/intel/ice/ice_controlq.c ICE_FREE_CQ_BUFS(hw, cq, rq);
cq 544 drivers/net/ethernet/intel/ice/ice_controlq.c ice_free_cq_ring(hw, &cq->rq);
cq 547 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_unlock(&cq->rq_lock);
cq 557 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_info *cq = &hw->adminq;
cq 572 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_rq(hw, cq);
cq 573 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_sq(hw, cq);
cq 593 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_info *cq;
cq 599 drivers/net/ethernet/intel/ice/ice_controlq.c cq = &hw->adminq;
cq 603 drivers/net/ethernet/intel/ice/ice_controlq.c cq = &hw->mailboxq;
cq 608 drivers/net/ethernet/intel/ice/ice_controlq.c cq->qtype = q_type;
cq 611 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->num_rq_entries || !cq->num_sq_entries ||
cq 612 drivers/net/ethernet/intel/ice/ice_controlq.c !cq->rq_buf_size || !cq->sq_buf_size) {
cq 617 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
cq 620 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_init_sq(hw, cq);
cq 625 drivers/net/ethernet/intel/ice/ice_controlq.c ret_code = ice_init_rq(hw, cq);
cq 633 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_sq(hw, cq);
cq 673 drivers/net/ethernet/intel/ice/ice_controlq.c static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
cq 675 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_init(&cq->sq_lock);
cq 676 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_init(&cq->rq_lock);
cq 712 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_info *cq;
cq 716 drivers/net/ethernet/intel/ice/ice_controlq.c cq = &hw->adminq;
cq 717 drivers/net/ethernet/intel/ice/ice_controlq.c if (ice_check_sq_alive(hw, cq))
cq 721 drivers/net/ethernet/intel/ice/ice_controlq.c cq = &hw->mailboxq;
cq 727 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_sq(hw, cq);
cq 728 drivers/net/ethernet/intel/ice/ice_controlq.c ice_shutdown_rq(hw, cq);
cq 754 drivers/net/ethernet/intel/ice/ice_controlq.c ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
cq 756 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_destroy(&cq->sq_lock);
cq 757 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_destroy(&cq->rq_lock);
cq 785 drivers/net/ethernet/intel/ice/ice_controlq.c static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 787 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_ring *sq = &cq->sq;
cq 795 drivers/net/ethernet/intel/ice/ice_controlq.c while (rd32(hw, cq->sq.head) != ntc) {
cq 797 drivers/net/ethernet/intel/ice/ice_controlq.c "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
cq 820 drivers/net/ethernet/intel/ice/ice_controlq.c static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 825 drivers/net/ethernet/intel/ice/ice_controlq.c return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
cq 841 drivers/net/ethernet/intel/ice/ice_controlq.c ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq 857 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_lock(&cq->sq_lock);
cq 859 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq_last_status = ICE_AQ_RC_OK;
cq 861 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.count) {
cq 874 drivers/net/ethernet/intel/ice/ice_controlq.c if (buf_size > cq->sq_buf_size) {
cq 887 drivers/net/ethernet/intel/ice/ice_controlq.c val = rd32(hw, cq->sq.head);
cq 888 drivers/net/ethernet/intel/ice/ice_controlq.c if (val >= cq->num_sq_entries) {
cq 896 drivers/net/ethernet/intel/ice/ice_controlq.c details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
cq 907 drivers/net/ethernet/intel/ice/ice_controlq.c if (ice_clean_sq(hw, cq) == 0) {
cq 915 drivers/net/ethernet/intel/ice/ice_controlq.c desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
cq 922 drivers/net/ethernet/intel/ice/ice_controlq.c dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
cq 942 drivers/net/ethernet/intel/ice/ice_controlq.c (cq->sq.next_to_use)++;
cq 943 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->sq.next_to_use == cq->sq.count)
cq 944 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.next_to_use = 0;
cq 945 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.tail, cq->sq.next_to_use);
cq 948 drivers/net/ethernet/intel/ice/ice_controlq.c if (ice_sq_done(hw, cq))
cq 953 drivers/net/ethernet/intel/ice/ice_controlq.c } while (total_delay < cq->sq_cmd_timeout);
cq 956 drivers/net/ethernet/intel/ice/ice_controlq.c if (ice_sq_done(hw, cq)) {
cq 983 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq_last_status = (enum ice_aq_err)retval;
cq 1004 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_unlock(&cq->sq_lock);
cq 1035 drivers/net/ethernet/intel/ice/ice_controlq.c ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq 1038 drivers/net/ethernet/intel/ice/ice_controlq.c u16 ntc = cq->rq.next_to_clean;
cq 1051 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_lock(&cq->rq_lock);
cq 1053 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->rq.count) {
cq 1061 drivers/net/ethernet/intel/ice/ice_controlq.c ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
cq 1070 drivers/net/ethernet/intel/ice/ice_controlq.c desc = ICE_CTL_Q_DESC(cq->rq, ntc);
cq 1073 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
cq 1079 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq_last_status);
cq 1085 drivers/net/ethernet/intel/ice/ice_controlq.c memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
cq 1090 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq_buf_size);
cq 1095 drivers/net/ethernet/intel/ice/ice_controlq.c bi = &cq->rq.r.rq_bi[ntc];
cq 1099 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->rq_buf_size > ICE_AQ_LG_BUF)
cq 1106 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->rq.tail, ntc);
cq 1109 drivers/net/ethernet/intel/ice/ice_controlq.c if (ntc == cq->num_rq_entries)
cq 1111 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.next_to_clean = ntc;
cq 1112 drivers/net/ethernet/intel/ice/ice_controlq.c cq->rq.next_to_use = ntu;
cq 1118 drivers/net/ethernet/intel/ice/ice_controlq.c ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
cq 1119 drivers/net/ethernet/intel/ice/ice_controlq.c *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
cq 1122 drivers/net/ethernet/intel/ice/ice_controlq.c mutex_unlock(&cq->rq_lock);
cq 965 drivers/net/ethernet/intel/ice/ice_main.c struct ice_ctl_q_info *cq;
cq 976 drivers/net/ethernet/intel/ice/ice_main.c cq = &hw->adminq;
cq 980 drivers/net/ethernet/intel/ice/ice_main.c cq = &hw->mailboxq;
cq 992 drivers/net/ethernet/intel/ice/ice_main.c val = rd32(hw, cq->rq.len);
cq 1011 drivers/net/ethernet/intel/ice/ice_main.c wr32(hw, cq->rq.len, val);
cq 1014 drivers/net/ethernet/intel/ice/ice_main.c val = rd32(hw, cq->sq.len);
cq 1033 drivers/net/ethernet/intel/ice/ice_main.c wr32(hw, cq->sq.len, val);
cq 1036 drivers/net/ethernet/intel/ice/ice_main.c event.buf_len = cq->rq_buf_size;
cq 1046 drivers/net/ethernet/intel/ice/ice_main.c ret = ice_clean_rq_elem(hw, cq, &event, &pending);
cq 1093 drivers/net/ethernet/intel/ice/ice_main.c static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq 1097 drivers/net/ethernet/intel/ice/ice_main.c ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
cq 1098 drivers/net/ethernet/intel/ice/ice_main.c return cq->rq.next_to_clean != ntu;
cq 472 drivers/net/ethernet/marvell/octeontx2/af/mbox.h struct nix_cq_ctx_s cq;
cq 490 drivers/net/ethernet/marvell/octeontx2/af/mbox.h struct nix_cq_ctx_s cq;
cq 576 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c memcpy(ctx, &req->cq, sizeof(struct nix_cq_ctx_s));
cq 607 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c if (req->ctype == NIX_AQ_CTYPE_CQ && req->cq.ena)
cq 651 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c memcpy(&rsp->cq, ctx,
cq 681 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c aq_req.cq.ena = 0;
cq 498 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h u64 cq : 20;
cq 508 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h u64 cq : 20;
cq 676 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h u64 cq : 20;
cq 686 drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h u64 cq : 20;
cq 82 drivers/net/ethernet/mellanox/mlx4/cq.c static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
cq 84 drivers/net/ethernet/mellanox/mlx4/cq.c struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
cq 94 drivers/net/ethernet/mellanox/mlx4/cq.c if (list_empty_careful(&cq->tasklet_ctx.list)) {
cq 95 drivers/net/ethernet/mellanox/mlx4/cq.c refcount_inc(&cq->refcount);
cq 97 drivers/net/ethernet/mellanox/mlx4/cq.c list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
cq 106 drivers/net/ethernet/mellanox/mlx4/cq.c struct mlx4_cq *cq;
cq 109 drivers/net/ethernet/mellanox/mlx4/cq.c cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cq 113 drivers/net/ethernet/mellanox/mlx4/cq.c if (!cq) {
cq 121 drivers/net/ethernet/mellanox/mlx4/cq.c ++cq->arm_sn;
cq 123 drivers/net/ethernet/mellanox/mlx4/cq.c cq->comp(cq);
cq 129 drivers/net/ethernet/mellanox/mlx4/cq.c struct mlx4_cq *cq;
cq 132 drivers/net/ethernet/mellanox/mlx4/cq.c cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
cq 135 drivers/net/ethernet/mellanox/mlx4/cq.c if (!cq) {
cq 143 drivers/net/ethernet/mellanox/mlx4/cq.c cq->event(cq, event_type);
cq 169 drivers/net/ethernet/mellanox/mlx4/cq.c int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq 184 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
cq 191 drivers/net/ethernet/mellanox/mlx4/cq.c int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq 210 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
cq 343 drivers/net/ethernet/mellanox/mlx4/cq.c struct mlx4_cq *cq, unsigned vector, int collapsed,
cq 357 drivers/net/ethernet/mellanox/mlx4/cq.c cq->vector = vector;
cq 359 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage);
cq 364 drivers/net/ethernet/mellanox/mlx4/cq.c err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
cq 403 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
cq 409 drivers/net/ethernet/mellanox/mlx4/cq.c cq->cons_index = 0;
cq 410 drivers/net/ethernet/mellanox/mlx4/cq.c cq->arm_sn = 1;
cq 411 drivers/net/ethernet/mellanox/mlx4/cq.c cq->uar = uar;
cq 412 drivers/net/ethernet/mellanox/mlx4/cq.c refcount_set(&cq->refcount, 1);
cq 413 drivers/net/ethernet/mellanox/mlx4/cq.c init_completion(&cq->free);
cq 414 drivers/net/ethernet/mellanox/mlx4/cq.c cq->comp = mlx4_add_cq_to_tasklet;
cq 415 drivers/net/ethernet/mellanox/mlx4/cq.c cq->tasklet_ctx.priv =
cq 417 drivers/net/ethernet/mellanox/mlx4/cq.c INIT_LIST_HEAD(&cq->tasklet_ctx.list);
cq 420 drivers/net/ethernet/mellanox/mlx4/cq.c cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
cq 425 drivers/net/ethernet/mellanox/mlx4/cq.c radix_tree_delete(&cq_table->tree, cq->cqn);
cq 429 drivers/net/ethernet/mellanox/mlx4/cq.c mlx4_cq_free_icm(dev, cq->cqn);
cq 435 drivers/net/ethernet/mellanox/mlx4/cq.c void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
cq 441 drivers/net/ethernet/mellanox/mlx4/cq.c err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
cq 443 drivers/net/ethernet/mellanox/mlx4/cq.c mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
cq 446 drivers/net/ethernet/mellanox/mlx4/cq.c radix_tree_delete(&cq_table->tree, cq->cqn);
cq 449 drivers/net/ethernet/mellanox/mlx4/cq.c synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
cq 450 drivers/net/ethernet/mellanox/mlx4/cq.c if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
cq 454 drivers/net/ethernet/mellanox/mlx4/cq.c if (refcount_dec_and_test(&cq->refcount))
cq 455 drivers/net/ethernet/mellanox/mlx4/cq.c complete(&cq->free);
cq 456 drivers/net/ethernet/mellanox/mlx4/cq.c wait_for_completion(&cq->free);
cq 458 drivers/net/ethernet/mellanox/mlx4/cq.c mlx4_cq_free_icm(dev, cq->cqn);
cq 40 drivers/net/ethernet/mellanox/mlx4/en_cq.c static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
cq 52 drivers/net/ethernet/mellanox/mlx4/en_cq.c struct mlx4_en_cq *cq;
cq 55 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
cq 56 drivers/net/ethernet/mellanox/mlx4/en_cq.c if (!cq) {
cq 61 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->size = entries;
cq 62 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
cq 64 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->ring = ring;
cq 65 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->type = mode;
cq 66 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector = mdev->dev->caps.num_comp_vectors;
cq 72 drivers/net/ethernet/mellanox/mlx4/en_cq.c err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
cq 73 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->buf_size);
cq 78 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
cq 79 drivers/net/ethernet/mellanox/mlx4/en_cq.c *pcq = cq;
cq 84 drivers/net/ethernet/mellanox/mlx4/en_cq.c kfree(cq);
cq 89 drivers/net/ethernet/mellanox/mlx4/en_cq.c int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq 97 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->dev = mdev->pndev[priv->port];
cq 98 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.set_ci_db = cq->wqres.db.db;
cq 99 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.arm_db = cq->wqres.db.db + 1;
cq 100 drivers/net/ethernet/mellanox/mlx4/en_cq.c *cq->mcq.set_ci_db = 0;
cq 101 drivers/net/ethernet/mellanox/mlx4/en_cq.c *cq->mcq.arm_db = 0;
cq 102 drivers/net/ethernet/mellanox/mlx4/en_cq.c memset(cq->buf, 0, cq->buf_size);
cq 104 drivers/net/ethernet/mellanox/mlx4/en_cq.c if (cq->type == RX) {
cq 106 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector)) {
cq 107 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
cq 110 drivers/net/ethernet/mellanox/mlx4/en_cq.c &cq->vector);
cq 113 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector);
cq 120 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->irq_desc =
cq 122 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector));
cq 130 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector = rx_cq->vector;
cq 133 drivers/net/ethernet/mellanox/mlx4/en_cq.c if (cq->type == RX)
cq 134 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->size = priv->rx_ring[cq->ring]->actual_size;
cq 136 drivers/net/ethernet/mellanox/mlx4/en_cq.c if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
cq 137 drivers/net/ethernet/mellanox/mlx4/en_cq.c (cq->type == RX && priv->hwtstamp_config.rx_filter))
cq 140 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
cq 141 drivers/net/ethernet/mellanox/mlx4/en_cq.c err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
cq 142 drivers/net/ethernet/mellanox/mlx4/en_cq.c &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq 143 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
cq 147 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.event = mlx4_en_cq_event;
cq 149 drivers/net/ethernet/mellanox/mlx4/en_cq.c switch (cq->type) {
cq 151 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.comp = mlx4_en_tx_irq;
cq 152 drivers/net/ethernet/mellanox/mlx4/en_cq.c netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
cq 154 drivers/net/ethernet/mellanox/mlx4/en_cq.c napi_enable(&cq->napi);
cq 157 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.comp = mlx4_en_rx_irq;
cq 158 drivers/net/ethernet/mellanox/mlx4/en_cq.c netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
cq 159 drivers/net/ethernet/mellanox/mlx4/en_cq.c napi_enable(&cq->napi);
cq 163 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->xdp_busy = false;
cq 171 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_release_eq(mdev->dev, cq->vector);
cq 172 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector = mdev->dev->caps.num_comp_vectors;
cq 179 drivers/net/ethernet/mellanox/mlx4/en_cq.c struct mlx4_en_cq *cq = *pcq;
cq 181 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
cq 182 drivers/net/ethernet/mellanox/mlx4/en_cq.c if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
cq 183 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->type == RX)
cq 184 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_release_eq(priv->mdev->dev, cq->vector);
cq 185 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->vector = 0;
cq 186 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->buf_size = 0;
cq 187 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->buf = NULL;
cq 188 drivers/net/ethernet/mellanox/mlx4/en_cq.c kfree(cq);
cq 192 drivers/net/ethernet/mellanox/mlx4/en_cq.c void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq 194 drivers/net/ethernet/mellanox/mlx4/en_cq.c if (cq->type != TX_XDP) {
cq 195 drivers/net/ethernet/mellanox/mlx4/en_cq.c napi_disable(&cq->napi);
cq 196 drivers/net/ethernet/mellanox/mlx4/en_cq.c netif_napi_del(&cq->napi);
cq 199 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_cq_free(priv->mdev->dev, &cq->mcq);
cq 203 drivers/net/ethernet/mellanox/mlx4/en_cq.c int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq 205 drivers/net/ethernet/mellanox/mlx4/en_cq.c return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
cq 206 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->moder_cnt, cq->moder_time);
cq 209 drivers/net/ethernet/mellanox/mlx4/en_cq.c void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
cq 211 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
cq 1404 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_cq *cq;
cq 1422 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq = priv->rx_cq[i];
cq 1423 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_cnt = priv->rx_frames;
cq 1424 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_time = priv->rx_usecs;
cq 1432 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq = priv->tx_cq[t][i];
cq 1433 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_cnt = priv->tx_frames;
cq 1434 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_time = priv->tx_usecs;
cq 1453 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_cq *cq;
cq 1496 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq = priv->rx_cq[ring];
cq 1498 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_cnt != priv->rx_frames) {
cq 1500 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_time = moder_time;
cq 1501 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->moder_cnt = priv->rx_frames;
cq 1502 drivers/net/ethernet/mellanox/mlx4/en_netdev.c err = mlx4_en_set_cq_moder(priv, cq);
cq 1620 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_cq *cq;
cq 1651 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq = priv->rx_cq[i];
cq 1659 drivers/net/ethernet/mellanox/mlx4/en_netdev.c err = mlx4_en_activate_cq(priv, cq, i);
cq 1666 drivers/net/ethernet/mellanox/mlx4/en_netdev.c for (j = 0; j < cq->size; j++) {
cq 1669 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
cq 1674 drivers/net/ethernet/mellanox/mlx4/en_netdev.c err = mlx4_en_set_cq_moder(priv, cq);
cq 1677 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_deactivate_cq(priv, cq);
cq 1681 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_arm_cq(priv, cq);
cq 1682 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->cqn = cq->mcq.cqn;
cq 1715 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq = priv->tx_cq[t][i];
cq 1716 drivers/net/ethernet/mellanox/mlx4/en_netdev.c err = mlx4_en_activate_cq(priv, cq, i);
cq 1721 drivers/net/ethernet/mellanox/mlx4/en_netdev.c err = mlx4_en_set_cq_moder(priv, cq);
cq 1724 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_deactivate_cq(priv, cq);
cq 1729 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->buf->wqe_index = cpu_to_be16(0xffff);
cq 1734 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->mcq.cqn,
cq 1738 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_deactivate_cq(priv, cq);
cq 1746 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_arm_cq(priv, cq);
cq 1994 drivers/net/ethernet/mellanox/mlx4/en_netdev.c struct mlx4_en_cq *cq = priv->rx_cq[i];
cq 1996 drivers/net/ethernet/mellanox/mlx4/en_netdev.c napi_synchronize(&cq->napi);
cq 1998 drivers/net/ethernet/mellanox/mlx4/en_netdev.c mlx4_en_deactivate_cq(priv, cq);
cq 664 drivers/net/ethernet/mellanox/mlx4/en_rx.c int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
cq 670 drivers/net/ethernet/mellanox/mlx4/en_rx.c int cq_ring = cq->ring;
cq 691 drivers/net/ethernet/mellanox/mlx4/en_rx.c index = cq->mcq.cons_index & ring->size_mask;
cq 692 drivers/net/ethernet/mellanox/mlx4/en_rx.c cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
cq 696 drivers/net/ethernet/mellanox/mlx4/en_rx.c cq->mcq.cons_index & cq->size)) {
cq 822 drivers/net/ethernet/mellanox/mlx4/en_rx.c skb = napi_get_frags(&cq->napi);
cq 891 drivers/net/ethernet/mellanox/mlx4/en_rx.c napi_gro_frags(&cq->napi);
cq 897 drivers/net/ethernet/mellanox/mlx4/en_rx.c ++cq->mcq.cons_index;
cq 898 drivers/net/ethernet/mellanox/mlx4/en_rx.c index = (cq->mcq.cons_index) & ring->size_mask;
cq 899 drivers/net/ethernet/mellanox/mlx4/en_rx.c cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
cq 912 drivers/net/ethernet/mellanox/mlx4/en_rx.c mlx4_cq_set_ci(&cq->mcq);
cq 914 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring->cons = cq->mcq.cons_index;
cq 926 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
cq 927 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct mlx4_en_priv *priv = netdev_priv(cq->dev);
cq 930 drivers/net/ethernet/mellanox/mlx4/en_rx.c napi_schedule_irqoff(&cq->napi);
cq 932 drivers/net/ethernet/mellanox/mlx4/en_rx.c mlx4_en_arm_cq(priv, cq);
cq 938 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
cq 939 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct net_device *dev = cq->dev;
cq 946 drivers/net/ethernet/mellanox/mlx4/en_rx.c xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
cq 954 drivers/net/ethernet/mellanox/mlx4/en_rx.c done = mlx4_en_process_rx_cq(dev, cq, budget);
cq 968 drivers/net/ethernet/mellanox/mlx4/en_rx.c idata = irq_desc_get_irq_data(cq->irq_desc);
cq 985 drivers/net/ethernet/mellanox/mlx4/en_rx.c mlx4_en_arm_cq(priv, cq);
cq 184 drivers/net/ethernet/mellanox/mlx4/en_tx.c int cq, int user_prio)
user_prio) cq 189 drivers/net/ethernet/mellanox/mlx4/en_tx.c ring->sp_cqn = cq; cq 396 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_en_cq *cq, int napi_budget) cq 399 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_cq *mcq = &cq->mcq; cq 400 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->type][cq->ring]; cq 406 drivers/net/ethernet/mellanox/mlx4/en_tx.c int size = cq->size; cq 408 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_cqe *buf = cq->buf; cq 494 drivers/net/ethernet/mellanox/mlx4/en_tx.c if (cq->type == TX_XDP) cq 512 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); cq 513 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_en_priv *priv = netdev_priv(cq->dev); cq 516 drivers/net/ethernet/mellanox/mlx4/en_tx.c napi_schedule_irqoff(&cq->napi); cq 518 drivers/net/ethernet/mellanox/mlx4/en_tx.c mlx4_en_arm_cq(priv, cq); cq 524 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); cq 525 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct net_device *dev = cq->dev; cq 529 drivers/net/ethernet/mellanox/mlx4/en_tx.c clean_complete = mlx4_en_process_tx_cq(dev, cq, budget); cq 534 drivers/net/ethernet/mellanox/mlx4/en_tx.c mlx4_en_arm_cq(priv, cq); cq 1253 drivers/net/ethernet/mellanox/mlx4/eq.c err = mlx4_create_eq(dev, dev->quotas.cq + cq 999 drivers/net/ethernet/mellanox/mlx4/main.c dev->quotas.cq = func_cap->cq_quota; cq 693 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq 695 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); cq 696 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); cq 697 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); cq 721 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h int cq, int user_prio); cq 736 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h struct mlx4_en_cq *cq, cq 741 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h struct mlx4_en_cq *cq, int napi_budget); cq 194 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq; cq 454 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; cq 464 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c dev->quotas.cq = cq 1672 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c enum res_cq_states state, struct res_cq **cq) cq 1702 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (cq) cq 1703 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c *cq = r; cq 3443 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq = NULL; cq 3446 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq); cq 3459 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c cq->mtt = mtt; cq 3479 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq = NULL; cq 3481 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq); cq 3487 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c atomic_dec(&cq->mtt->ref_count); cq 3503 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq; cq 3506 
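The en_rx.c/en_tx.c entries above all orbit one interrupt -> napi_schedule -> poll -> re-arm cycle (mlx4_en_tx_irq(), mlx4_en_poll_tx_cq(), mlx4_en_arm_cq()): the IRQ handler schedules NAPI, the poll routine consumes completions up to its budget, and the CQ is only re-armed once the ring is fully drained. A minimal stand-alone model of that cycle; all types and helpers here are illustrative stand-ins, not the driver's real definitions:

/*
 * Stand-alone model of the poll/re-arm cycle seen in the
 * mlx4_en_poll_tx_cq()/mlx4_en_rx_irq() entries above.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_cq {
    int pending;   /* completions the "hardware" has queued */
    bool armed;    /* whether the CQ will raise an interrupt */
};

/* Consume up to budget completions; return how many were handled. */
static int model_process_cq(struct model_cq *cq, int budget)
{
    int done = 0;
    while (cq->pending > 0 && done < budget) {
        cq->pending--;
        done++;
    }
    return done;
}

/* The NAPI-style poll: only re-arm once the ring is fully drained. */
static void model_poll(struct model_cq *cq, int budget)
{
    int done = model_process_cq(cq, budget);

    if (done < budget) {
        /* napi_complete_done() + mlx4_en_arm_cq() in the driver */
        cq->armed = true;
        printf("drained (%d done), CQ re-armed\n", done);
    } else {
        /* budget exhausted: stay in polling mode, no re-arm */
        printf("budget used (%d done), polling again\n", done);
    }
}

int main(void)
{
    struct model_cq cq = { .pending = 70, .armed = false };

    model_poll(&cq, 64);   /* first pass exhausts the budget */
    model_poll(&cq, 64);   /* second pass drains and re-arms */
    return 0;
}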
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = get_res(dev, slave, cqn, RES_CQ, &cq); cq 3510 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (cq->com.from_state != RES_CQ_HW) cq 3525 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq) cq 3533 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt); cq 3537 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (orig_mtt != cq->mtt) { cq 3555 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c cq->mtt = mtt; cq 3575 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq; cq 3578 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = get_res(dev, slave, cqn, RES_CQ, &cq); cq 3582 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (cq->com.from_state != RES_CQ_HW) cq 3586 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq); cq 3671 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (srq->cq) cq 3672 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c atomic_dec(&srq->cq->ref_count); cq 4782 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (srq->cq) cq 4783 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c atomic_dec(&srq->cq->ref_count); cq 4803 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c struct res_cq *cq; cq 4816 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c list_for_each_entry_safe(cq, tmp, cq_list, com.list) { cq 4818 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) { cq 4819 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c cqn = cq->com.res_id; cq 4820 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c state = cq->com.from_state; cq 4826 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c rb_erase(&cq->com.node, cq 4828 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c list_del(&cq->com.list); cq 4832 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c kfree(cq); cq 4845 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c atomic_dec(&cq->mtt->ref_count); cq 71 drivers/net/ethernet/mellanox/mlx5/core/cq.c static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, cq 75 drivers/net/ethernet/mellanox/mlx5/core/cq.c struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv; cq 83 drivers/net/ethernet/mellanox/mlx5/core/cq.c if (list_empty_careful(&cq->tasklet_ctx.list)) { cq 84 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_cq_hold(cq); cq 85 drivers/net/ethernet/mellanox/mlx5/core/cq.c list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); cq 90 drivers/net/ethernet/mellanox/mlx5/core/cq.c int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq 109 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq 110 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->cons_index = 0; cq 111 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->arm_sn = 0; cq 112 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->eq = eq; cq 113 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->uid = MLX5_GET(create_cq_in, in, uid); cq 114 drivers/net/ethernet/mellanox/mlx5/core/cq.c refcount_set(&cq->refcount, 1); cq 115 drivers/net/ethernet/mellanox/mlx5/core/cq.c init_completion(&cq->free); cq 116 drivers/net/ethernet/mellanox/mlx5/core/cq.c if (!cq->comp) cq 117 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->comp = mlx5_add_cq_to_tasklet; cq 119 
drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->tasklet_ctx.priv = &eq->tasklet_ctx; cq 120 drivers/net/ethernet/mellanox/mlx5/core/cq.c INIT_LIST_HEAD(&cq->tasklet_ctx.list); cq 123 drivers/net/ethernet/mellanox/mlx5/core/cq.c err = mlx5_eq_add_cq(&eq->core, cq); cq 128 drivers/net/ethernet/mellanox/mlx5/core/cq.c err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq); cq 132 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->pid = current->pid; cq 133 drivers/net/ethernet/mellanox/mlx5/core/cq.c err = mlx5_debug_cq_add(dev, cq); cq 136 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->cqn); cq 138 drivers/net/ethernet/mellanox/mlx5/core/cq.c cq->uar = dev->priv.uar; cq 143 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_eq_del_cq(&eq->core, cq); cq 148 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(destroy_cq_in, din, cqn, cq->cqn); cq 149 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(destroy_cq_in, din, uid, cq->uid); cq 155 drivers/net/ethernet/mellanox/mlx5/core/cq.c int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) cq 161 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); cq 162 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_eq_del_cq(&cq->eq->core, cq); cq 165 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); cq 166 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(destroy_cq_in, in, uid, cq->uid); cq 171 drivers/net/ethernet/mellanox/mlx5/core/cq.c synchronize_irq(cq->irqn); cq 173 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_debug_cq_remove(dev, cq); cq 174 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_cq_put(cq); cq 175 drivers/net/ethernet/mellanox/mlx5/core/cq.c wait_for_completion(&cq->free); cq 181 drivers/net/ethernet/mellanox/mlx5/core/cq.c int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq 187 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(query_cq_in, in, cqn, cq->cqn); cq 192 drivers/net/ethernet/mellanox/mlx5/core/cq.c int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq 198 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(modify_cq_in, in, uid, cq->uid); cq 204 drivers/net/ethernet/mellanox/mlx5/core/cq.c struct mlx5_core_cq *cq, cq 211 drivers/net/ethernet/mellanox/mlx5/core/cq.c MLX5_SET(modify_cq_in, in, cqn, cq->cqn); cq 219 drivers/net/ethernet/mellanox/mlx5/core/cq.c return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); cq 334 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq 347 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c err = mlx5_core_query_cq(dev, cq, out, outlen); cq 356 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c param = cq->pid; cq 499 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) cq 507 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c &cq->dbg, cq->cqn, cq_fields, cq 508 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c ARRAY_SIZE(cq_fields), cq); cq 510 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c cq->dbg = NULL; cq 515 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) cq 520 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c if (cq->dbg) cq 521 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c rem_res_tree(cq->dbg); cq 397 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_cq cq; cq 520 
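The mlx5 core cq.c entries above encode a lifetime rule: create does refcount_set(&cq->refcount, 1) and init_completion(&cq->free); destroy drops that initial reference with mlx5_cq_put() and then wait_for_completion(&cq->free) blocks until the last temporary holder (for example, an EQ dispatch that took mlx5_cq_hold()) also puts its reference. A single-threaded model of that ordering, with all names as illustrative stand-ins:

/*
 * Model of the CQ lifetime rule in mlx5_core_create_cq() /
 * mlx5_core_destroy_cq() above: one initial reference, temporary
 * holds from event paths, and destroy waiting for the last put.
 */
#include <assert.h>
#include <stdio.h>

struct model_cq {
    int refcount;   /* refcount_set(&cq->refcount, 1) at create time */
    int freed;      /* stands in for complete(&cq->free)             */
};

static void model_cq_hold(struct model_cq *cq) { cq->refcount++; }

static void model_cq_put(struct model_cq *cq)
{
    if (--cq->refcount == 0)
        cq->freed = 1;          /* last ref gone: signal the waiter */
}

int main(void)
{
    struct model_cq cq = { .refcount = 1, .freed = 0 };

    model_cq_hold(&cq);         /* e.g. an EQ dispatch in flight */

    /* destroy path: drop the initial ref, then wait_for_completion() */
    model_cq_put(&cq);
    assert(!cq.freed);          /* still held by the dispatcher */

    model_cq_put(&cq);          /* dispatcher finishes its hold */
    assert(cq.freed);           /* now it is safe to free the CQ */

    printf("teardown ordered correctly\n");
    return 0;
}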
drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_cq cq; cq 551 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_cq cq; cq 662 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_cq cq; cq 917 drivers/net/ethernet/mellanox/mlx5/core/en.h bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); cq 918 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); cq 952 drivers/net/ethernet/mellanox/mlx5/core/en.h int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); cq 1029 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_cq_param *param, struct mlx5e_cq *cq); cq 1030 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_close_cq(struct mlx5e_cq *cq); cq 37 drivers/net/ethernet/mellanox/mlx5/core/en/health.c int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) cq 39 drivers/net/ethernet/mellanox/mlx5/core/en/health.c struct mlx5e_priv *priv = cq->channel->priv; cq 45 drivers/net/ethernet/mellanox/mlx5/core/en/health.c err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out)); cq 56 drivers/net/ethernet/mellanox/mlx5/core/en/health.c err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn); cq 71 drivers/net/ethernet/mellanox/mlx5/core/en/health.c int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) cq 77 drivers/net/ethernet/mellanox/mlx5/core/en/health.c cq_sz = mlx5_cqwq_get_size(&cq->wq); cq 78 drivers/net/ethernet/mellanox/mlx5/core/en/health.c cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); cq 23 drivers/net/ethernet/mellanox/mlx5/core/en/health.h int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); cq 24 drivers/net/ethernet/mellanox/mlx5/core/en/health.h int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); cq 196 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c eq = rq->cq.mcq.eq; cq 214 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn); cq 292 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg); cq 345 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg); cq 105 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c eq = sq->cq.mcq.eq; cq 123 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, cq 196 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg); cq 240 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg); cq 142 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h static inline void mlx5e_cq_arm(struct mlx5e_cq *cq) cq 146 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mcq = &cq->mcq; cq 147 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); cq 387 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) cq 395 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq = container_of(cq, struct mlx5e_xdpsq, cq); cq 400 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c cqe = mlx5_cqwq_get_cqe(&cq->wq); cq 414 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5_cqwq_pop(&cq->wq); cq 435 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); cq 
442 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5_cqwq_update_db_record(&cq->wq); cq 68 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); cq 76 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c skb = napi_alloc_skb(rq->cq.napi, cqe_bcnt); cq 81 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq); cq 89 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq); cq 103 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq); cq 123 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskicosq.cq); cq 129 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xsksq.cq); cq 135 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskrq.cq); cq 150 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskrq.cq); cq 152 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xskicosq.cq); cq 154 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c mlx5e_close_cq(&c->xsksq.cq); cq 51 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq); cq 61 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); cq 525 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c &c->sq[tc].cq.mcq, cq 530 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, cq 696 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); cq 1326 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn; cq 1424 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn; cq 1473 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn; cq 1535 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_cq *cq) cq 1537 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_cq *mcq = &cq->mcq; cq 1547 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, cq 1548 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cq->wq_ctrl); cq 1553 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->set_ci_db = cq->wq_ctrl.db.db; cq 1554 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->arm_db = cq->wq_ctrl.db.db + 1; cq 1562 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { cq 1563 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); cq 1568 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cq->mdev = mdev; cq 1575 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_cq *cq) cq 1584 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_cq_common(mdev, param, cq); cq 1586 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cq->napi = &c->napi; cq 1587 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cq->channel = c; cq 1592 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_cq(struct mlx5e_cq *cq) cq 1594 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_destroy(&cq->wq_ctrl); cq 1597 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_create_cq(struct mlx5e_cq *cq, struct 
mlx5e_cq_param *param) cq 1600 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_dev *mdev = cq->mdev; cq 1601 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_cq *mcq = &cq->mcq; cq 1615 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(u64) * cq->wq_ctrl.buf.npages; cq 1624 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, cq 1630 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - cq 1632 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); cq 1641 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_cq_arm(cq); cq 1646 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_destroy_cq(struct mlx5e_cq *cq) cq 1648 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_core_destroy_cq(cq->mdev, &cq->mcq); cq 1652 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_cq_param *param, struct mlx5e_cq *cq) cq 1657 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_cq(c, param, cq); cq 1661 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_cq(cq, param); cq 1666 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts); cq 1670 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_cq(cq); cq 1675 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_close_cq(struct mlx5e_cq *cq) cq 1677 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_cq(cq); cq 1678 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_cq(cq); cq 1690 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cparam->tx_cq, &c->sq[tc].cq); cq 1699 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq); cq 1709 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq); cq 1855 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); cq 1863 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); cq 1867 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); cq 1873 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cparam->tx_cq, &c->rq_xdpsq.cq) : 0; cq 1921 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq_xdpsq.cq); cq 1924 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq.cq); cq 1927 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->xdpsq.cq); cq 1933 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->icosq.cq); cq 1948 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq_xdpsq.cq); cq 1949 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->rq.cq); cq 1950 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->xdpsq.cq); cq 1952 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->icosq.cq); cq 3141 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_cq *cq, cq 3147 drivers/net/ethernet/mellanox/mlx5/core/en_main.c return mlx5e_alloc_cq_common(mdev, param, cq); cq 3156 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_cq *cq = &drop_rq->cq; cq 3161 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param); cq 3165 
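mlx5e_open_cq()/mlx5e_close_cq() above split CQ setup into a software allocation step (mlx5e_alloc_cq) and a hardware creation step (mlx5e_create_cq), with the error path (the err_free_cq label) unwinding exactly the steps that succeeded, and close mirroring open in reverse. A sketch of that pairing, assuming stand-in helpers:

/*
 * Model of the open/close pairing in mlx5e_open_cq()/mlx5e_close_cq():
 * allocate, then create in hardware; on create failure, free only
 * what was allocated. All helpers below are illustrative stand-ins.
 */
#include <stdio.h>

static int model_alloc_cq(void)    { puts("alloc");   return 0;  }
static void model_free_cq(void)    { puts("free");               }
static int model_create_cq(void)   { puts("create");  return -1; } /* fails */
static void model_destroy_cq(void) { puts("destroy");            }

static int model_open_cq(void)
{
    int err = model_alloc_cq();
    if (err)
        return err;

    err = model_create_cq();
    if (err)
        goto err_free_cq;       /* unwind only the alloc step */

    return 0;

err_free_cq:
    model_free_cq();
    return err;
}

/* close is the mirror image: destroy the HW object, then free memory */
static void model_close_cq(void)
{
    model_destroy_cq();
    model_free_cq();
}

int main(void)
{
    if (model_open_cq())
        puts("open failed, partially-built CQ unwound");
    else
        model_close_cq();
    return 0;
}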
drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_cq(cq, &cq_param); cq 3187 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_cq(cq); cq 3190 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_cq(cq); cq 3199 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_cq(&drop_rq->cq); cq 3200 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_cq(&drop_rq->cq); cq 590 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) cq 592 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); cq 600 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cqe = mlx5_cqwq_get_cqe(&cq->wq); cq 614 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_cqwq_pop(&cq->wq); cq 619 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c netdev_WARN_ONCE(cq->channel->netdev, cq 622 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c queue_work(cq->channel->priv->wq, &sq->recover_work); cq 638 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c netdev_WARN_ONCE(cq->channel->netdev, cq 644 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); cq 648 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_cqwq_update_db_record(&cq->wq); cq 1107 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c skb = napi_alloc_skb(rq->cq.napi, cq 1180 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c napi_gro_receive(rq->cq.napi, skb); cq 1227 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c napi_gro_receive(rq->cq.napi, skb); cq 1247 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c skb = napi_alloc_skb(rq->cq.napi, cq 1368 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c napi_gro_receive(rq->cq.napi, skb); cq 1380 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) cq 1382 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); cq 1383 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *cqwq = &cq->wq; cq 1541 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c napi_gro_receive(rq->cq.napi, skb); cq 1581 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c napi_gro_receive(rq->cq.napi, skb); cq 406 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_cqwq *wq = &sq->cq.wq; cq 413 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->cq.mcq.cqn, ci, sq->sqn, cq 416 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_dump_err_cqe(sq->cq.mdev, err_cqe); cq 419 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) cq 430 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq = container_of(cq, struct mlx5e_txqsq, cq); cq 435 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c cqe = mlx5_cqwq_get_cqe(&cq->wq); cq 457 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_cqwq_pop(&cq->wq); cq 466 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c queue_work(cq->channel->priv->wq, cq 513 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); cq 517 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_cqwq_update_db_record(&cq->wq); cq 58 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); cq 70 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample); cq 127 
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); cq 129 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); cq 132 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq); cq 136 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget); cq 139 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done); cq 144 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_poll_ico_cq(&c->icosq.cq); cq 148 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c if (mlx5e_poll_ico_cq(&c->xskicosq.cq)) cq 153 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); cq 175 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->sq[i].cq); cq 180 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&rq->cq); cq 181 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->icosq.cq); cq 182 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->xdpsq.cq); cq 186 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->xskicosq.cq); cq 187 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&xsksq->cq); cq 188 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&xskrq->cq); cq 201 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); cq 203 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c napi_schedule(cq->napi); cq 204 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c cq->event_ctr++; cq 205 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c cq->channel->stats->events++; cq 210 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); cq 211 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_channel *c = cq->channel; cq 117 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_core_cq *cq = NULL; cq 120 drivers/net/ethernet/mellanox/mlx5/core/eq.c cq = radix_tree_lookup(&table->tree, cqn); cq 121 drivers/net/ethernet/mellanox/mlx5/core/eq.c if (likely(cq)) cq 122 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_cq_hold(cq); cq 125 drivers/net/ethernet/mellanox/mlx5/core/eq.c return cq; cq 144 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_core_cq *cq; cq 153 drivers/net/ethernet/mellanox/mlx5/core/eq.c cq = mlx5_eq_cq_get(eq, cqn); cq 154 drivers/net/ethernet/mellanox/mlx5/core/eq.c if (likely(cq)) { cq 155 drivers/net/ethernet/mellanox/mlx5/core/eq.c ++cq->arm_sn; cq 156 drivers/net/ethernet/mellanox/mlx5/core/eq.c cq->comp(cq, eqe); cq 157 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_cq_put(cq); cq 383 drivers/net/ethernet/mellanox/mlx5/core/eq.c int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) cq 389 drivers/net/ethernet/mellanox/mlx5/core/eq.c err = radix_tree_insert(&table->tree, cq->cqn, cq); cq 395 drivers/net/ethernet/mellanox/mlx5/core/eq.c void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) cq 401 drivers/net/ethernet/mellanox/mlx5/core/eq.c tmp = radix_tree_delete(&table->tree, cq->cqn); cq 406 drivers/net/ethernet/mellanox/mlx5/core/eq.c eq->eqn, cq->cqn); cq 410 drivers/net/ethernet/mellanox/mlx5/core/eq.c if (tmp != cq) cq 412 drivers/net/ethernet/mellanox/mlx5/core/eq.c eq->eqn, cq->cqn); cq 478 drivers/net/ethernet/mellanox/mlx5/core/eq.c struct mlx5_core_cq *cq; cq 493 
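The eq.c entries above show how a completion EQE is routed to its CQ: the CQN is looked up in a radix tree under the table lock, a reference is taken with mlx5_cq_hold() while still under that lock, arm_sn is bumped, cq->comp() is invoked, and mlx5_cq_put() drops the reference afterwards; an unknown CQN is simply dropped. A model using a flat array in place of the radix tree, with all names as stand-ins:

/*
 * Model of the EQE -> CQ dispatch in eq.c above. The table is a flat
 * array here instead of a radix tree; locking is elided.
 */
#include <stdio.h>

#define MAX_CQN 8

struct model_cq {
    int cqn;
    int arm_sn;
    int refcount;
    void (*comp)(struct model_cq *cq);
};

static struct model_cq *table[MAX_CQN];   /* radix tree stand-in */

static struct model_cq *model_eq_cq_get(int cqn)
{
    struct model_cq *cq = (cqn >= 0 && cqn < MAX_CQN) ? table[cqn] : 0;
    if (cq)
        cq->refcount++;       /* mlx5_cq_hold() under the table lock */
    return cq;
}

static void model_handle_comp_eqe(int cqn)
{
    struct model_cq *cq = model_eq_cq_get(cqn);

    if (!cq) {
        printf("completion for unknown CQN %d dropped\n", cqn);
        return;
    }
    ++cq->arm_sn;             /* track the arm sequence, as in eq.c */
    cq->comp(cq);
    cq->refcount--;           /* mlx5_cq_put() */
}

static void say_done(struct model_cq *cq)
{
    printf("cq %d completed (arm_sn=%d)\n", cq->cqn, cq->arm_sn);
}

int main(void)
{
    struct model_cq cq = { .cqn = 3, .arm_sn = 0, .refcount = 1,
                           .comp = say_done };

    table[cq.cqn] = &cq;      /* mlx5_eq_add_cq() */
    model_handle_comp_eqe(3);
    model_handle_comp_eqe(5); /* no such CQ: dropped with a warning */
    table[cq.cqn] = 0;        /* mlx5_eq_del_cq() */
    return 0;
}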
drivers/net/ethernet/mellanox/mlx5/core/eq.c cq = mlx5_eq_cq_get(eq, cqn); cq 494 drivers/net/ethernet/mellanox/mlx5/core/eq.c if (unlikely(!cq)) { cq 499 drivers/net/ethernet/mellanox/mlx5/core/eq.c if (cq->event) cq 500 drivers/net/ethernet/mellanox/mlx5/core/eq.c cq->event(cq, type); cq 502 drivers/net/ethernet/mellanox/mlx5/core/eq.c mlx5_cq_put(cq); cq 361 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT, cq 362 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->fdev->conn_res.uar->map, conn->cq.wq.cc); cq 370 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); cq 388 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c cqe = mlx5_cqwq_get_cqe(&conn->cq.wq); cq 393 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_cqwq_pop(&conn->cq.wq); cq 395 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_cqwq_update_db_record(&conn->cq.wq); cq 398 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c tasklet_schedule(&conn->cq.tasklet); cq 402 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc); cq 422 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq); cq 448 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq, cq 449 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c &conn->cq.wq_ctrl); cq 453 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) { cq 454 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i); cq 459 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c sizeof(u64) * conn->cq.wq_ctrl.buf.npages; cq 476 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - cq 478 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); cq 481 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); cq 483 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out)); cq 489 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.cqe_sz = 64; cq 490 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db; cq 491 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1; cq 492 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->cq.mcq.set_ci_db = 0; cq 493 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->cq.mcq.arm_db = 0; cq 494 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.vector = 0; cq 495 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete; cq 496 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.event = mlx5_fpga_conn_cq_event; cq 497 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.irqn = irqn; cq 498 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.uar = fdev->conn_res.uar; cq 499 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet, cq 502 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn); cq 507 
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_wq_destroy(&conn->cq.wq_ctrl); cq 514 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c tasklet_disable(&conn->cq.tasklet); cq 515 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c tasklet_kill(&conn->cq.tasklet); cq 516 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq); cq 517 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_wq_destroy(&conn->cq.wq_ctrl); cq 594 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); cq 595 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); cq 699 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn); cq 700 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn); cq 980 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c tasklet_disable(&conn->cq.tasklet); cq 981 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c synchronize_irq(conn->cq.mcq.irqn); cq 60 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h } cq; cq 77 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); cq 78 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); cq 324 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ne = dr_poll_cq(send_ring->cq, 1); cq 706 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c struct mlx5dr_cq *cq; cq 714 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq = kzalloc(sizeof(*cq), GFP_KERNEL); cq 715 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c if (!cq) cq 724 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq, cq 725 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c &cq->wq_ctrl); cq 729 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { cq 730 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cqe = mlx5_cqwq_get_wqe(&cq->wq, i); cq 735 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c sizeof(u64) * cq->wq_ctrl.buf.npages; cq 751 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - cq 753 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); cq 756 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); cq 758 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.event = dr_cq_event; cq 759 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.comp = dr_cq_complete; cq 761 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); cq 767 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.cqe_sz = 64; cq 768 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.set_ci_db = cq->wq_ctrl.db.db; cq 769 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; cq 770 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *cq->mcq.set_ci_db = 0; cq 775 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *cq->mcq.arm_db = cpu_to_be32(2 << 28); cq 777 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
cq->mcq.vector = 0; cq 778 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.irqn = irqn; cq 779 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.uar = uar; cq 781 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c return cq; cq 784 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_wq_destroy(&cq->wq_ctrl); cq 786 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c kfree(cq); cq 790 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq) cq 792 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_core_destroy_cq(mdev, &cq->mcq); cq 793 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_wq_destroy(&cq->wq_ctrl); cq 794 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c kfree(cq); cq 875 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size); cq 876 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c if (!dmn->send_ring->cq) { cq 881 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c init_attr.cqn = dmn->send_ring->cq->mcq.cqn; cq 892 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dmn->send_ring->cq->qp = dmn->send_ring->qp; cq 944 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_destroy_cq(dmn->mdev, dmn->send_ring->cq); cq 955 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_destroy_cq(dmn->mdev, send_ring->cq); cq 1012 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5dr_cq *cq; cq 906 drivers/net/ethernet/mellanox/mlxsw/cmd.h MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8); cq 88 drivers/net/ethernet/mellanox/mlxsw/pci.c } cq; cq 453 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.v = mlxsw_pci->max_cqe_ver; cq 456 drivers/net/ethernet/mellanox/mlxsw/pci.c if (q->u.cq.v == MLXSW_PCI_CQE_V2 && cq 458 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.v = MLXSW_PCI_CQE_V1; cq 472 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); cq 475 drivers/net/ethernet/mellanox/mlxsw/pci.c if (q->u.cq.v == MLXSW_PCI_CQE_V1) cq 478 drivers/net/ethernet/mellanox/mlxsw/pci.c else if (q->u.cq.v == MLXSW_PCI_CQE_V2) cq 599 drivers/net/ethernet/mellanox/mlxsw/pci.c owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem); cq 617 drivers/net/ethernet/mellanox/mlxsw/pci.c u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); cq 618 drivers/net/ethernet/mellanox/mlxsw/pci.c u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); cq 630 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.comp_sdq_count++; cq 636 drivers/net/ethernet/mellanox/mlxsw/pci.c wqe_counter, q->u.cq.v, ncqe); cq 637 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.comp_rdq_count++; cq 648 drivers/net/ethernet/mellanox/mlxsw/pci.c return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : cq 654 drivers/net/ethernet/mellanox/mlxsw/pci.c return q->u.cq.v == MLXSW_PCI_CQE_V2 ? 
MLXSW_PCI_CQE2_SIZE : cq 97 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c struct ionic_cq *cq = seq->private; cq 99 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c seq_printf(seq, "%d\n", cq->tail->index); cq 122 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c struct ionic_cq *cq = &qcq->cq; cq 170 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); cq 171 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); cq 172 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); cq 174 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c (u8 *)&cq->done_color); cq 176 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); cq 181 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c desc_blob->data = cq->base; cq 182 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c desc_blob->size = (unsigned long)cq->num_descs * cq->desc_size; cq 269 drivers/net/ethernet/pensando/ionic/ionic_dev.c struct ionic_cq *cq = &qcq->cq; cq 282 drivers/net/ethernet/pensando/ionic/ionic_dev.c .q_init.cq_ring_base = cpu_to_le64(cq->base_pa), cq 293 drivers/net/ethernet/pensando/ionic/ionic_dev.c int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, cq 308 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->lif = lif; cq 309 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->bound_intr = intr; cq 310 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->num_descs = num_descs; cq 311 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->desc_size = desc_size; cq 312 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->tail = cq->info; cq 313 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->done_color = 1; cq 315 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur = cq->info; cq 319 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur->next = cq->info; cq 331 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa) cq 336 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->base = base; cq 337 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->base_pa = base_pa; cq 339 drivers/net/ethernet/pensando/ionic/ionic_dev.c for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++) cq 340 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur->cq_desc = base + (i * cq->desc_size); cq 343 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q) cq 345 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->bound_q = q; cq 348 drivers/net/ethernet/pensando/ionic/ionic_dev.c unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, cq 357 drivers/net/ethernet/pensando/ionic/ionic_dev.c while (cb(cq, cq->tail)) { cq 358 drivers/net/ethernet/pensando/ionic/ionic_dev.c if (cq->tail->last) cq 359 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->done_color = !cq->done_color; cq 360 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->tail = cq->tail->next; cq 361 drivers/net/ethernet/pensando/ionic/ionic_dev.c DEBUG_STATS_CQE_CNT(cq); cq 276 drivers/net/ethernet/pensando/ionic/ionic_dev.h int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, cq 279 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa); cq 280 drivers/net/ethernet/pensando/ionic/ionic_dev.h void 
ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); cq 281 drivers/net/ethernet/pensando/ionic/ionic_dev.h typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info); cq 283 drivers/net/ethernet/pensando/ionic/ionic_dev.h unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, cq 284 drivers/net/ethernet/pensando/ionic/ionic_lif.c devm_kfree(dev, qcq->cq.info); cq 285 drivers/net/ethernet/pensando/ionic/ionic_lif.c qcq->cq.info = NULL; cq 325 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index); cq 420 drivers/net/ethernet/pensando/ionic/ionic_lif.c new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs, cq 422 drivers/net/ethernet/pensando/ionic/ionic_lif.c if (!new->cq.info) { cq 428 drivers/net/ethernet/pensando/ionic/ionic_lif.c err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); cq 458 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_cq_map(&new->cq, cq_base, cq_base_pa); cq 459 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_cq_bind(&new->cq, &new->q); cq 557 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_cq *cq = &qcq->cq; cq 571 drivers/net/ethernet/pensando/ionic/ionic_lif.c .cq_ring_base = cpu_to_le64(cq->base_pa), cq 604 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_cq *cq = &qcq->cq; cq 613 drivers/net/ethernet/pensando/ionic/ionic_lif.c .intr_index = cpu_to_le16(cq->bound_intr->index), cq 617 drivers/net/ethernet/pensando/ionic/ionic_lif.c .cq_ring_base = cpu_to_le64(cq->base_pa), cq 654 drivers/net/ethernet/pensando/ionic/ionic_lif.c static bool ionic_notifyq_service(struct ionic_cq *cq, cq 663 drivers/net/ethernet/pensando/ionic/ionic_lif.c q = cq->bound_q; cq 701 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_cq *cq = &lif->notifyqcq->cq; cq 704 drivers/net/ethernet/pensando/ionic/ionic_lif.c work_done = ionic_cq_service(cq, budget, ionic_notifyq_service, cq 707 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, cq 713 drivers/net/ethernet/pensando/ionic/ionic_lif.c static bool ionic_adminq_service(struct ionic_cq *cq, cq 718 drivers/net/ethernet/pensando/ionic/ionic_lif.c if (!color_match(comp->color, cq->done_color)) cq 721 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index)); cq 1412 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_tx_flush(&lif->txqcqs[i].qcq->cq); cq 1415 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_rx_flush(&lif->rxqcqs[i].qcq->cq); cq 66 drivers/net/ethernet/pensando/ionic/ionic_lif.h struct ionic_cq cq; cq 84 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define napi_to_cq(napi) (&napi_to_qcq(napi)->cq) cq 271 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++) cq 288 drivers/net/ethernet/pensando/ionic/ionic_main.c struct ionic_cq *cq = &qcq->cq; cq 291 drivers/net/ethernet/pensando/ionic/ionic_main.c work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg); cq 295 drivers/net/ethernet/pensando/ionic/ionic_main.c DEBUG_STATS_INTR_REARM(cq->bound_intr); cq 300 drivers/net/ethernet/pensando/ionic/ionic_main.c ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl, cq 301 drivers/net/ethernet/pensando/ionic/ionic_main.c cq->bound_intr->index, cq 258 drivers/net/ethernet/pensando/ionic/ionic_stats.c **buf = IONIC_READ_STAT64(&txqcq->cq, cq 287 
drivers/net/ethernet/pensando/ionic/ionic_stats.c **buf = IONIC_READ_STAT64(&rxqcq->cq, cq 159 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) cq 162 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_queue *q = cq->bound_q; cq 165 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (!color_match(comp->pkt_type_color, cq->done_color)) cq 204 drivers/net/ethernet/pensando/ionic/ionic_txrx.c void ionic_rx_flush(struct ionic_cq *cq) cq 206 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_dev *idev = &cq->lif->ionic->idev; cq 209 drivers/net/ethernet/pensando/ionic/ionic_txrx.c work_done = ionic_rx_walk_cq(cq, cq->num_descs); cq 212 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, cq 312 drivers/net/ethernet/pensando/ionic/ionic_txrx.c txcq = &lif->txqcqs[qi].qcq->cq; cq 416 drivers/net/ethernet/pensando/ionic/ionic_txrx.c void ionic_tx_flush(struct ionic_cq *cq) cq 418 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_comp *comp = cq->tail->cq_desc; cq 419 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_dev *idev = &cq->lif->ionic->idev; cq 420 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_queue *q = cq->bound_q; cq 425 drivers/net/ethernet/pensando/ionic/ionic_txrx.c while (work_done < cq->num_descs && cq 426 drivers/net/ethernet/pensando/ionic/ionic_txrx.c color_match(comp->color, cq->done_color)) { cq 434 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_tx_clean(q, desc_info, cq->tail, cq 440 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (cq->tail->last) cq 441 drivers/net/ethernet/pensando/ionic/ionic_txrx.c cq->done_color = !cq->done_color; cq 443 drivers/net/ethernet/pensando/ionic/ionic_txrx.c cq->tail = cq->tail->next; cq 444 drivers/net/ethernet/pensando/ionic/ionic_txrx.c comp = cq->tail->cq_desc; cq 445 drivers/net/ethernet/pensando/ionic/ionic_txrx.c DEBUG_STATS_CQE_CNT(cq); cq 451 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, cq 7 drivers/net/ethernet/pensando/ionic/ionic_txrx.h void ionic_rx_flush(struct ionic_cq *cq); cq 8 drivers/net/ethernet/pensando/ionic/ionic_txrx.h void ionic_tx_flush(struct ionic_cq *cq); cq 142 drivers/net/vmxnet3/vmxnet3_defs.h u32 cq:1; /* completion request */ cq 150 drivers/net/vmxnet3/vmxnet3_defs.h u32 cq:1; /* completion request */ cq 133 drivers/net/wireless/intersil/orinoco/wext.c } __packed cq; cq 136 drivers/net/wireless/intersil/orinoco/wext.c HERMES_RID_COMMSQUALITY, &cq); cq 139 drivers/net/wireless/intersil/orinoco/wext.c wstats->qual.qual = (int)le16_to_cpu(cq.qual); cq 140 drivers/net/wireless/intersil/orinoco/wext.c wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95; cq 141 drivers/net/wireless/intersil/orinoco/wext.c wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95; cq 140 drivers/nvme/host/rdma.c static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); cq 1096 drivers/nvme/host/rdma.c static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, cq 1099 drivers/nvme/host/rdma.c struct nvme_rdma_queue *queue = cq->cq_context; cq 1110 drivers/nvme/host/rdma.c static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) cq 1113 drivers/nvme/host/rdma.c nvme_rdma_wr_error(cq, wc, "MEMREG"); cq 1116 drivers/nvme/host/rdma.c static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) cq 1123 
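ionic_cq_service() and ionic_tx_flush() above rely on a color-bit handshake: the device stamps each completion with a color that flips every time the producer wraps the ring, and the consumer compares against done_color, toggling it when its own tail wraps, so stale descriptors never match and no separate valid flag is needed. A stand-alone model (ring size and types are illustrative):

/*
 * Model of the color-bit handshake used by ionic_cq_service() and
 * ionic_tx_flush() above.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING 4

struct comp { bool color; };

static struct comp ring[RING];
static bool prod_color = true, done_color = true;
static int prod, tail, serviced;

/* device side: write a completion with the current producer color */
static void produce(void)
{
    ring[prod].color = prod_color;
    if (++prod == RING) { prod = 0; prod_color = !prod_color; }
}

/* driver side: service while the color at tail matches done_color */
static void service(void)
{
    while (ring[tail].color == done_color) {
        serviced++;
        if (++tail == RING) { tail = 0; done_color = !done_color; }
    }
}

int main(void)
{
    for (int i = 0; i < 4; i++) produce();  /* fill the ring        */
    service();                              /* drains 4, then wraps */
    for (int i = 0; i < 2; i++) produce();  /* opposite color now   */
    service();                              /* drains 2 more        */
    printf("serviced %d completions\n", serviced);  /* prints 6 */
    return 0;
}

Note how the second service() call stops at ring[2]: its color is still the stale pre-wrap value, which no longer matches done_color.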
drivers/nvme/host/rdma.c nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); cq 1325 drivers/nvme/host/rdma.c static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) cq 1334 drivers/nvme/host/rdma.c nvme_rdma_wr_error(cq, wc, "SEND"); cq 1408 drivers/nvme/host/rdma.c static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) cq 1411 drivers/nvme/host/rdma.c nvme_rdma_wr_error(cq, wc, "ASYNC"); cq 1485 drivers/nvme/host/rdma.c static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) cq 1489 drivers/nvme/host/rdma.c struct nvme_rdma_queue *queue = cq->cq_context; cq 1495 drivers/nvme/host/rdma.c nvme_rdma_wr_error(cq, wc, "RECV"); cq 728 drivers/nvme/target/core.c void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, cq 731 drivers/nvme/target/core.c cq->qid = qid; cq 732 drivers/nvme/target/core.c cq->size = size; cq 734 drivers/nvme/target/core.c ctrl->cqs[qid] = cq; cq 857 drivers/nvme/target/core.c bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, cq 863 drivers/nvme/target/core.c req->cq = cq; cq 124 drivers/nvme/target/fabrics-cmd.c nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1); cq 291 drivers/nvme/target/nvmet.h struct nvmet_cq *cq; cq 375 drivers/nvme/target/nvmet.h bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, cq 385 drivers/nvme/target/nvmet.h void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, cq 79 drivers/nvme/target/rdma.c struct ib_cq *cq; cq 129 drivers/nvme/target/rdma.c static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc); cq 130 drivers/nvme/target/rdma.c static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); cq 131 drivers/nvme/target/rdma.c static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); cq 534 drivers/nvme/target/rdma.c static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) cq 538 drivers/nvme/target/rdma.c struct nvmet_rdma_queue *queue = cq->cq_context; cq 582 drivers/nvme/target/rdma.c static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) cq 586 drivers/nvme/target/rdma.c struct nvmet_rdma_queue *queue = cq->cq_context; cq 787 drivers/nvme/target/rdma.c static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) cq 791 drivers/nvme/target/rdma.c struct nvmet_rdma_queue *queue = cq->cq_context; cq 992 drivers/nvme/target/rdma.c queue->cq = ib_alloc_cq(ndev->device, queue, cq 995 drivers/nvme/target/rdma.c if (IS_ERR(queue->cq)) { cq 996 drivers/nvme/target/rdma.c ret = PTR_ERR(queue->cq); cq 1005 drivers/nvme/target/rdma.c qp_attr.send_cq = queue->cq; cq 1006 drivers/nvme/target/rdma.c qp_attr.recv_cq = queue->cq; cq 1032 drivers/nvme/target/rdma.c __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, cq 1050 drivers/nvme/target/rdma.c ib_free_cq(queue->cq); cq 1061 drivers/nvme/target/rdma.c ib_free_cq(queue->cq); cq 55 drivers/nvme/target/trace.h if ((init && req->sq->qid) || (!init && req->cq->qid)) { cq 120 drivers/nvme/target/trace.h __entry->qid = req->cq->qid; cq 743 drivers/s390/net/qeth_core.h enum qeth_cq cq; cq 295 drivers/s390/net/qeth_core_main.c if (card->options.cq == QETH_CQ_ENABLED) { cq 317 drivers/s390/net/qeth_core_main.c if (card->options.cq == QETH_CQ_ENABLED) { cq 398 drivers/s390/net/qeth_core_main.c if (q->card->options.cq != QETH_CQ_ENABLED) cq 484 drivers/s390/net/qeth_core_main.c return card->options.cq == QETH_CQ_ENABLED && cq 1301 drivers/s390/net/qeth_core_main.c card->options.cq = QETH_CQ_DISABLED; cq 2632 drivers/s390/net/qeth_core_main.c if 
((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { cq 3403 drivers/s390/net/qeth_core_main.c int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) cq 3407 drivers/s390/net/qeth_core_main.c if (card->options.cq == QETH_CQ_NOTAVAILABLE) { cq 3411 drivers/s390/net/qeth_core_main.c if (card->options.cq == cq) { cq 3417 drivers/s390/net/qeth_core_main.c card->options.cq = cq; cq 3430 drivers/s390/net/qeth_core_main.c struct qeth_qdio_q *cq = card->qdio.c_q; cq 3449 drivers/s390/net/qeth_core_main.c struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; cq 4701 drivers/s390/net/qeth_core_main.c card->options.cq = QETH_CQ_NOTAVAILABLE; cq 4720 drivers/s390/net/qeth_core_main.c if (card->options.cq == QETH_CQ_ENABLED) { cq 4820 drivers/s390/net/qeth_core_main.c switch (card->options.cq) { cq 5088 drivers/s390/net/qeth_core_main.c (card->options.cq == QETH_CQ_ENABLED)) cq 5225 drivers/s390/net/qeth_core_main.c WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); cq 5244 drivers/s390/net/qeth_core_main.c if (card->options.cq == QETH_CQ_ENABLED) cq 2056 drivers/s390/net/qeth_l3_main.c if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || cq 2057 drivers/s390/net/qeth_l3_main.c (card->options.cq == QETH_CQ_ENABLED && cq 2419 drivers/s390/net/qeth_l3_main.c if (card->options.cq == QETH_CQ_ENABLED) { cq 211 drivers/s390/net/qeth_l3_sys.c if (card->options.cq == QETH_CQ_ENABLED) cq 293 drivers/s390/net/qeth_l3_sys.c if (card->options.cq == QETH_CQ_NOTAVAILABLE) { cq 91 drivers/scsi/be2iscsi/be.h struct be_queue_info *cq; cq 98 drivers/scsi/be2iscsi/be.h struct be_queue_info cq; cq 776 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *cq, struct be_queue_info *eq, cq 783 drivers/scsi/be2iscsi/be_cmds.c struct be_dma_mem *q_mem = &cq->dma_mem; cq 801 drivers/scsi/be2iscsi/be_cmds.c __ilog2_u32(cq->len / 256)); cq 817 drivers/scsi/be2iscsi/be_cmds.c __ilog2_u32(cq->len / 256)); cq 830 drivers/scsi/be2iscsi/be_cmds.c cq->id = le16_to_cpu(resp->cq_id); cq 831 drivers/scsi/be2iscsi/be_cmds.c cq->created = true; cq 852 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *cq) cq 883 drivers/scsi/be2iscsi/be_cmds.c AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); cq 975 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *cq, cq 1016 drivers/scsi/be2iscsi/be_cmds.c cq_id_recv, ctxt, cq->id); cq 1029 drivers/scsi/be2iscsi/be_cmds.c cq_id_recv, ctxt, cq->id); cq 806 drivers/scsi/be2iscsi/be_cmds.h struct be_queue_info *cq, struct be_queue_info *eq, cq 814 drivers/scsi/be2iscsi/be_cmds.h struct be_queue_info *cq); cq 838 drivers/scsi/be2iscsi/be_cmds.h struct be_queue_info *cq, cq 677 drivers/scsi/be2iscsi/be_main.c mcc = &phba->ctrl.mcc_obj.cq; cq 752 drivers/scsi/be2iscsi/be_main.c mcc = &phba->ctrl.mcc_obj.cq; cq 1799 drivers/scsi/be2iscsi/be_main.c mcc_cq = &phba->ctrl.mcc_obj.cq; cq 1851 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *cq; cq 1862 drivers/scsi/be2iscsi/be_main.c cq = pbe_eq->cq; cq 1863 drivers/scsi/be2iscsi/be_main.c sol = queue_tail_node(cq); cq 1910 drivers/scsi/be2iscsi/be_main.c hwi_ring_cq_db(phba, cq->id, 32, 0); cq 2021 drivers/scsi/be2iscsi/be_main.c queue_tail_inc(cq); cq 2022 drivers/scsi/be2iscsi/be_main.c sol = queue_tail_node(cq); cq 2028 drivers/scsi/be2iscsi/be_main.c hwi_ring_cq_db(phba, cq->id, num_processed, 1); cq 3064 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *cq, *eq; cq 3075 drivers/scsi/be2iscsi/be_main.c cq = &phwi_context->be_cq[i]; cq 3078 drivers/scsi/be2iscsi/be_main.c pbe_eq->cq = cq; cq 3080 
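Stepping back to the nvme host/target rdma entries a little above: they share one completion-callback shape, in which each handler recovers its queue from cq->cq_context and funnels failures through a common helper (nvme_rdma_wr_error() on the host side). A model with simplified stand-ins for the ib_* types:

/*
 * Model of the per-work-request "done" callback shape seen in the
 * nvme rdma entries above. All types are illustrative stand-ins.
 */
#include <stdio.h>

struct model_cq { void *cq_context; };

enum wc_status { WC_SUCCESS, WC_ERROR };

struct model_wc {
    enum wc_status status;
    void (*done)(struct model_cq *cq, struct model_wc *wc);
};

struct model_queue { const char *name; };

/* shared error path, like nvme_rdma_wr_error() */
static void model_wr_error(struct model_cq *cq, struct model_wc *wc,
                           const char *op)
{
    struct model_queue *q = cq->cq_context;
    fprintf(stderr, "%s: %s failed (status %d)\n", q->name, op, wc->status);
}

static void model_recv_done(struct model_cq *cq, struct model_wc *wc)
{
    if (wc->status != WC_SUCCESS) {
        model_wr_error(cq, wc, "RECV");
        return;
    }
    printf("RECV completed\n");
}

int main(void)
{
    struct model_queue q = { .name = "queue0" };
    struct model_cq cq = { .cq_context = &q };
    struct model_wc ok  = { WC_SUCCESS, model_recv_done };
    struct model_wc bad = { WC_ERROR,   model_recv_done };

    ok.done(&cq, &ok);     /* normal completion */
    bad.done(&cq, &bad);   /* routed through the shared error path */
    return 0;
}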
drivers/scsi/be2iscsi/be_main.c mem = &cq->dma_mem; cq 3089 drivers/scsi/be2iscsi/be_main.c ret = be_fill_queue(cq, phba->params.num_cq_entries, cq 3099 drivers/scsi/be2iscsi/be_main.c ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, cq 3109 drivers/scsi/be2iscsi/be_main.c "iSCSI CQ CREATED\n", cq->id, eq->id); cq 3115 drivers/scsi/be2iscsi/be_main.c cq = &phwi_context->be_cq[i]; cq 3116 drivers/scsi/be2iscsi/be_main.c mem = &cq->dma_mem; cq 3133 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *dq, *cq; cq 3140 drivers/scsi/be2iscsi/be_main.c cq = &phwi_context->be_cq[0]; cq 3158 drivers/scsi/be2iscsi/be_main.c ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, cq 3185 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *dataq, *cq; cq 3192 drivers/scsi/be2iscsi/be_main.c cq = &phwi_context->be_cq[0]; cq 3211 drivers/scsi/be2iscsi/be_main.c ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, cq 3500 drivers/scsi/be2iscsi/be_main.c q = &phba->ctrl.mcc_obj.cq; cq 3510 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *q, *cq; cq 3514 drivers/scsi/be2iscsi/be_main.c cq = &phba->ctrl.mcc_obj.cq; cq 3515 drivers/scsi/be2iscsi/be_main.c if (be_queue_alloc(phba, cq, MCC_CQ_LEN, cq 3520 drivers/scsi/be2iscsi/be_main.c if (beiscsi_cmd_cq_create(ctrl, cq, cq 3525 drivers/scsi/be2iscsi/be_main.c if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, cq 3536 drivers/scsi/be2iscsi/be_main.c if (beiscsi_cmd_mccq_create(phba, q, cq)) cq 3544 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); cq 3546 drivers/scsi/be2iscsi/be_main.c be_queue_free(phba, cq); cq 328 drivers/scsi/bnx2fc/bnx2fc.h struct fcoe_cqe *cq; cq 1027 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_cqe *cq; cq 1041 drivers/scsi/bnx2fc/bnx2fc_hwi.c if (!tgt->cq) { cq 1046 drivers/scsi/bnx2fc/bnx2fc_hwi.c cq = tgt->cq; cq 1048 drivers/scsi/bnx2fc/bnx2fc_hwi.c cqe = &cq[cq_cons]; cq 1068 drivers/scsi/bnx2fc/bnx2fc_hwi.c cqe = cq; cq 688 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, cq 690 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (!tgt->cq) { cq 882 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (tgt->cq) { cq 884 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->cq, tgt->cq_dma); cq 885 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->cq = NULL; cq 302 drivers/scsi/fnic/fnic.h ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX]; cq 956 drivers/scsi/fnic/fnic_fcs.c cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, cq 1289 drivers/scsi/fnic/fnic_fcs.c wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i], cq 525 drivers/scsi/fnic/fnic_main.c vnic_cq_clean(&fnic->cq[i]); cq 224 drivers/scsi/fnic/fnic_res.c vnic_cq_free(&fnic->cq[i]); cq 286 drivers/scsi/fnic/fnic_res.c &fnic->cq[cq_index], cq_index, cq 296 drivers/scsi/fnic/fnic_res.c err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index, cq 307 drivers/scsi/fnic/fnic_res.c err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq 389 drivers/scsi/fnic/fnic_res.c vnic_cq_init(&fnic->cq[i], cq 1341 drivers/scsi/fnic/fnic_scsi.c cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], cq 24 drivers/scsi/fnic/vnic_cq.c void vnic_cq_free(struct vnic_cq *cq) cq 26 drivers/scsi/fnic/vnic_cq.c vnic_dev_free_desc_ring(cq->vdev, &cq->ring); cq 28 drivers/scsi/fnic/vnic_cq.c cq->ctrl = NULL; cq 31 drivers/scsi/fnic/vnic_cq.c int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq 36 drivers/scsi/fnic/vnic_cq.c cq->index = index; cq 37 drivers/scsi/fnic/vnic_cq.c cq->vdev = 
cq 302 drivers/scsi/fnic/fnic.h ____cacheline_aligned struct vnic_cq cq[FNIC_CQ_MAX];
cq 956 drivers/scsi/fnic/fnic_fcs.c cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
cq 1289 drivers/scsi/fnic/fnic_fcs.c wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
cq 525 drivers/scsi/fnic/fnic_main.c vnic_cq_clean(&fnic->cq[i]);
cq 224 drivers/scsi/fnic/fnic_res.c vnic_cq_free(&fnic->cq[i]);
cq 286 drivers/scsi/fnic/fnic_res.c &fnic->cq[cq_index], cq_index,
cq 296 drivers/scsi/fnic/fnic_res.c err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
cq 307 drivers/scsi/fnic/fnic_res.c err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
cq 389 drivers/scsi/fnic/fnic_res.c vnic_cq_init(&fnic->cq[i],
cq 1341 drivers/scsi/fnic/fnic_scsi.c cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
cq 24 drivers/scsi/fnic/vnic_cq.c void vnic_cq_free(struct vnic_cq *cq)
cq 26 drivers/scsi/fnic/vnic_cq.c vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq 28 drivers/scsi/fnic/vnic_cq.c cq->ctrl = NULL;
cq 31 drivers/scsi/fnic/vnic_cq.c int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq 36 drivers/scsi/fnic/vnic_cq.c cq->index = index;
cq 37 drivers/scsi/fnic/vnic_cq.c cq->vdev = vdev;
cq 39 drivers/scsi/fnic/vnic_cq.c cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
cq 40 drivers/scsi/fnic/vnic_cq.c if (!cq->ctrl) {
cq 45 drivers/scsi/fnic/vnic_cq.c err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
cq 52 drivers/scsi/fnic/vnic_cq.c void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
cq 60 drivers/scsi/fnic/vnic_cq.c paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
cq 61 drivers/scsi/fnic/vnic_cq.c writeq(paddr, &cq->ctrl->ring_base);
cq 62 drivers/scsi/fnic/vnic_cq.c iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
cq 63 drivers/scsi/fnic/vnic_cq.c iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
cq 64 drivers/scsi/fnic/vnic_cq.c iowrite32(color_enable, &cq->ctrl->color_enable);
cq 65 drivers/scsi/fnic/vnic_cq.c iowrite32(cq_head, &cq->ctrl->cq_head);
cq 66 drivers/scsi/fnic/vnic_cq.c iowrite32(cq_tail, &cq->ctrl->cq_tail);
cq 67 drivers/scsi/fnic/vnic_cq.c iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
cq 68 drivers/scsi/fnic/vnic_cq.c iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
cq 69 drivers/scsi/fnic/vnic_cq.c iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
cq 70 drivers/scsi/fnic/vnic_cq.c iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
cq 71 drivers/scsi/fnic/vnic_cq.c iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
cq 72 drivers/scsi/fnic/vnic_cq.c writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
cq 75 drivers/scsi/fnic/vnic_cq.c void vnic_cq_clean(struct vnic_cq *cq)
cq 77 drivers/scsi/fnic/vnic_cq.c cq->to_clean = 0;
cq 78 drivers/scsi/fnic/vnic_cq.c cq->last_color = 0;
cq 80 drivers/scsi/fnic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_head);
cq 81 drivers/scsi/fnic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_tail);
cq 82 drivers/scsi/fnic/vnic_cq.c iowrite32(1, &cq->ctrl->cq_tail_color);
cq 84 drivers/scsi/fnic/vnic_cq.c vnic_dev_clear_desc_ring(&cq->ring);
cq 70 drivers/scsi/fnic/vnic_cq.h static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
cq 81 drivers/scsi/fnic/vnic_cq.h cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq 82 drivers/scsi/fnic/vnic_cq.h cq->ring.desc_size * cq->to_clean);
cq 86 drivers/scsi/fnic/vnic_cq.h while (color != cq->last_color) {
cq 88 drivers/scsi/fnic/vnic_cq.h if ((*q_service)(cq->vdev, cq_desc, type,
cq 92 drivers/scsi/fnic/vnic_cq.h cq->to_clean++;
cq 93 drivers/scsi/fnic/vnic_cq.h if (cq->to_clean == cq->ring.desc_count) {
cq 94 drivers/scsi/fnic/vnic_cq.h cq->to_clean = 0;
cq 95 drivers/scsi/fnic/vnic_cq.h cq->last_color = cq->last_color ? 0 : 1;
cq 98 drivers/scsi/fnic/vnic_cq.h cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq 99 drivers/scsi/fnic/vnic_cq.h cq->ring.desc_size * cq->to_clean);
cq 111 drivers/scsi/fnic/vnic_cq.h void vnic_cq_free(struct vnic_cq *cq);
cq 112 drivers/scsi/fnic/vnic_cq.h int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
cq 114 drivers/scsi/fnic/vnic_cq.h void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
cq 119 drivers/scsi/fnic/vnic_cq.h void vnic_cq_clean(struct vnic_cq *cq);
cq 24 drivers/scsi/fnic/vnic_cq_copy.h struct vnic_cq *cq,
cq 35 drivers/scsi/fnic/vnic_cq_copy.h desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
cq 36 drivers/scsi/fnic/vnic_cq_copy.h cq->ring.desc_size * cq->to_clean);
cq 39 drivers/scsi/fnic/vnic_cq_copy.h while (color != cq->last_color) {
cq 41 drivers/scsi/fnic/vnic_cq_copy.h if ((*q_service)(cq->vdev, cq->index, desc))
cq 44 drivers/scsi/fnic/vnic_cq_copy.h cq->to_clean++;
cq 45 drivers/scsi/fnic/vnic_cq_copy.h if (cq->to_clean == cq->ring.desc_count) {
cq 46 drivers/scsi/fnic/vnic_cq_copy.h cq->to_clean = 0;
cq 47 drivers/scsi/fnic/vnic_cq_copy.h cq->last_color = cq->last_color ? 0 : 1;
cq 50 drivers/scsi/fnic/vnic_cq_copy.h desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs +
cq 51 drivers/scsi/fnic/vnic_cq_copy.h cq->ring.desc_size * cq->to_clean);
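
vnic_cq_service() and vnic_cq_copy_service() above share one wrap-detection trick: every descriptor carries a color bit, the consumer remembers last_color, and the color it expects flips each time to_clean wraps past desc_count, so no head/tail register has to be read per entry. A freestanding sketch of that loop (plain C with an invented descriptor layout; the real drivers read the color out of the hardware descriptor behind a read barrier):

#include <stdint.h>

struct desc {
	uint32_t payload;
	uint8_t color;		/* toggled by the producer on every pass */
};

struct cq {
	struct desc *descs;
	unsigned int desc_count;
	unsigned int to_clean;	/* next slot to consume */
	uint8_t last_color;	/* color of already-consumed slots */
};

static unsigned int cq_service(struct cq *cq, void (*handle)(struct desc *))
{
	unsigned int done = 0;
	struct desc *d = &cq->descs[cq->to_clean];

	while (d->color != cq->last_color) {
		handle(d);
		if (++cq->to_clean == cq->desc_count) {
			cq->to_clean = 0;
			cq->last_color ^= 1;	/* expectation flips on wrap */
		}
		d = &cq->descs[cq->to_clean];
		done++;
	}
	return done;
}
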
cq 360 drivers/scsi/hisi_sas/hisi_sas.h struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES];
cq 1229 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq =
cq 1230 drivers/scsi/hisi_sas/hisi_sas_main.c &hisi_hba->cq[slot->dlvry_queue];
cq 1235 drivers/scsi/hisi_sas/hisi_sas_main.c tasklet_kill(&cq->tasklet);
cq 1621 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq;
cq 1628 drivers/scsi/hisi_sas/hisi_sas_main.c cq = &hisi_hba->cq[slot->dlvry_queue];
cq 1629 drivers/scsi/hisi_sas/hisi_sas_main.c tasklet_kill(&cq->tasklet);
cq 1686 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
cq 1696 drivers/scsi/hisi_sas/hisi_sas_main.c tasklet_kill(&cq->tasklet);
cq 2072 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq =
cq 2073 drivers/scsi/hisi_sas/hisi_sas_main.c &hisi_hba->cq[slot->dlvry_queue];
cq 2078 drivers/scsi/hisi_sas/hisi_sas_main.c tasklet_kill(&cq->tasklet);
cq 2129 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = &hisi_hba->cq[i];
cq 2130 drivers/scsi/hisi_sas/hisi_sas_main.c const struct cpumask *mask = cq->pci_irq_mask;
cq 2229 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = &hisi_hba->cq[i];
cq 2231 drivers/scsi/hisi_sas/hisi_sas_main.c tasklet_kill(&cq->tasklet);
cq 2274 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = &hisi_hba->cq[i];
cq 2286 drivers/scsi/hisi_sas/hisi_sas_main.c cq->rd_point = 0;
cq 2326 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = &hisi_hba->cq[i];
cq 2330 drivers/scsi/hisi_sas/hisi_sas_main.c cq->id = i;
cq 2331 drivers/scsi/hisi_sas/hisi_sas_main.c cq->hisi_hba = hisi_hba;
cq 2988 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = cq_ptr;
cq 2989 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_hba *hisi_hba = cq->hisi_hba;
cq 2990 drivers/scsi/hisi_sas/hisi_sas_main.c void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id];
cq 3001 drivers/scsi/hisi_sas/hisi_sas_main.c struct hisi_sas_cq *cq = s->private;
cq 3005 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_sas_cq_show_slot(s, slot, cq);
cq 3223 drivers/scsi/hisi_sas/hisi_sas_main.c debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c],
cq 1493 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct hisi_sas_cq *cq = p;
cq 1494 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c struct hisi_hba *hisi_hba = cq->hisi_hba;
cq 1496 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c int queue = cq->id;
cq 1500 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c u32 rd_point = cq->rd_point, wr_point;
cq 1531 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c cq->rd_point = rd_point;
cq 1674 drivers/scsi/hisi_sas/hisi_sas_v1_hw.c DRV_NAME " cq", &hisi_hba->cq[i]);
cq 3109 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
cq 3110 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct hisi_hba *hisi_hba = cq->hisi_hba;
cq 3114 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c u32 rd_point = cq->rd_point, wr_point, dev_id;
cq 3115 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c int queue = cq->id;
cq 3175 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c cq->rd_point = rd_point;
cq 3181 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct hisi_sas_cq *cq = p;
cq 3182 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct hisi_hba *hisi_hba = cq->hisi_hba;
cq 3183 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c int queue = cq->id;
cq 3187 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c tasklet_schedule(&cq->tasklet);
cq 3355 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
cq 3356 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c struct tasklet_struct *t = &cq->tasklet;
cq 3360 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c DRV_NAME " cq", cq);
cq 3367 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq);
cq 2295 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
cq 2296 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_hba *hisi_hba = cq->hisi_hba;
cq 2299 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c u32 rd_point = cq->rd_point, wr_point;
cq 2300 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c int queue = cq->id;
cq 2330 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c cq->rd_point = rd_point;
cq 2336 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_sas_cq *cq = p;
cq 2337 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_hba *hisi_hba = cq->hisi_hba;
cq 2338 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c int queue = cq->id;
cq 2342 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c tasklet_schedule(&cq->tasklet);
cq 2353 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
cq 2359 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c cq->pci_irq_mask = mask;
cq 2437 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_sas_cq *cq = &hisi_hba->cq[i];
cq 2438 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct tasklet_struct *t = &cq->tasklet;
cq 2444 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c DRV_NAME " cq", cq);
cq 2452 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
cq 3281 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c struct hisi_sas_cq *cq = &hisi_hba->cq[i];
cq 3284 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c free_irq(pci_irq_vector(pdev, nr), cq);
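
The hisi_sas v1/v2/v3 entries above all split completion handling the same way: the hard IRQ handler only acknowledges the source and schedules a tasklet, and the tasklet walks the queue from the driver's saved rd_point to the hardware wr_point before publishing the new read pointer. A compressed sketch of that split (kernel-style fragment; ack_irq(), read_wr_point(), write_rd_point(), handle_slot() and QUEUE_SLOTS are placeholders for the per-version register accessors):

static void cq_tasklet(unsigned long val)
{
	struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
	u32 rd = cq->rd_point;
	u32 wr = read_wr_point(cq);		/* MMIO read of the write pointer */

	while (rd != wr) {
		handle_slot(cq, rd);		/* complete one CQ entry */
		rd = (rd + 1) % QUEUE_SLOTS;
	}
	cq->rd_point = rd;
	write_rd_point(cq, rd);			/* hand the slots back to hardware */
}

static irqreturn_t cq_interrupt(int irq, void *p)
{
	struct hisi_sas_cq *cq = p;

	ack_irq(cq);				/* clear the interrupt source */
	tasklet_schedule(&cq->tasklet);		/* defer the walk out of hard IRQ */
	return IRQ_HANDLED;
}
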
cq 5163 drivers/scsi/lpfc/lpfc_attr.c struct lpfc_queue *eq, *cq;
cq 5189 drivers/scsi/lpfc/lpfc_attr.c list_for_each_entry(cq, &eq->child_list, list)
cq 5190 drivers/scsi/lpfc/lpfc_attr.c cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
cq 5191 drivers/scsi/lpfc/lpfc_attr.c cq->entry_count);
cq 454 drivers/scsi/lpfc/lpfc_debugfs.h struct lpfc_queue *wq, *cq, *eq;
cq 463 drivers/scsi/lpfc/lpfc_debugfs.h cq = phba->sli4_hba.hdwq[wqidx].io_cq;
cq 467 drivers/scsi/lpfc/lpfc_debugfs.h cq = phba->sli4_hba.mbx_cq;
cq 471 drivers/scsi/lpfc/lpfc_debugfs.h cq = phba->sli4_hba.els_cq;
cq 475 drivers/scsi/lpfc/lpfc_debugfs.h cq = phba->sli4_hba.nvmels_cq;
cq 482 drivers/scsi/lpfc/lpfc_debugfs.h if (cq->assoc_qid == eq->queue_id)
cq 494 drivers/scsi/lpfc/lpfc_debugfs.h qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
cq 499 drivers/scsi/lpfc/lpfc_debugfs.h qtypestr, wq->queue_id, cq->queue_id,
cq 502 drivers/scsi/lpfc/lpfc_debugfs.h lpfc_debug_dump_q(cq);
cq 9239 drivers/scsi/lpfc/lpfc_init.c struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
cq 9245 drivers/scsi/lpfc/lpfc_init.c if (!eq || !cq || !wq) {
cq 9248 drivers/scsi/lpfc/lpfc_init.c ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
cq 9253 drivers/scsi/lpfc/lpfc_init.c rc = lpfc_cq_create(phba, cq, eq,
cq 9265 drivers/scsi/lpfc/lpfc_init.c *cq_map = cq->queue_id;
cq 9269 drivers/scsi/lpfc/lpfc_init.c qidx, cq->queue_id, qidx, eq->queue_id);
cq 9272 drivers/scsi/lpfc/lpfc_init.c rc = lpfc_wq_create(phba, wq, cq, qtype);
cq 9284 drivers/scsi/lpfc/lpfc_init.c cq->pring = pring;
cq 9288 drivers/scsi/lpfc/lpfc_init.c qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
cq 9290 drivers/scsi/lpfc/lpfc_init.c rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
cq 82 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq, struct lpfc_cqe *cqe);
cq 565 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 571 drivers/scsi/lpfc/lpfc_sli.c cq->host_index = ((cq->host_index + 1) % cq->entry_count);
cq 574 drivers/scsi/lpfc/lpfc_sli.c if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
cq 575 drivers/scsi/lpfc/lpfc_sli.c cq->qe_valid = (cq->qe_valid) ? 0 : 1;
cq 13203 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13209 drivers/scsi/lpfc/lpfc_sli.c cq->CQ_mbox++;
cq 13233 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13238 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_sli_ring *pring = cq->pring;
cq 13320 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq,
cq 13327 drivers/scsi/lpfc/lpfc_sli.c switch (cq->subtype) {
cq 13329 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
cq 13343 drivers/scsi/lpfc/lpfc_sli.c cq_event->hdwq = cq->hdwq;
cq 13356 drivers/scsi/lpfc/lpfc_sli.c cq->subtype, wcqe->word0, wcqe->parameter,
cq 13474 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13488 drivers/scsi/lpfc/lpfc_sli.c workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
cq 13499 drivers/scsi/lpfc/lpfc_sli.c workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
cq 13535 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = NULL, *childq;
cq 13543 drivers/scsi/lpfc/lpfc_sli.c cq = childq;
cq 13547 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!cq)) {
cq 13556 drivers/scsi/lpfc/lpfc_sli.c cq->assoc_qp = speq;
cq 13558 drivers/scsi/lpfc/lpfc_sli.c if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
cq 13562 drivers/scsi/lpfc/lpfc_sli.c cqid, cq->queue_id, raw_smp_processor_id());
cq 13586 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13598 drivers/scsi/lpfc/lpfc_sli.c if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
cq 13602 drivers/scsi/lpfc/lpfc_sli.c cq->q_flag = 0;
cq 13603 drivers/scsi/lpfc/lpfc_sli.c cqe = lpfc_sli4_cq_get(cq);
cq 13605 drivers/scsi/lpfc/lpfc_sli.c workposted |= handler(phba, cq, cqe);
cq 13606 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_consume_cqe(phba, cq, cqe);
cq 13609 drivers/scsi/lpfc/lpfc_sli.c if (!(++count % cq->max_proc_limit))
cq 13612 drivers/scsi/lpfc/lpfc_sli.c if (!(count % cq->notify_interval)) {
cq 13613 drivers/scsi/lpfc/lpfc_sli.c phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
cq 13619 drivers/scsi/lpfc/lpfc_sli.c cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
cq 13621 drivers/scsi/lpfc/lpfc_sli.c cqe = lpfc_sli4_cq_get(cq);
cq 13629 drivers/scsi/lpfc/lpfc_sli.c if (count > cq->CQ_max_cqe)
cq 13630 drivers/scsi/lpfc/lpfc_sli.c cq->CQ_max_cqe = count;
cq 13632 drivers/scsi/lpfc/lpfc_sli.c cq->assoc_qp->EQ_cqe_cnt += count;
cq 13638 drivers/scsi/lpfc/lpfc_sli.c "qid=%d\n", cq->queue_id);
cq 13640 drivers/scsi/lpfc/lpfc_sli.c cq->queue_claimed = 0;
cq 13643 drivers/scsi/lpfc/lpfc_sli.c phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
cq 13665 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
cq 13667 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_hba *phba = cq->phba;
cq 13672 drivers/scsi/lpfc/lpfc_sli.c switch (cq->type) {
cq 13674 drivers/scsi/lpfc/lpfc_sli.c workposted |= __lpfc_sli4_process_cq(phba, cq,
cq 13679 drivers/scsi/lpfc/lpfc_sli.c if (cq->subtype == LPFC_IO)
cq 13680 drivers/scsi/lpfc/lpfc_sli.c workposted |= __lpfc_sli4_process_cq(phba, cq,
cq 13684 drivers/scsi/lpfc/lpfc_sli.c workposted |= __lpfc_sli4_process_cq(phba, cq,
cq 13691 drivers/scsi/lpfc/lpfc_sli.c cq->type);
cq 13696 drivers/scsi/lpfc/lpfc_sli.c if (!queue_delayed_work_on(cq->chann, phba->wq,
cq 13697 drivers/scsi/lpfc/lpfc_sli.c &cq->sched_spwork, delay))
cq 13701 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id, cq->chann);
cq 13719 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
cq 13721 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_sp_process_cq(cq);
cq 13733 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(to_delayed_work(work),
cq 13736 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_sp_process_cq(cq);
cq 13749 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13752 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_sli_ring *pring = cq->pring;
cq 13791 drivers/scsi/lpfc/lpfc_sli.c cmdiocbq->isr_timestamp = cq->isr_timestamp;
cq 13835 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13844 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(childwq, &cq->child_list, list) {
cq 13871 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13888 drivers/scsi/lpfc/lpfc_sli.c idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
cq 13938 drivers/scsi/lpfc/lpfc_sli.c phba, idx, dma_buf, cq->isr_timestamp,
cq 13939 drivers/scsi/lpfc/lpfc_sli.c cq->q_flag & HBA_NVMET_CQ_NOTIFY);
cq 13980 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 13993 drivers/scsi/lpfc/lpfc_sli.c cq->CQ_wq++;
cq 13996 drivers/scsi/lpfc/lpfc_sli.c if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
cq 13997 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
cq 14001 drivers/scsi/lpfc/lpfc_sli.c cq->CQ_release_wqe++;
cq 14003 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
cq 14007 drivers/scsi/lpfc/lpfc_sli.c cq->CQ_xri_aborted++;
cq 14010 drivers/scsi/lpfc/lpfc_sli.c workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
cq 14016 drivers/scsi/lpfc/lpfc_sli.c if (cq->subtype == LPFC_NVMET) {
cq 14018 drivers/scsi/lpfc/lpfc_sli.c phba, cq, (struct lpfc_rcqe *)&wcqe);
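
The __lpfc_sli4_process_cq() entries above encode a careful polling discipline: claim the queue with cmpxchg() so only one context walks it, stop after max_proc_limit entries so one busy CQ cannot starve the rest, ring the doorbell (without re-arming) every notify_interval consumed entries so the hardware can recycle slots mid-walk, and re-arm only with the final doorbell. A condensed sketch of that control flow (field names follow the listing; get_cqe(), consume_cqe() and write_cq_db() stand in for the lpfc helpers):

struct lpfc_cqe *cqe;
bool workposted = false, arm = true;
int count = 0, consumed = 0;

if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
	return false;			/* another context is already polling */

while ((cqe = get_cqe(cq)) != NULL) {
	workposted |= handler(phba, cq, cqe);
	consume_cqe(cq, cqe);
	consumed++;
	if (!(++count % cq->max_proc_limit)) {
		arm = false;		/* budget spent: reschedule instead */
		break;
	}
	if (!(count % cq->notify_interval)) {
		write_cq_db(phba, cq, consumed, false);	/* release slots, no re-arm */
		consumed = 0;
	}
}
cq->queue_claimed = 0;
write_cq_db(phba, cq, consumed, arm);	/* final doorbell, re-arm if drained */
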
cq 14046 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = NULL;
cq 14064 drivers/scsi/lpfc/lpfc_sli.c cq = phba->sli4_hba.cq_lookup[cqid];
cq 14065 drivers/scsi/lpfc/lpfc_sli.c if (cq)
cq 14074 drivers/scsi/lpfc/lpfc_sli.c cq = phba->sli4_hba.nvmet_cqset[cqid - id];
cq 14082 drivers/scsi/lpfc/lpfc_sli.c cq = phba->sli4_hba.nvmels_cq;
cq 14086 drivers/scsi/lpfc/lpfc_sli.c if (cq == NULL) {
cq 14093 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(cqid != cq->queue_id)) {
cq 14097 drivers/scsi/lpfc/lpfc_sli.c cqid, cq->queue_id);
cq 14104 drivers/scsi/lpfc/lpfc_sli.c cq->isr_timestamp = ktime_get_ns();
cq 14106 drivers/scsi/lpfc/lpfc_sli.c cq->isr_timestamp = 0;
cq 14108 drivers/scsi/lpfc/lpfc_sli.c if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
cq 14112 drivers/scsi/lpfc/lpfc_sli.c cqid, cq->queue_id, raw_smp_processor_id());
cq 14131 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
cq 14133 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_hba *phba = cq->phba;
cq 14138 drivers/scsi/lpfc/lpfc_sli.c workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
cq 14142 drivers/scsi/lpfc/lpfc_sli.c if (!queue_delayed_work_on(cq->chann, phba->wq,
cq 14143 drivers/scsi/lpfc/lpfc_sli.c &cq->sched_irqwork, delay))
cq 14147 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id, cq->chann);
cq 14165 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
cq 14167 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_hba_process_cq(cq);
cq 14179 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq = container_of(to_delayed_work(work),
cq 14182 drivers/scsi/lpfc/lpfc_sli.c __lpfc_sli4_hba_process_cq(cq);
cq 14881 drivers/scsi/lpfc/lpfc_sli.c lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 14892 drivers/scsi/lpfc/lpfc_sli.c if (!cq || !eq)
cq 14906 drivers/scsi/lpfc/lpfc_sli.c cq->page_count);
cq 14913 drivers/scsi/lpfc/lpfc_sli.c (cq->page_size / SLI4_PAGE_SIZE));
cq 14922 drivers/scsi/lpfc/lpfc_sli.c switch (cq->entry_count) {
cq 14928 drivers/scsi/lpfc/lpfc_sli.c cq->entry_count;
cq 14939 drivers/scsi/lpfc/lpfc_sli.c cq->entry_count, cq->entry_size,
cq 14940 drivers/scsi/lpfc/lpfc_sli.c cq->page_count);
cq 14941 drivers/scsi/lpfc/lpfc_sli.c if (cq->entry_count < 256) {
cq 14959 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &cq->page_list, list) {
cq 14960 drivers/scsi/lpfc/lpfc_sli.c memset(dmabuf->virt, 0, cq->page_size);
cq 14979 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
cq 14980 drivers/scsi/lpfc/lpfc_sli.c if (cq->queue_id == 0xFFFF) {
cq 14985 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&cq->list, &eq->child_list);
cq 14987 drivers/scsi/lpfc/lpfc_sli.c cq->type = type;
cq 14988 drivers/scsi/lpfc/lpfc_sli.c cq->subtype = subtype;
cq 14989 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
cq 14990 drivers/scsi/lpfc/lpfc_sli.c cq->assoc_qid = eq->queue_id;
cq 14991 drivers/scsi/lpfc/lpfc_sli.c cq->assoc_qp = eq;
cq 14992 drivers/scsi/lpfc/lpfc_sli.c cq->host_index = 0;
cq 14993 drivers/scsi/lpfc/lpfc_sli.c cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
cq 14994 drivers/scsi/lpfc/lpfc_sli.c cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
cq 14996 drivers/scsi/lpfc/lpfc_sli.c if (cq->queue_id > phba->sli4_hba.cq_max)
cq 14997 drivers/scsi/lpfc/lpfc_sli.c phba->sli4_hba.cq_max = cq->queue_id;
cq 15030 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq;
cq 15069 drivers/scsi/lpfc/lpfc_sli.c cq = cqp[idx];
cq 15071 drivers/scsi/lpfc/lpfc_sli.c if (!cq || !eq) {
cq 15076 drivers/scsi/lpfc/lpfc_sli.c hw_page_size = cq->page_size;
cq 15084 drivers/scsi/lpfc/lpfc_sli.c &cq_set->u.request, cq->page_count);
cq 15096 drivers/scsi/lpfc/lpfc_sli.c switch (cq->entry_count) {
cq 15103 drivers/scsi/lpfc/lpfc_sli.c cq->entry_count);
cq 15113 drivers/scsi/lpfc/lpfc_sli.c cq->entry_count);
cq 15114 drivers/scsi/lpfc/lpfc_sli.c if (cq->entry_count < 256) {
cq 15198 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&cq->list, &eq->child_list);
cq 15200 drivers/scsi/lpfc/lpfc_sli.c cq->type = type;
cq 15201 drivers/scsi/lpfc/lpfc_sli.c cq->subtype = subtype;
cq 15202 drivers/scsi/lpfc/lpfc_sli.c cq->assoc_qid = eq->queue_id;
cq 15203 drivers/scsi/lpfc/lpfc_sli.c cq->assoc_qp = eq;
cq 15204 drivers/scsi/lpfc/lpfc_sli.c cq->host_index = 0;
cq 15205 drivers/scsi/lpfc/lpfc_sli.c cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
cq 15206 drivers/scsi/lpfc/lpfc_sli.c cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
cq 15207 drivers/scsi/lpfc/lpfc_sli.c cq->entry_count);
cq 15208 drivers/scsi/lpfc/lpfc_sli.c cq->chann = idx;
cq 15211 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &cq->page_list, list) {
cq 15243 drivers/scsi/lpfc/lpfc_sli.c cq = cqp[idx];
cq 15244 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id = rc + idx;
cq 15245 drivers/scsi/lpfc/lpfc_sli.c if (cq->queue_id > phba->sli4_hba.cq_max)
cq 15246 drivers/scsi/lpfc/lpfc_sli.c phba->sli4_hba.cq_max = cq->queue_id;
cq 15270 drivers/scsi/lpfc/lpfc_sli.c LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
cq 15285 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 15336 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq, uint32_t subtype)
cq 15348 drivers/scsi/lpfc/lpfc_sli.c if (!mq || !cq)
cq 15381 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 15384 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 15431 drivers/scsi/lpfc/lpfc_sli.c lpfc_mq_create_fb_init(phba, mq, mbox, cq);
cq 15455 drivers/scsi/lpfc/lpfc_sli.c mq->assoc_qid = cq->queue_id;
cq 15461 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&mq->list, &cq->child_list);
cq 15491 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *cq, uint32_t subtype)
cq 15510 drivers/scsi/lpfc/lpfc_sli.c if (!wq || !cq)
cq 15528 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 15720 drivers/scsi/lpfc/lpfc_sli.c wq->assoc_qid = cq->queue_id;
cq 15727 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&wq->list, &cq->child_list);
cq 15758 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
cq 15772 drivers/scsi/lpfc/lpfc_sli.c if (!hrq || !drq || !cq)
cq 15838 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 15914 drivers/scsi/lpfc/lpfc_sli.c hrq->assoc_qid = cq->queue_id;
cq 15981 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 16007 drivers/scsi/lpfc/lpfc_sli.c drq->assoc_qid = cq->queue_id;
cq 16014 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&hrq->list, &cq->child_list);
cq 16015 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&drq->list, &cq->child_list);
cq 16050 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *hrq, *drq, *cq;
cq 16098 drivers/scsi/lpfc/lpfc_sli.c cq = cqp[idx];
cq 16101 drivers/scsi/lpfc/lpfc_sli.c if (!hrq || !drq || !cq) {
cq 16121 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 16165 drivers/scsi/lpfc/lpfc_sli.c hrq->assoc_qid = cq->queue_id;
cq 16174 drivers/scsi/lpfc/lpfc_sli.c drq->assoc_qid = cq->queue_id;
cq 16180 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&hrq->list, &cq->child_list);
cq 16181 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&drq->list, &cq->child_list);
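
Entries 14979-14997 and 15198-15208 above show the bookkeeping common to both lpfc CQ-creation paths once the mailbox command succeeds: the new CQ is linked onto its parent EQ's child_list, records which EQ feeds it, restarts its host index at zero, and derives its polling budget from the entry count. Condensed directly from the listing:

cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
list_add_tail(&cq->list, &eq->child_list);
cq->type = type;
cq->subtype = subtype;
cq->assoc_qid = eq->queue_id;		/* remember the parent EQ */
cq->assoc_qp = eq;
cq->host_index = 0;			/* consumer restarts at slot 0 */
cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
if (cq->queue_id > phba->sli4_hba.cq_max)
	phba->sli4_hba.cq_max = cq->queue_id;	/* tracks the largest id seen */
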
cq 16285 drivers/scsi/lpfc/lpfc_sli.c lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
cq 16293 drivers/scsi/lpfc/lpfc_sli.c if (!cq)
cq 16295 drivers/scsi/lpfc/lpfc_sli.c mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
cq 16304 drivers/scsi/lpfc/lpfc_sli.c cq->queue_id);
cq 16305 drivers/scsi/lpfc/lpfc_sli.c mbox->vport = cq->phba->pport;
cq 16307 drivers/scsi/lpfc/lpfc_sli.c rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
cq 16321 drivers/scsi/lpfc/lpfc_sli.c list_del_init(&cq->list);
cq 16322 drivers/scsi/lpfc/lpfc_sli.c mempool_free(mbox, cq->phba->mbox_mem_pool);
cq 801 drivers/scsi/lpfc/lpfc_sli4.h void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq 265 drivers/scsi/qedf/qedf.h struct fcoe_cqe *cq;
cq 2121 drivers/scsi/qedf/qedf_main.c cqe = &que->cq[que->cq_cons_idx];
cq 2806 drivers/scsi/qedf/qedf_main.c if (gl[i]->cq)
cq 2808 drivers/scsi/qedf/qedf_main.c gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
cq 2963 drivers/scsi/qedf/qedf_main.c qedf->global_queues[i]->cq =
cq 2969 drivers/scsi/qedf/qedf_main.c if (!qedf->global_queues[i]->cq) {
cq 155 drivers/scsi/qedi/qedi.h union iscsi_cqe *cq;
cq 1228 drivers/scsi/qedi/qedi_main.c cqe = &que->cq[que->cq_cons_idx];
cq 1454 drivers/scsi/qedi/qedi_main.c if (gl[i]->cq)
cq 1456 drivers/scsi/qedi/qedi_main.c gl[i]->cq, gl[i]->cq_dma);
cq 1624 drivers/scsi/qedi/qedi_main.c qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
cq 1629 drivers/scsi/qedi/qedi_main.c if (!qedi->global_queues[i]->cq) {
cq 341 drivers/scsi/snic/snic.h ____cacheline_aligned struct vnic_cq cq[SNIC_CQ_MAX];
cq 87 drivers/scsi/snic/snic_io.c work_done += svnic_cq_service(&snic->cq[i],
cq 253 drivers/scsi/snic/snic_main.c svnic_cq_clean(&snic->cq[i]);
cq 140 drivers/scsi/snic/snic_res.c svnic_cq_free(&snic->cq[i]);
cq 189 drivers/scsi/snic/snic_res.c &snic->cq[i],
cq 201 drivers/scsi/snic/snic_res.c &snic->cq[i],
cq 233 drivers/scsi/snic/snic_res.c svnic_cq_init(&snic->cq[i],
cq 1289 drivers/scsi/snic/snic_scsi.c nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],
cq 24 drivers/scsi/snic/vnic_cq.c void svnic_cq_free(struct vnic_cq *cq)
cq 26 drivers/scsi/snic/vnic_cq.c svnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq 28 drivers/scsi/snic/vnic_cq.c cq->ctrl = NULL;
cq 31 drivers/scsi/snic/vnic_cq.c int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
cq 36 drivers/scsi/snic/vnic_cq.c cq->index = index;
cq 37 drivers/scsi/snic/vnic_cq.c cq->vdev = vdev;
cq 39 drivers/scsi/snic/vnic_cq.c cq->ctrl = svnic_dev_get_res(vdev, RES_TYPE_CQ, index);
cq 40 drivers/scsi/snic/vnic_cq.c if (!cq->ctrl) {
cq 46 drivers/scsi/snic/vnic_cq.c err = svnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
cq 53 drivers/scsi/snic/vnic_cq.c void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
cq 61 drivers/scsi/snic/vnic_cq.c paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
cq 62 drivers/scsi/snic/vnic_cq.c writeq(paddr, &cq->ctrl->ring_base);
cq 63 drivers/scsi/snic/vnic_cq.c iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
cq 64 drivers/scsi/snic/vnic_cq.c iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
cq 65 drivers/scsi/snic/vnic_cq.c iowrite32(color_enable, &cq->ctrl->color_enable);
cq 66 drivers/scsi/snic/vnic_cq.c iowrite32(cq_head, &cq->ctrl->cq_head);
cq 67 drivers/scsi/snic/vnic_cq.c iowrite32(cq_tail, &cq->ctrl->cq_tail);
cq 68 drivers/scsi/snic/vnic_cq.c iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
cq 69 drivers/scsi/snic/vnic_cq.c iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
cq 70 drivers/scsi/snic/vnic_cq.c iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
cq 71 drivers/scsi/snic/vnic_cq.c iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
cq 72 drivers/scsi/snic/vnic_cq.c iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
cq 73 drivers/scsi/snic/vnic_cq.c writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
cq 76 drivers/scsi/snic/vnic_cq.c void svnic_cq_clean(struct vnic_cq *cq)
cq 78 drivers/scsi/snic/vnic_cq.c cq->to_clean = 0;
cq 79 drivers/scsi/snic/vnic_cq.c cq->last_color = 0;
cq 81 drivers/scsi/snic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_head);
cq 82 drivers/scsi/snic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_tail);
cq 83 drivers/scsi/snic/vnic_cq.c iowrite32(1, &cq->ctrl->cq_tail_color);
cq 85 drivers/scsi/snic/vnic_cq.c svnic_dev_clear_desc_ring(&cq->ring);
cq 60 drivers/scsi/snic/vnic_cq.h static inline unsigned int svnic_cq_service(struct vnic_cq *cq,
cq 71 drivers/scsi/snic/vnic_cq.h cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq 72 drivers/scsi/snic/vnic_cq.h cq->ring.desc_size * cq->to_clean);
cq 76 drivers/scsi/snic/vnic_cq.h while (color != cq->last_color) {
cq 78 drivers/scsi/snic/vnic_cq.h if ((*q_service)(cq->vdev, cq_desc, type,
cq 82 drivers/scsi/snic/vnic_cq.h cq->to_clean++;
cq 83 drivers/scsi/snic/vnic_cq.h if (cq->to_clean == cq->ring.desc_count) {
cq 84 drivers/scsi/snic/vnic_cq.h cq->to_clean = 0;
cq 85 drivers/scsi/snic/vnic_cq.h cq->last_color = cq->last_color ? 0 : 1;
cq 88 drivers/scsi/snic/vnic_cq.h cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
cq 89 drivers/scsi/snic/vnic_cq.h cq->ring.desc_size * cq->to_clean);
cq 101 drivers/scsi/snic/vnic_cq.h void svnic_cq_free(struct vnic_cq *cq);
cq 102 drivers/scsi/snic/vnic_cq.h int svnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq,
cq 104 drivers/scsi/snic/vnic_cq.h void svnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
cq 109 drivers/scsi/snic/vnic_cq.h void svnic_cq_clean(struct vnic_cq *cq);
cq 24 drivers/scsi/snic/vnic_cq_fw.h vnic_cq_fw_service(struct vnic_cq *cq,
cq 35 drivers/scsi/snic/vnic_cq_fw.h desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
cq 36 drivers/scsi/snic/vnic_cq_fw.h cq->ring.desc_size * cq->to_clean);
cq 39 drivers/scsi/snic/vnic_cq_fw.h while (color != cq->last_color) {
cq 41 drivers/scsi/snic/vnic_cq_fw.h if ((*q_service)(cq->vdev, cq->index, desc))
cq 44 drivers/scsi/snic/vnic_cq_fw.h cq->to_clean++;
cq 45 drivers/scsi/snic/vnic_cq_fw.h if (cq->to_clean == cq->ring.desc_count) {
cq 46 drivers/scsi/snic/vnic_cq_fw.h cq->to_clean = 0;
cq 47 drivers/scsi/snic/vnic_cq_fw.h cq->last_color = cq->last_color ? 0 : 1;
cq 50 drivers/scsi/snic/vnic_cq_fw.h desc = (struct snic_fw_req *)((u8 *)cq->ring.descs +
cq 51 drivers/scsi/snic/vnic_cq_fw.h cq->ring.desc_size * cq->to_clean);
cq 22 drivers/tee/optee/call.c static void optee_cq_wait_init(struct optee_call_queue *cq,
cq 33 drivers/tee/optee/call.c mutex_lock(&cq->mutex);
cq 42 drivers/tee/optee/call.c list_add_tail(&w->list_node, &cq->waiters);
cq 44 drivers/tee/optee/call.c mutex_unlock(&cq->mutex);
cq 47 drivers/tee/optee/call.c static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
cq 52 drivers/tee/optee/call.c mutex_lock(&cq->mutex);
cq 57 drivers/tee/optee/call.c list_add_tail(&w->list_node, &cq->waiters);
cq 59 drivers/tee/optee/call.c mutex_unlock(&cq->mutex);
cq 62 drivers/tee/optee/call.c static void optee_cq_complete_one(struct optee_call_queue *cq)
cq 66 drivers/tee/optee/call.c list_for_each_entry(w, &cq->waiters, list_node) {
cq 74 drivers/tee/optee/call.c static void optee_cq_wait_final(struct optee_call_queue *cq,
cq 82 drivers/tee/optee/call.c mutex_lock(&cq->mutex);
cq 88 drivers/tee/optee/call.c optee_cq_complete_one(cq);
cq 97 drivers/tee/optee/call.c optee_cq_complete_one(cq);
cq 99 drivers/tee/optee/call.c mutex_unlock(&cq->mutex);
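
The optee/call.c entries above implement a small call queue: callers that cannot enter the secure world register a waiter (a list node plus a completion) on cq->waiters under cq->mutex, sleep on the completion, and whoever frees a slot walks the list and wakes the first waiter not yet completed. A reduced sketch of the wake-one step, matching the shape of optee_cq_complete_one() in the listing (the waiter struct is abbreviated):

struct call_waiter {
	struct list_head list_node;
	struct completion c;
};

/* Called with cq->mutex held: wake the first waiter still asleep. */
static void cq_complete_one(struct optee_call_queue *cq)
{
	struct call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}
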
cq 266 fs/cifs/smbdirect.c static void send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 482 fs/cifs/smbdirect.c static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2244 fs/cifs/smbdirect.c static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
cq 2549 fs/cifs/smbdirect.c static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 104 fs/io_uring.c struct io_uring sq, cq;
cq 481 fs/io_uring.c if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
cq 483 fs/io_uring.c smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
cq 563 fs/io_uring.c if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
cq 751 fs/io_uring.c return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
cq 3020 fs/io_uring.c return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
cq 3665 fs/io_uring.c if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
cq 3953 fs/io_uring.c p->cq_off.head = offsetof(struct io_rings, cq.head);
cq 3954 fs/io_uring.c p->cq_off.tail = offsetof(struct io_rings, cq.tail);
cq 142 include/linux/mlx4/cq.h static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
cq 150 include/linux/mlx4/cq.h sn = cq->arm_sn & 3;
cq 151 include/linux/mlx4/cq.h ci = cq->cons_index & 0xffffff;
cq 153 include/linux/mlx4/cq.h *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
cq 161 include/linux/mlx4/cq.h doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn);
cq 167 include/linux/mlx4/cq.h static inline void mlx4_cq_set_ci(struct mlx4_cq *cq)
cq 169 include/linux/mlx4/cq.h *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
cq 177 include/linux/mlx4/cq.h int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq 179 include/linux/mlx4/cq.h int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
cq 843 include/linux/mlx4/device.h int cq;
cq 1139 include/linux/mlx4/device.h struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
cq 1142 include/linux/mlx4/device.h void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
cq 50 include/linux/mlx5/cq.h void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
cq 58 include/linux/mlx5/cq.h void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
cq 142 include/linux/mlx5/cq.h static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
cq 144 include/linux/mlx5/cq.h *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
cq 152 include/linux/mlx5/cq.h static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
cq 160 include/linux/mlx5/cq.h sn = cq->arm_sn & 3;
cq 163 include/linux/mlx5/cq.h *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
cq 171 include/linux/mlx5/cq.h doorbell[1] = cpu_to_be32(cq->cqn);
cq 176 include/linux/mlx5/cq.h static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
cq 178 include/linux/mlx5/cq.h refcount_inc(&cq->refcount);
cq 181 include/linux/mlx5/cq.h static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
cq 183 include/linux/mlx5/cq.h if (refcount_dec_and_test(&cq->refcount))
cq 184 include/linux/mlx5/cq.h complete(&cq->free);
cq 187 include/linux/mlx5/cq.h int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq 189 include/linux/mlx5/cq.h int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
cq 190 include/linux/mlx5/cq.h int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq 192 include/linux/mlx5/cq.h int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq 195 include/linux/mlx5/cq.h struct mlx5_core_cq *cq, u16 cq_period,
cq 203 include/linux/mlx5/cq.h int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
cq 204 include/linux/mlx5/cq.h void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
cq 46 include/net/xdp_sock.h struct xsk_queue *cq;
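
The mlx4_cq_arm()/mlx5_cq_arm() entries above build the same 32-bit arm word: a 2-bit arm sequence number in the top bits, the arm command, and the 24-bit consumer index; the word is first stored in the CQ's arm doorbell record in memory and then written, together with the CQN, through the UAR doorbell. A commented reconstruction of the mlx4 flavour (condensed from the listing; the second doorbell word and the MMIO helper are reconstructions, and the write barrier required between the two stores is elided here):

u32 sn = cq->arm_sn & 3;		/* toggles per arm; lets HW spot stale arms */
u32 ci = cq->cons_index & 0xffffff;	/* 24-bit consumer index */
__be32 doorbell[2];

*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);	/* arm record in memory */
/* wmb() here in the real code: the record must be visible before the ring */
doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn);
doorbell[1] = cpu_to_be32(ci);	/* mlx5 carries the CQN here instead (entry 171) */
write_mmio_doorbell(doorbell, uar_page);	/* placeholder for the MMIO write */
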
cq 721 include/rdma/ib_verbs.h struct ib_cq *cq;
cq 1031 include/rdma/ib_verbs.h struct ib_cq *cq;
cq 1321 include/rdma/ib_verbs.h void (*done)(struct ib_cq *cq, struct ib_wc *wc);
cq 1533 include/rdma/ib_verbs.h typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
cq 1574 include/rdma/ib_verbs.h struct ib_cq *cq;
cq 1616 include/rdma/ib_verbs.h struct ib_cq *cq;
cq 1635 include/rdma/ib_verbs.h struct ib_cq *cq;
cq 2270 include/rdma/ib_verbs.h int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
cq 2271 include/rdma/ib_verbs.h int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
cq 2272 include/rdma/ib_verbs.h int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
cq 2273 include/rdma/ib_verbs.h int (*req_ncomp_notif)(struct ib_cq *cq, int wc_cnt);
cq 2389 include/rdma/ib_verbs.h int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
cq 2391 include/rdma/ib_verbs.h int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
cq 2392 include/rdma/ib_verbs.h void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
cq 2393 include/rdma/ib_verbs.h int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
cq 3789 include/rdma/ib_verbs.h void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
cq 3797 include/rdma/ib_verbs.h static inline void ib_free_cq(struct ib_cq *cq)
cq 3799 include/rdma/ib_verbs.h ib_free_cq_user(cq, NULL);
cq 3802 include/rdma/ib_verbs.h int ib_process_cq_direct(struct ib_cq *cq, int budget);
cq 3833 include/rdma/ib_verbs.h int ib_resize_cq(struct ib_cq *cq, int cqe);
cq 3842 include/rdma/ib_verbs.h int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
cq 3849 include/rdma/ib_verbs.h int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
cq 3857 include/rdma/ib_verbs.h static inline void ib_destroy_cq(struct ib_cq *cq)
cq 3859 include/rdma/ib_verbs.h ib_destroy_cq_user(cq, NULL);
cq 3874 include/rdma/ib_verbs.h static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
cq 3877 include/rdma/ib_verbs.h return cq->device->ops.poll_cq(cq, num_entries, wc);
cq 3907 include/rdma/ib_verbs.h static inline int ib_req_notify_cq(struct ib_cq *cq,
cq 3910 include/rdma/ib_verbs.h return cq->device->ops.req_notify_cq(cq, flags);
cq 3920 include/rdma/ib_verbs.h static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
cq 3922 include/rdma/ib_verbs.h return cq->device->ops.req_ncomp_notif ?
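
The ib_poll_cq()/ib_req_notify_cq() wrappers above are the building blocks of the standard drain-then-re-arm discipline: poll until empty, re-arm, and poll again if the re-arm reports that completions slipped in between, so nothing is lost to the race between the last poll and the arm. A sketch of that consumer loop (handle_wc() is an assumed per-completion handler):

static void drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

again:
	while (ib_poll_cq(cq, 1, &wc) > 0)
		handle_wc(&wc);		/* assumed completion handler */

	/*
	 * With IB_CQ_REPORT_MISSED_EVENTS, a positive return means new
	 * completions arrived before the arm took effect, so poll again
	 * rather than wait for an interrupt that may never fire.
	 */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto again;
}
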
cq 3923 include/rdma/ib_verbs.h cq->device->ops.req_ncomp_notif(cq, wc_cnt) :
cq 110 include/rdma/rdmavt_cq.h bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
cq 801 include/rdma/rdmavt_qp.h struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
cq 803 include/rdma/rdmavt_qp.h if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
cq 821 include/rdma/rdmavt_qp.h struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
cq 823 include/rdma/rdmavt_qp.h if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
cq 938 include/rdma/rdmavt_qp.h struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
cq 941 include/rdma/rdmavt_qp.h RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
cq 954 include/rdma/rdmavt_qp.h struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
cq 957 include/rdma/rdmavt_qp.h RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
cq 1357 kernel/locking/lockdep.c static inline void __cq_init(struct circular_queue *cq)
cq 1359 kernel/locking/lockdep.c cq->front = cq->rear = 0;
cq 1363 kernel/locking/lockdep.c static inline int __cq_empty(struct circular_queue *cq)
cq 1365 kernel/locking/lockdep.c return (cq->front == cq->rear);
cq 1368 kernel/locking/lockdep.c static inline int __cq_full(struct circular_queue *cq)
cq 1370 kernel/locking/lockdep.c return ((cq->rear + 1) & CQ_MASK) == cq->front;
cq 1373 kernel/locking/lockdep.c static inline int __cq_enqueue(struct circular_queue *cq, struct lock_list *elem)
cq 1375 kernel/locking/lockdep.c if (__cq_full(cq))
cq 1378 kernel/locking/lockdep.c cq->element[cq->rear] = elem;
cq 1379 kernel/locking/lockdep.c cq->rear = (cq->rear + 1) & CQ_MASK;
cq 1387 kernel/locking/lockdep.c static inline struct lock_list * __cq_dequeue(struct circular_queue *cq)
cq 1391 kernel/locking/lockdep.c if (__cq_empty(cq))
cq 1394 kernel/locking/lockdep.c lock = cq->element[cq->front];
cq 1395 kernel/locking/lockdep.c cq->front = (cq->front + 1) & CQ_MASK;
cq 1400 kernel/locking/lockdep.c static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
cq 1402 kernel/locking/lockdep.c return (cq->rear - cq->front) & CQ_MASK;
cq 1469 kernel/locking/lockdep.c struct circular_queue *cq = &lock_cq;
cq 1482 kernel/locking/lockdep.c __cq_init(cq);
cq 1483 kernel/locking/lockdep.c __cq_enqueue(cq, source_entry);
cq 1485 kernel/locking/lockdep.c while ((lock = __cq_dequeue(cq))) {
cq 1506 kernel/locking/lockdep.c if (__cq_enqueue(cq, entry)) {
cq 1510 kernel/locking/lockdep.c cq_depth = __cq_get_elem_count(cq);
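
The lockdep entries above define the fixed-size circular queue behind its breadth-first search of the lock dependency graph: front/rear indices masked by CQ_MASK (a power of two minus one), fullness detected by rear + 1 catching up to front, and the element count recovered as (rear - front) & CQ_MASK. A self-contained user-space rendering (MAX_CQ_SIZE is illustrative; lockdep sizes the real queue to its dependency-entry table):

#include <stdio.h>

#define MAX_CQ_SIZE 16			/* must be a power of two */
#define CQ_MASK (MAX_CQ_SIZE - 1)

struct circular_queue {
	void *element[MAX_CQ_SIZE];
	unsigned int front, rear;
};

static void cq_init(struct circular_queue *cq) { cq->front = cq->rear = 0; }

static int cq_empty(struct circular_queue *cq) { return cq->front == cq->rear; }

static int cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;	/* one slot kept free */
}

static int cq_enqueue(struct circular_queue *cq, void *elem)
{
	if (cq_full(cq))
		return -1;
	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static void *cq_dequeue(struct circular_queue *cq)
{
	void *elem;

	if (cq_empty(cq))
		return NULL;
	elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return elem;
}

static unsigned int cq_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

int main(void)
{
	struct circular_queue q;
	int a = 1, b = 2;

	cq_init(&q);
	cq_enqueue(&q, &a);
	cq_enqueue(&q, &b);
	printf("depth=%u first=%d\n", cq_count(&q), *(int *)cq_dequeue(&q));
	return 0;
}
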
cq 82 net/9p/trans_rdma.c struct ib_cq *cq;
cq 290 net/9p/trans_rdma.c recv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 292 net/9p/trans_rdma.c struct p9_client *client = cq->cq_context;
cq 341 net/9p/trans_rdma.c send_done(struct ib_cq *cq, struct ib_wc *wc)
cq 343 net/9p/trans_rdma.c struct p9_client *client = cq->cq_context;
cq 373 net/9p/trans_rdma.c if (rdma->cq && !IS_ERR(rdma->cq))
cq 374 net/9p/trans_rdma.c ib_free_cq(rdma->cq);
cq 688 net/9p/trans_rdma.c rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
cq 691 net/9p/trans_rdma.c if (IS_ERR(rdma->cq))
cq 709 net/9p/trans_rdma.c qp_attr.send_cq = rdma->cq;
cq 710 net/9p/trans_rdma.c qp_attr.recv_cq = rdma->cq;
cq 1380 net/ipv4/ipconfig.c char *cp, *cq;
cq 1382 net/ipv4/ipconfig.c cp = cq = name;
cq 1386 net/ipv4/ipconfig.c if (cp == cq || cp - cq > 3)
cq 1392 net/ipv4/ipconfig.c cq = cp;
cq 280 net/rds/ib_cm.c static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context)
cq 285 net/rds/ib_cm.c rdsdebug("conn %p cq %p\n", conn, cq);
cq 292 net/rds/ib_cm.c static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
cq 298 net/rds/ib_cm.c while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
cq 336 net/rds/ib_cm.c static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq,
cq 343 net/rds/ib_cm.c while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
cq 408 net/rds/ib_cm.c static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
cq 413 net/rds/ib_cm.c rdsdebug("conn %p cq %p\n", conn, cq);
cq 959 net/sunrpc/cache.c struct cache_queue *cq;
cq 971 net/sunrpc/cache.c for (cq= &rp->q; &cq->list != &cd->queue;
cq 972 net/sunrpc/cache.c cq = list_entry(cq->list.next, struct cache_queue, list))
cq 973 net/sunrpc/cache.c if (!cq->reader) {
cq 987 net/sunrpc/cache.c struct cache_queue *cq;
cq 997 net/sunrpc/cache.c for (cq= &rp->q; &cq->list != &cd->queue;
cq 998 net/sunrpc/cache.c cq = list_entry(cq->list.next, struct cache_queue, list))
cq 999 net/sunrpc/cache.c if (!cq->reader) {
cq 1001 net/sunrpc/cache.c container_of(cq, struct cache_request, q);
cq 1045 net/sunrpc/cache.c struct cache_queue *cq;
cq 1046 net/sunrpc/cache.c for (cq= &rp->q; &cq->list != &cd->queue;
cq 1047 net/sunrpc/cache.c cq = list_entry(cq->list.next, struct cache_queue, list))
cq 1048 net/sunrpc/cache.c if (!cq->reader) {
cq 1049 net/sunrpc/cache.c container_of(cq, struct cache_request, q)
cq 1074 net/sunrpc/cache.c struct cache_queue *cq, *tmp;
cq 1080 net/sunrpc/cache.c list_for_each_entry_safe(cq, tmp, &detail->queue, list)
cq 1081 net/sunrpc/cache.c if (!cq->reader) {
cq 1082 net/sunrpc/cache.c cr = container_of(cq, struct cache_request, q);
cq 401 net/sunrpc/xprtrdma/frwr_ops.c static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
cq 483 net/sunrpc/xprtrdma/frwr_ops.c static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
cq 502 net/sunrpc/xprtrdma/frwr_ops.c static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
cq 604 net/sunrpc/xprtrdma/frwr_ops.c static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
cq 111 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);
cq 305 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
cq 307 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c struct svcxprt_rdma *rdma = cq->cq_context;
cq 19 net/sunrpc/xprtrdma/svc_rdma_rw.c static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
cq 20 net/sunrpc/xprtrdma/svc_rdma_rw.c static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
cq 202 net/sunrpc/xprtrdma/svc_rdma_rw.c static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
cq 260 net/sunrpc/xprtrdma/svc_rdma_rw.c static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
cq 117 net/sunrpc/xprtrdma/svc_rdma_sendto.c static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
cq 258 net/sunrpc/xprtrdma/svc_rdma_sendto.c static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
cq 260 net/sunrpc/xprtrdma/svc_rdma_sendto.c struct svcxprt_rdma *rdma = cq->cq_context;
cq 132 net/sunrpc/xprtrdma/verbs.c rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
cq 150 net/sunrpc/xprtrdma/verbs.c rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
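
The frwr_ops.c, svc_rdma_*.c and verbs.c entries above all use the ib_cqe completion style: the work request embeds a struct ib_cqe whose .done callback receives the CQ and the work completion, the handler recovers its context via container_of() on wc->wr_cqe, and any status other than IB_WC_SUCCESS is treated as a flush or teardown signal. A sketch of that shape (send_ctxt and its lifecycle are assumptions):

struct send_ctxt {
	struct ib_cqe cqe;	/* send_wr.wr_cqe points here */
	/* ... per-request state ... */
};

static void my_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct send_ctxt *ctxt =
		container_of(wc->wr_cqe, struct send_ctxt, cqe);

	if (wc->status != IB_WC_SUCCESS) {
		/* connection going away: release ctxt, kick teardown */
		return;
	}
	/* success: complete or recycle ctxt */
}

/* setup: ctxt->cqe.done = my_wc_send; send_wr.wr_cqe = &ctxt->cqe; */
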
cq 242 net/xdp/xdp_umem.c if (umem->cq) {
cq 243 net/xdp/xdp_umem.c xskq_destroy(umem->cq);
cq 244 net/xdp/xdp_umem.c umem->cq = NULL;
cq 462 net/xdp/xdp_umem.c return umem->fq && umem->cq;
cq 270 net/xdp/xsk.c xskq_produce_flush_addr_n(umem->cq, nb_entries);
cq 295 net/xdp/xsk.c if (xskq_produce_addr_lazy(umem->cq, desc->addr))
cq 333 net/xdp/xsk.c WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
cq 374 net/xdp/xsk.c if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
cq 694 net/xdp/xsk.c xskq_set_umem(xs->umem->cq, xs->umem->size,
cq 821 net/xdp/xsk.c &xs->umem->cq;
cq 990 net/xdp/xsk.c q = READ_ONCE(umem->cq);
cq 72 net/xdp/xsk_diag.c if (!err && umem->cq) {
cq 73 net/xdp/xsk_diag.c err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
cq 82 samples/bpf/xdpsock_user.c struct xsk_ring_cons cq;
cq 300 samples/bpf/xdpsock_user.c ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
cq 504 samples/bpf/xdpsock_user.c rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
cq 520 samples/bpf/xdpsock_user.c *xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);
cq 523 samples/bpf/xdpsock_user.c xsk_ring_cons__release(&xsk->umem->cq, rcvd);
cq 540 samples/bpf/xdpsock_user.c rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
cq 542 samples/bpf/xdpsock_user.c xsk_ring_cons__release(&xsk->umem->cq, rcvd);
cq 47 tools/io_uring/liburing.h struct io_uring_cq cq;
cq 83 tools/io_uring/liburing.h struct io_uring_cq *cq = &ring->cq;
cq 85 tools/io_uring/liburing.h (*cq->khead)++;
cq 14 tools/io_uring/queue.c struct io_uring_cq *cq = &ring->cq;
cq 15 tools/io_uring/queue.c const unsigned mask = *cq->kring_mask;
cq 20 tools/io_uring/queue.c head = *cq->khead;
cq 30 tools/io_uring/queue.c if (head != *cq->ktail) {
cq 31 tools/io_uring/queue.c *cqe_ptr = &cq->cqes[head & mask];
cq 11 tools/io_uring/setup.c struct io_uring_sq *sq, struct io_uring_cq *cq)
cq 41 tools/io_uring/setup.c cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
cq 42 tools/io_uring/setup.c ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
cq 49 tools/io_uring/setup.c cq->khead = ptr + p->cq_off.head;
cq 50 tools/io_uring/setup.c cq->ktail = ptr + p->cq_off.tail;
cq 51 tools/io_uring/setup.c cq->kring_mask = ptr + p->cq_off.ring_mask;
cq 52 tools/io_uring/setup.c cq->kring_entries = ptr + p->cq_off.ring_entries;
cq 53 tools/io_uring/setup.c cq->koverflow = ptr + p->cq_off.overflow;
cq 54 tools/io_uring/setup.c cq->cqes = ptr + p->cq_off.cqes;
cq 69 tools/io_uring/setup.c ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
cq 101 tools/io_uring/setup.c struct io_uring_cq *cq = &ring->cq;
cq 105 tools/io_uring/setup.c munmap(cq->khead, cq->ring_sz);
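
The tools/io_uring entries above close the loop from user space: setup.c mmap()s the CQ ring and aims khead/ktail/kring_mask/cqes into the shared mapping, queue.c sees a pending CQE whenever the locally read head differs from *ktail and indexes cqes[head & mask], and liburing.h hands the slot back by bumping *khead. A condensed sketch of the peek/advance pair built on those fields (the real code inserts a read barrier before trusting the kernel-written tail):

static struct io_uring_cqe *peek_cqe(struct io_uring *ring)
{
	struct io_uring_cq *cq = &ring->cq;
	const unsigned mask = *cq->kring_mask;
	unsigned head = *cq->khead;

	/* read_barrier() here in the real code before reading *ktail */
	if (head == *cq->ktail)
		return NULL;			/* ring empty */
	return &cq->cqes[head & mask];		/* oldest unseen completion */
}

static void cqe_seen(struct io_uring *ring)
{
	(*ring->cq.khead)++;			/* hand the slot back to the kernel */
}
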