mcq 90 drivers/infiniband/hw/mlx4/cq.c return get_sw_cqe(cq, cq->mcq.cons_index);
mcq 95 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *mcq = to_mcq(cq);
mcq 98 drivers/infiniband/hw/mlx4/cq.c return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
mcq 224 drivers/infiniband/hw/mlx4/cq.c cq->mcq.usage = MLX4_RES_USAGE_USER_VERBS;
mcq 230 drivers/infiniband/hw/mlx4/cq.c cq->mcq.set_ci_db = cq->db.db;
mcq 231 drivers/infiniband/hw/mlx4/cq.c cq->mcq.arm_db = cq->db.db + 1;
mcq 232 drivers/infiniband/hw/mlx4/cq.c *cq->mcq.set_ci_db = 0;
mcq 233 drivers/infiniband/hw/mlx4/cq.c *cq->mcq.arm_db = 0;
mcq 242 drivers/infiniband/hw/mlx4/cq.c cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
mcq 249 drivers/infiniband/hw/mlx4/cq.c &cq->mcq, vector, 0,
mcq 257 drivers/infiniband/hw/mlx4/cq.c cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
mcq 259 drivers/infiniband/hw/mlx4/cq.c cq->mcq.comp = mlx4_ib_cq_comp;
mcq 260 drivers/infiniband/hw/mlx4/cq.c cq->mcq.event = mlx4_ib_cq_event;
mcq 263 drivers/infiniband/hw/mlx4/cq.c if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
mcq 271 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_free(dev->dev, &cq->mcq);
mcq 348 drivers/infiniband/hw/mlx4/cq.c i = cq->mcq.cons_index;
mcq 352 drivers/infiniband/hw/mlx4/cq.c return i - cq->mcq.cons_index;
mcq 362 drivers/infiniband/hw/mlx4/cq.c i = cq->mcq.cons_index;
mcq 377 drivers/infiniband/hw/mlx4/cq.c ++cq->mcq.cons_index;
mcq 424 drivers/infiniband/hw/mlx4/cq.c err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
mcq 481 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_cq *mcq = to_mcq(cq);
mcq 483 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_free(dev->dev, &mcq->mcq);
mcq 484 drivers/infiniband/hw/mlx4/cq.c mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);
mcq 492 drivers/infiniband/hw/mlx4/cq.c &mcq->db);
mcq 494 drivers/infiniband/hw/mlx4/cq.c mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
mcq 495 drivers/infiniband/hw/mlx4/cq.c mlx4_db_free(dev->dev, &mcq->db);
mcq 497 drivers/infiniband/hw/mlx4/cq.c ib_umem_release(mcq->umem);
mcq 685 drivers/infiniband/hw/mlx4/cq.c ++cq->mcq.cons_index;
mcq 900 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_set_ci(&cq->mcq);
mcq 910 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_arm(&to_mcq(ibcq)->mcq,
mcq 934 drivers/infiniband/hw/mlx4/cq.c for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
mcq 935 drivers/infiniband/hw/mlx4/cq.c if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
mcq 942 drivers/infiniband/hw/mlx4/cq.c while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
mcq 962 drivers/infiniband/hw/mlx4/cq.c cq->mcq.cons_index += nfreed;
mcq 968 drivers/infiniband/hw/mlx4/cq.c mlx4_cq_set_ci(&cq->mcq);
mcq 3110 drivers/infiniband/hw/mlx4/main.c struct mlx4_cq *mcq;
mcq 3124 drivers/infiniband/hw/mlx4/main.c if (send_mcq->mcq.comp &&
mcq 3126 drivers/infiniband/hw/mlx4/main.c if (!send_mcq->mcq.reset_notify_added) {
mcq 3127 drivers/infiniband/hw/mlx4/main.c send_mcq->mcq.reset_notify_added = 1;
mcq 3128 drivers/infiniband/hw/mlx4/main.c list_add_tail(&send_mcq->mcq.reset_notify,
mcq 3142 drivers/infiniband/hw/mlx4/main.c if (recv_mcq->mcq.comp &&
mcq 3144 drivers/infiniband/hw/mlx4/main.c if (!recv_mcq->mcq.reset_notify_added) {
mcq 3145 drivers/infiniband/hw/mlx4/main.c recv_mcq->mcq.reset_notify_added = 1;
mcq 3146 drivers/infiniband/hw/mlx4/main.c list_add_tail(&recv_mcq->mcq.reset_notify,
mcq 3157 drivers/infiniband/hw/mlx4/main.c list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
mcq 3158 drivers/infiniband/hw/mlx4/main.c mcq->comp(mcq);
mcq 117 drivers/infiniband/hw/mlx4/mlx4_ib.h struct mlx4_cq mcq;
mcq 666 drivers/infiniband/hw/mlx4/mlx4_ib.h static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
mcq 668 drivers/infiniband/hw/mlx4/mlx4_ib.h return container_of(mcq, struct mlx4_ib_cq, mcq);
mcq 866 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_cq *mcq;
mcq 968 drivers/infiniband/hw/mlx4/qp.c mcq = to_mcq(init_attr->send_cq);
mcq 969 drivers/infiniband/hw/mlx4/qp.c list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
mcq 970 drivers/infiniband/hw/mlx4/qp.c mcq = to_mcq(init_attr->recv_cq);
mcq 971 drivers/infiniband/hw/mlx4/qp.c list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
mcq 1002 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_cq *mcq;
mcq 1255 drivers/infiniband/hw/mlx4/qp.c mcq = to_mcq(init_attr->send_cq);
mcq 1256 drivers/infiniband/hw/mlx4/qp.c list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
mcq 1257 drivers/infiniband/hw/mlx4/qp.c mcq = to_mcq(init_attr->recv_cq);
mcq 1258 drivers/infiniband/hw/mlx4/qp.c list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
mcq 1323 drivers/infiniband/hw/mlx4/qp.c } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
mcq 1338 drivers/infiniband/hw/mlx4/qp.c } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
mcq 2384 drivers/infiniband/hw/mlx4/qp.c context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
mcq 2385 drivers/infiniband/hw/mlx4/qp.c context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
mcq 4441 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_cq *mcq = to_mcq(cq);
mcq 4447 drivers/infiniband/hw/mlx4/qp.c if (!mcq->mcq.reset_notify_added)
mcq 4448 drivers/infiniband/hw/mlx4/qp.c mcq->mcq.reset_notify_added = 1;
mcq 4471 drivers/infiniband/hw/mlx4/qp.c mcq->mcq.comp(&mcq->mcq);
mcq 175 drivers/infiniband/hw/mlx4/srq.c to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
mcq 47 drivers/infiniband/hw/mlx5/cq.c static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
mcq 49 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
mcq 56 drivers/infiniband/hw/mlx5/cq.c type, mcq->cqn);
mcq 83 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
mcq 95 drivers/infiniband/hw/mlx5/cq.c return get_sw_cqe(cq, cq->mcq.cons_index);
mcq 462 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
mcq 464 drivers/infiniband/hw/mlx5/cq.c ++cq->mcq.cons_index;
mcq 521 drivers/infiniband/hw/mlx5/cq.c "Requestor" : "Responder", cq->mcq.cqn);
mcq 557 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqn, mr->sig->err_item.key,
mcq 582 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqn);
mcq 626 drivers/infiniband/hw/mlx5/cq.c mlx5_cq_set_ci(&cq->mcq);
mcq 649 drivers/infiniband/hw/mlx5/cq.c mlx5_cq_arm(&cq->mcq,
mcq 652 drivers/infiniband/hw/mlx5/cq.c uar_page, to_mcq(ibcq)->mcq.cons_index);
mcq 857 drivers/infiniband/hw/mlx5/cq.c cq->mcq.set_ci_db = cq->db.db;
mcq 858 drivers/infiniband/hw/mlx5/cq.c cq->mcq.arm_db = cq->db.db + 1;
mcq 859 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqe_sz = cqe_size;
mcq 981 drivers/infiniband/hw/mlx5/cq.c err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
mcq 985 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
mcq 986 drivers/infiniband/hw/mlx5/cq.c cq->mcq.irqn = irqn;
mcq 988 drivers/infiniband/hw/mlx5/cq.c cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
mcq 990 drivers/infiniband/hw/mlx5/cq.c cq->mcq.comp = mlx5_ib_cq_comp;
mcq 991 drivers/infiniband/hw/mlx5/cq.c cq->mcq.event = mlx5_ib_cq_event;
mcq 996 drivers/infiniband/hw/mlx5/cq.c if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
mcq 1006 drivers/infiniband/hw/mlx5/cq.c mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
mcq 1020 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *mcq = to_mcq(cq);
mcq 1022 drivers/infiniband/hw/mlx5/cq.c mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
mcq 1024 drivers/infiniband/hw/mlx5/cq.c destroy_cq_user(mcq, udata);
mcq 1026 drivers/infiniband/hw/mlx5/cq.c destroy_cq_kernel(dev, mcq);
mcq 1051 drivers/infiniband/hw/mlx5/cq.c for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
mcq 1052 drivers/infiniband/hw/mlx5/cq.c if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
mcq 1058 drivers/infiniband/hw/mlx5/cq.c while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
mcq 1060 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
mcq 1067 drivers/infiniband/hw/mlx5/cq.c dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
mcq 1069 drivers/infiniband/hw/mlx5/cq.c memcpy(dest, cqe, cq->mcq.cqe_sz);
mcq 1076 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cons_index += nfreed;
mcq 1081 drivers/infiniband/hw/mlx5/cq.c mlx5_cq_set_ci(&cq->mcq);
mcq 1098 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_cq *mcq = to_mcq(cq);
mcq 1107 drivers/infiniband/hw/mlx5/cq.c err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
mcq 1110 drivers/infiniband/hw/mlx5/cq.c mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
mcq 1194 drivers/infiniband/hw/mlx5/cq.c i = cq->mcq.cons_index;
mcq 1221 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqn);
mcq 1225 drivers/infiniband/hw/mlx5/cq.c ++cq->mcq.cons_index;
mcq 1314 drivers/infiniband/hw/mlx5/cq.c MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
mcq 1316 drivers/infiniband/hw/mlx5/cq.c err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
mcq 579 drivers/infiniband/hw/mlx5/devx.c to_mcq(uobj->object)->mcq.cqn) ==
mcq 1374 drivers/infiniband/hw/mlx5/devx.c static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
mcq 1376 drivers/infiniband/hw/mlx5/devx.c struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
mcq 1380 drivers/infiniband/hw/mlx5/devx.c u32 obj_id = mcq->cqn;
mcq 4471 drivers/infiniband/hw/mlx5/main.c struct mlx5_core_cq *mcq;
mcq 4486 drivers/infiniband/hw/mlx5/main.c if (send_mcq->mcq.comp &&
mcq 4488 drivers/infiniband/hw/mlx5/main.c if (!send_mcq->mcq.reset_notify_added) {
mcq 4489 drivers/infiniband/hw/mlx5/main.c send_mcq->mcq.reset_notify_added = 1;
mcq 4490 drivers/infiniband/hw/mlx5/main.c list_add_tail(&send_mcq->mcq.reset_notify,
mcq 4503 drivers/infiniband/hw/mlx5/main.c if (recv_mcq->mcq.comp &&
mcq 4505 drivers/infiniband/hw/mlx5/main.c if (!recv_mcq->mcq.reset_notify_added) {
mcq 4506 drivers/infiniband/hw/mlx5/main.c recv_mcq->mcq.reset_notify_added = 1;
mcq 4507 drivers/infiniband/hw/mlx5/main.c list_add_tail(&recv_mcq->mcq.reset_notify,
mcq 4520 drivers/infiniband/hw/mlx5/main.c list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
mcq 4521 drivers/infiniband/hw/mlx5/main.c mcq->comp(mcq, NULL);
mcq 505 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_core_cq mcq;
mcq 1005 drivers/infiniband/hw/mlx5/mlx5_ib.h static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
mcq 1007 drivers/infiniband/hw/mlx5/mlx5_ib.h return container_of(mcq, struct mlx5_ib_cq, mcq);
mcq 2223 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
mcq 2224 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
mcq 2229 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
mcq 2244 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
mcq 2247 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
mcq 2335 drivers/infiniband/hw/mlx5/qp.c if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
mcq 2339 drivers/infiniband/hw/mlx5/qp.c } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
mcq 2365 drivers/infiniband/hw/mlx5/qp.c if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
mcq 2368 drivers/infiniband/hw/mlx5/qp.c } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
mcq 2574 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
mcq 2725 drivers/infiniband/hw/mlx5/qp.c init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
mcq 2726 drivers/infiniband/hw/mlx5/qp.c init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
mcq 3567 drivers/infiniband/hw/mlx5/qp.c context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
mcq 3568 drivers/infiniband/hw/mlx5/qp.c context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
mcq 5942 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
mcq 6403 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_cq *mcq = to_mcq(cq);
mcq 6409 drivers/infiniband/hw/mlx5/qp.c if (!mcq->mcq.reset_notify_added)
mcq 6410 drivers/infiniband/hw/mlx5/qp.c mcq->mcq.reset_notify_added = 1;
mcq 6433 drivers/infiniband/hw/mlx5/qp.c mcq->mcq.comp(&mcq->mcq, NULL);
mcq 295 drivers/infiniband/hw/mlx5/srq.c in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
mcq 297 drivers/infiniband/hw/mlx5/srq.c in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
mcq 63 drivers/net/ethernet/mellanox/mlx4/cq.c struct mlx4_cq *mcq, *temp;
mcq 69 drivers/net/ethernet/mellanox/mlx4/cq.c list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
mcq 70 drivers/net/ethernet/mellanox/mlx4/cq.c list_del_init(&mcq->tasklet_ctx.list);
mcq 71 drivers/net/ethernet/mellanox/mlx4/cq.c mcq->tasklet_ctx.comp(mcq);
mcq 72 drivers/net/ethernet/mellanox/mlx4/cq.c if (refcount_dec_and_test(&mcq->refcount))
mcq 73 drivers/net/ethernet/mellanox/mlx4/cq.c complete(&mcq->free);
mcq 98 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.set_ci_db = cq->wqres.db.db;
mcq 99 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.arm_db = cq->wqres.db.db + 1;
mcq 100 drivers/net/ethernet/mellanox/mlx4/en_cq.c *cq->mcq.set_ci_db = 0;
mcq 101 drivers/net/ethernet/mellanox/mlx4/en_cq.c *cq->mcq.arm_db = 0;
mcq 140 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
mcq 142 drivers/net/ethernet/mellanox/mlx4/en_cq.c &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
mcq 147 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.event = mlx4_en_cq_event;
mcq 151 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.comp = mlx4_en_tx_irq;
mcq 157 drivers/net/ethernet/mellanox/mlx4/en_cq.c cq->mcq.comp = mlx4_en_rx_irq;
mcq 199 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_cq_free(priv->mdev->dev, &cq->mcq);
mcq 205 drivers/net/ethernet/mellanox/mlx4/en_cq.c return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
mcq 211 drivers/net/ethernet/mellanox/mlx4/en_cq.c mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
mcq 1682 drivers/net/ethernet/mellanox/mlx4/en_netdev.c priv->rx_ring[i]->cqn = cq->mcq.cqn;
mcq 1734 drivers/net/ethernet/mellanox/mlx4/en_netdev.c cq->mcq.cqn,
mcq 341 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
mcq 691 drivers/net/ethernet/mellanox/mlx4/en_rx.c index = cq->mcq.cons_index & ring->size_mask;
mcq 696 drivers/net/ethernet/mellanox/mlx4/en_rx.c cq->mcq.cons_index & cq->size)) {
mcq 897 drivers/net/ethernet/mellanox/mlx4/en_rx.c ++cq->mcq.cons_index;
mcq 898 drivers/net/ethernet/mellanox/mlx4/en_rx.c index = (cq->mcq.cons_index) & ring->size_mask;
mcq 912 drivers/net/ethernet/mellanox/mlx4/en_rx.c mlx4_cq_set_ci(&cq->mcq);
mcq 914 drivers/net/ethernet/mellanox/mlx4/en_rx.c ring->cons = cq->mcq.cons_index;
mcq 924 drivers/net/ethernet/mellanox/mlx4/en_rx.c void mlx4_en_rx_irq(struct mlx4_cq *mcq)
mcq 926 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
mcq 399 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_cq *mcq = &cq->mcq;
mcq 405 drivers/net/ethernet/mellanox/mlx4/en_tx.c u32 cons_index = mcq->cons_index;
mcq 486 drivers/net/ethernet/mellanox/mlx4/en_tx.c mcq->cons_index = cons_index;
mcq 487 drivers/net/ethernet/mellanox/mlx4/en_tx.c mlx4_cq_set_ci(mcq);
mcq 510 drivers/net/ethernet/mellanox/mlx4/en_tx.c void mlx4_en_tx_irq(struct mlx4_cq *mcq)
mcq 512 drivers/net/ethernet/mellanox/mlx4/en_tx.c struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
mcq 366 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h struct mlx4_cq mcq;
mcq 699 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_tx_irq(struct mlx4_cq *mcq);
mcq 762 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h void mlx4_en_rx_irq(struct mlx4_cq *mcq);
mcq 51 drivers/net/ethernet/mellanox/mlx5/core/cq.c struct mlx5_core_cq *mcq;
mcq 58 drivers/net/ethernet/mellanox/mlx5/core/cq.c list_for_each_entry_safe(mcq, temp, &ctx->process_list,
mcq 60 drivers/net/ethernet/mellanox/mlx5/core/cq.c list_del_init(&mcq->tasklet_ctx.list);
mcq 61 drivers/net/ethernet/mellanox/mlx5/core/cq.c mcq->tasklet_ctx.comp(mcq, NULL);
mcq 62 drivers/net/ethernet/mellanox/mlx5/core/cq.c mlx5_cq_put(mcq);
mcq 325 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_core_cq mcq;
mcq 914 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
mcq 915 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
mcq 45 drivers/net/ethernet/mellanox/mlx5/core/en/health.c err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
mcq 56 drivers/net/ethernet/mellanox/mlx5/core/en/health.c err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
mcq 196 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c eq = rq->cq.mcq.eq;
mcq 214 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
mcq 105 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c eq = sq->cq.mcq.eq;
mcq 123 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
mcq 144 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5_core_cq *mcq;
mcq 146 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mcq = &cq->mcq;
mcq 147 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
mcq 38 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
mcq 40 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
mcq 51 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);
mcq 61 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
mcq 525 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c &c->sq[tc].cq.mcq,
mcq 530 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
mcq 696 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
mcq 1326 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn;
mcq 1424 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn;
mcq 1473 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn;
mcq 1537 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_cq *mcq = &cq->mcq;
mcq 1552 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->cqe_sz = 64;
mcq 1553 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq 1554 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->arm_db = cq->wq_ctrl.db.db + 1;
mcq 1555 drivers/net/ethernet/mellanox/mlx5/core/en_main.c *mcq->set_ci_db = 0;
mcq 1556 drivers/net/ethernet/mellanox/mlx5/core/en_main.c *mcq->arm_db = 0;
mcq 1557 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->vector = param->eq_ix;
mcq 1558 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->comp = mlx5e_completion_event;
mcq 1559 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->event = mlx5e_cq_error_event;
mcq 1560 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->irqn = irqn;
mcq 1601 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_core_cq *mcq = &cq->mcq;
mcq 1634 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
mcq 1648 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
mcq 1666 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
mcq 413 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->cq.mcq.cqn, ci, sq->sqn,
mcq 199 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
mcq 201 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
mcq 208 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
mcq 210 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
mcq 215 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c __func__, mcq->cqn, event);
mcq 361 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
mcq 365 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c static void mlx5_fpga_conn_cq_event(struct mlx5_core_cq *mcq,
mcq 370 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
mcq 371 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_warn(conn->fdev, "CQ event %u on CQ #%u\n", event, mcq->cqn);
mcq 417 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
mcq 422 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
mcq 483 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
mcq 489 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.cqe_sz = 64;
mcq 490 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.set_ci_db = conn->cq.wq_ctrl.db.db;
mcq 491 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.arm_db = conn->cq.wq_ctrl.db.db + 1;
mcq 492 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->cq.mcq.set_ci_db = 0;
mcq 493 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->cq.mcq.arm_db = 0;
mcq 494 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.vector = 0;
mcq 495 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
mcq 496 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.event = mlx5_fpga_conn_cq_event;
mcq 497 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.irqn = irqn;
mcq 498 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.uar = fdev->conn_res.uar;
mcq 502 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
mcq 516 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
mcq 594 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
mcq 595 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
mcq 699 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
mcq 700 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
mcq 981 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c synchronize_irq(conn->cq.mcq.irqn);
mcq 58 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h struct mlx5_core_cq mcq;
mcq 686 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_cq_event(struct mlx5_core_cq *mcq,
mcq 689 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
mcq 692 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_cq_complete(struct mlx5_core_cq *mcq,
mcq 695 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c pr_err("CQ completion CQ: #%u\n", mcq->cqn);
mcq 758 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.event = dr_cq_event;
mcq 759 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.comp = dr_cq_complete;
mcq 761 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
mcq 767 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.cqe_sz = 64;
mcq 768 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
mcq 769 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
mcq 770 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *cq->mcq.set_ci_db = 0;
mcq 775 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *cq->mcq.arm_db = cpu_to_be32(2 << 28);
mcq 777 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.vector = 0;
mcq 778 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.irqn = irqn;
mcq 779 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.uar = uar;
mcq 792 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_core_destroy_cq(mdev, &cq->mcq);
mcq 881 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
mcq 996 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5_core_cq mcq;
mcq 7819 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *mcq;
mcq 7829 drivers/scsi/lpfc/lpfc_sli.c mcq = phba->sli4_hba.mbx_cq;
mcq 7830 drivers/scsi/lpfc/lpfc_sli.c idx = mcq->hba_index;
mcq 7831 drivers/scsi/lpfc/lpfc_sli.c qe_valid = mcq->qe_valid;
mcq 7833 drivers/scsi/lpfc/lpfc_sli.c (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
mcq 7834 drivers/scsi/lpfc/lpfc_sli.c mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
mcq 7840 drivers/scsi/lpfc/lpfc_sli.c idx = (idx + 1) % mcq->entry_count;
mcq 7841 drivers/scsi/lpfc/lpfc_sli.c if (mcq->hba_index == idx)
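The pattern that recurs across these hits is a driver-specific CQ wrapper (struct mlx4_ib_cq, struct mlx5_ib_cq, struct mlx5e_cq, struct mlx4_en_cq, ...) embedding the core CQ as a member named mcq, installing completion/event callbacks on it, and recovering the wrapper from the core pointer with container_of(). The sketch below is a minimal user-space illustration of that shape only; the struct layouts, field names other than mcq, and function names are stand-ins, not the real kernel definitions.

```c
/*
 * Minimal sketch of the "embedded mcq + container_of()" pattern the
 * index above shows. All types here are hypothetical stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): member pointer -> enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_cq {                               /* stands in for struct mlx5_core_cq      */
	unsigned int cqn;                      /* CQ number reported by the core layer   */
	void (*comp)(struct core_cq *mcq);     /* completion callback set by the driver  */
};

struct driver_cq {                             /* stands in for mlx5_ib_cq / mlx5e_cq    */
	int budget;                            /* some driver-private state               */
	struct core_cq mcq;                    /* embedded core CQ, the "mcq" member      */
};

/* Analogue of to_mibcq()/mlx5e_completion_event(): recover the wrapper. */
static void driver_cq_comp(struct core_cq *mcq)
{
	struct driver_cq *cq = container_of(mcq, struct driver_cq, mcq);

	printf("completion on cqn %u, budget %d\n", mcq->cqn, cq->budget);
}

int main(void)
{
	struct driver_cq cq = { .budget = 64, .mcq = { .cqn = 7 } };

	cq.mcq.comp = driver_cq_comp;          /* like cq->mcq.comp = mlx5e_completion_event */
	cq.mcq.comp(&cq.mcq);                  /* the core layer only ever sees &cq->mcq     */
	return 0;
}
```

This is why the listing contains both plain assignments through cq->mcq.* (doorbells, cqn, comp/event callbacks) and container_of() calls in the callbacks: the core CQ code works only with the embedded struct, and each driver translates back to its own wrapper when the callback fires.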