/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | cq.c |
    47  static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)  in mlx5_ib_cq_event() argument
    49  struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);  in mlx5_ib_cq_event()
    56  type, mcq->cqn);  in mlx5_ib_cq_event()
    75  return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);  in get_cqe()
    88  cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;  in get_sw_cqe()
   100  return get_sw_cqe(cq, cq->mcq.cons_index);  in next_cqe_sw()
   433  cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;  in mlx5_poll_one()
   435  ++cq->mcq.cons_index;  in mlx5_poll_one()
   464  cq->mcq.cqn, qpn);  in mlx5_poll_one()
   498  "Requestor" : "Responder", cq->mcq.cqn);  in mlx5_poll_one()
   [all …]
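The get_cqe()/get_sw_cqe() hits above reflect mlx5's two CQE strides: a CQ is built with 64- or 128-byte entries, and with the 128-byte stride the 64-byte hardware CQE sits in the second half of each slot. A minimal sketch of that indexing, with a simplified CQE type standing in for the driver's struct mlx5_cqe64:

	#include <stdint.h>

	/* Simplified stand-in for struct mlx5_cqe64; only its size matters here. */
	struct cqe64 { uint8_t data[63]; uint8_t op_own; };

	/* buf points at the CQE buffer, n is the entry index, cqe_sz is 64 or
	 * 128 (cq->mcq.cqe_sz in the driver). */
	static struct cqe64 *get_cqe64(void *buf, int n, int cqe_sz)
	{
		char *cqe = (char *)buf + n * cqe_sz;

		/* With the 128-byte stride the hardware CQE is the second 64
		 * bytes, mirroring line 88's
		 * "cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64". */
		return (struct cqe64 *)(cqe_sz == 64 ? cqe : cqe + 64);
	}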
|
D | mlx5_ib.h |
   273  struct mlx5_core_cq mcq;  member
   436  static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)  in to_mibcq() argument
   438  return container_of(mcq, struct mlx5_ib_cq, mcq);  in to_mibcq()
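to_mibcq() is the usual container_of() upcast: the core layer only ever holds a pointer to the embedded struct mlx5_core_cq, and the IB driver recovers its wrapper from that member pointer. A standalone sketch of the idiom, with simplified struct bodies rather than the kernel definitions:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mlx5_core_cq { unsigned int cqn; };                 /* simplified */
	struct mlx5_ib_cq { int ib_fields; struct mlx5_core_cq mcq; }; /* simplified */

	static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
	{
		return container_of(mcq, struct mlx5_ib_cq, mcq);
	}

	int main(void)
	{
		struct mlx5_ib_cq cq = { .mcq = { .cqn = 7 } };
		struct mlx5_core_cq *core = &cq.mcq;   /* what the core layer sees */

		/* Recover the wrapper struct from the embedded member. */
		printf("recovered: %s, cqn=%u\n",
		       to_mibcq(core) == &cq ? "yes" : "no", to_mibcq(core)->cqn);
		return 0;
	}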
|
D | qp.c |
   992  in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);  in create_qp_common()
   993  in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);  in create_qp_common()
   998  in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);  in create_qp_common()
  1014  in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);  in create_qp_common()
  1017  in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);  in create_qp_common()
  1053  if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx5_ib_lock_cqs()
  1057  } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {  in mlx5_ib_lock_cqs()
  1083  if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx5_ib_unlock_cqs()
  1086  } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {  in mlx5_ib_unlock_cqs()
  1278  qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,  in mlx5_ib_create_qp()
  [all …]
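The mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() hits show the classic two-lock ordering rule: when a QP operation must hold both CQ locks, they are always taken in ascending CQN order, and only once when send and receive share a CQ, so concurrent paths can never deadlock on the pair. A sketch of the rule using pthread mutexes in place of the kernel's nested spinlocks:

	#include <pthread.h>

	struct cq { unsigned int cqn; pthread_mutex_t lock; };  /* simplified */

	static void lock_cqs(struct cq *send_cq, struct cq *recv_cq)
	{
		if (send_cq->cqn < recv_cq->cqn) {
			pthread_mutex_lock(&send_cq->lock);
			pthread_mutex_lock(&recv_cq->lock);
		} else if (send_cq->cqn == recv_cq->cqn) {
			pthread_mutex_lock(&send_cq->lock);  /* same CQ: lock once */
		} else {
			pthread_mutex_lock(&recv_cq->lock);
			pthread_mutex_lock(&send_cq->lock);
		}
	}

	static void unlock_cqs(struct cq *send_cq, struct cq *recv_cq)
	{
		/* Release in the reverse order of acquisition. */
		if (send_cq->cqn < recv_cq->cqn) {
			pthread_mutex_unlock(&recv_cq->lock);
			pthread_mutex_unlock(&send_cq->lock);
		} else if (send_cq->cqn == recv_cq->cqn) {
			pthread_mutex_unlock(&send_cq->lock);
		} else {
			pthread_mutex_unlock(&send_cq->lock);
			pthread_mutex_unlock(&recv_cq->lock);
		}
	}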
|
D | srq.c |
   295  in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);  in mlx5_ib_create_srq()
   298  in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);  in mlx5_ib_create_srq()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_txrx.c |
    86  void mlx5e_completion_event(struct mlx5_core_cq *mcq)  in mlx5e_completion_event() argument
    88  struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);  in mlx5e_completion_event()
    96  void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)  in mlx5e_cq_error_event() argument
    98  struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);  in mlx5e_cq_error_event()
   104  __func__, mcq->cqn, event);  in mlx5e_cq_error_event()
|
D | en.h |
   300  struct mlx5_core_cq mcq;  member
   563  void mlx5e_completion_event(struct mlx5_core_cq *mcq);
   564  void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
   620  struct mlx5_core_cq *mcq;  in mlx5e_cq_arm() local
   622  mcq = &cq->mcq;  in mlx5e_cq_arm()
   623  mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);  in mlx5e_cq_arm()
|
D | en_main.c |
   393  MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);  in mlx5e_enable_rq()
   622  MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);  in mlx5e_enable_sq()
   747  struct mlx5_core_cq *mcq = &cq->mcq;  in mlx5e_create_cq() local
   766  mcq->cqe_sz = 64;  in mlx5e_create_cq()
   767  mcq->set_ci_db = cq->wq_ctrl.db.db;  in mlx5e_create_cq()
   768  mcq->arm_db = cq->wq_ctrl.db.db + 1;  in mlx5e_create_cq()
   769  *mcq->set_ci_db = 0;  in mlx5e_create_cq()
   770  *mcq->arm_db = 0;  in mlx5e_create_cq()
   771  mcq->vector = param->eq_ix;  in mlx5e_create_cq()
   772  mcq->comp = mlx5e_completion_event;  in mlx5e_create_cq()
   [all …]
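Lines 767-770 show mlx5e_create_cq() carving two adjacent 32-bit doorbell words out of one doorbell record: the first publishes the consumer index, the second is used for arming, and both start at zero. A sketch of that wiring, with plain memory standing in for the DMA-visible doorbell page the driver allocates:

	#include <stdint.h>

	/* Simplified stand-in for the doorbell fields of struct mlx5_core_cq. */
	struct core_cq {
		volatile uint32_t *set_ci_db;  /* consumer-index doorbell record */
		volatile uint32_t *arm_db;     /* arm doorbell record            */
	};

	/* db stands in for the driver's doorbell record (cq->wq_ctrl.db.db):
	 * two consecutive dwords visible to the device in the real driver. */
	static void cq_db_init(struct core_cq *mcq, uint32_t *db)
	{
		mcq->set_ci_db = db;      /* mcq->set_ci_db = cq->wq_ctrl.db.db     */
		mcq->arm_db    = db + 1;  /* mcq->arm_db    = cq->wq_ctrl.db.db + 1 */
		*mcq->set_ci_db = 0;      /* hardware initially sees CI = 0         */
		*mcq->arm_db    = 0;
	}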
|
D | en_ethtool.c |
   429  &c->sq[tc].cq.mcq,  in mlx5e_set_coalesce()
   434  mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,  in mlx5e_set_coalesce()
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | cq.c |
    89  return get_sw_cqe(cq, cq->mcq.cons_index);  in next_cqe_sw()
    94  struct mlx4_ib_cq *mcq = to_mcq(cq);  in mlx4_ib_modify_cq() local
    97  return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);  in mlx4_ib_modify_cq()
   226  cq->mcq.set_ci_db = cq->db.db;  in mlx4_ib_create_cq()
   227  cq->mcq.arm_db = cq->db.db + 1;  in mlx4_ib_create_cq()
   228  *cq->mcq.set_ci_db = 0;  in mlx4_ib_create_cq()
   229  *cq->mcq.arm_db = 0;  in mlx4_ib_create_cq()
   242  cq->db.dma, &cq->mcq, vector, 0,  in mlx4_ib_create_cq()
   248  cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;  in mlx4_ib_create_cq()
   250  cq->mcq.comp = mlx4_ib_cq_comp;  in mlx4_ib_create_cq()
   [all …]
|
D | mlx4_ib.h |
   118  struct mlx4_cq mcq;  member
   635  static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)  in to_mibcq() argument
   637  return container_of(mcq, struct mlx4_ib_cq, mcq);  in to_mibcq()
|
D | main.c |
  2661  struct mlx4_cq *mcq;  in mlx4_ib_handle_catas_error() local
  2675  if (send_mcq->mcq.comp &&  in mlx4_ib_handle_catas_error()
  2677  if (!send_mcq->mcq.reset_notify_added) {  in mlx4_ib_handle_catas_error()
  2678  send_mcq->mcq.reset_notify_added = 1;  in mlx4_ib_handle_catas_error()
  2679  list_add_tail(&send_mcq->mcq.reset_notify,  in mlx4_ib_handle_catas_error()
  2693  if (recv_mcq->mcq.comp &&  in mlx4_ib_handle_catas_error()
  2695  if (!recv_mcq->mcq.reset_notify_added) {  in mlx4_ib_handle_catas_error()
  2696  recv_mcq->mcq.reset_notify_added = 1;  in mlx4_ib_handle_catas_error()
  2697  list_add_tail(&recv_mcq->mcq.reset_notify,  in mlx4_ib_handle_catas_error()
  2708  list_for_each_entry(mcq, &cq_notify_list, reset_notify) {  in mlx4_ib_handle_catas_error()
  [all …]
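mlx4_ib_handle_catas_error() sweeps the CQs attached to active QPs, queues each CQ on a local notify list at most once (the reset_notify_added flag is the guard against a CQ shared by several QPs), and then invokes each queued CQ's completion handler so consumers observe the reset. A sketch of the queue-once-then-notify pattern, with a bare singly linked list and head insertion standing in for the kernel's list_head and list_add_tail():

	#include <stdbool.h>
	#include <stddef.h>

	/* Simplified stand-in for the reset-notify fields of struct mlx4_cq. */
	struct cq {
		bool       reset_notify_added;    /* already queued this sweep?    */
		struct cq *reset_notify_next;     /* stand-in for the list linkage */
		void     (*comp)(struct cq *cq);  /* completion handler            */
	};

	/* Queue a CQ for notification unless it is already on the list. */
	static void queue_for_notify(struct cq *mcq, struct cq **list)
	{
		if (mcq->comp && !mcq->reset_notify_added) {
			mcq->reset_notify_added = true;
			mcq->reset_notify_next = *list;
			*list = mcq;
		}
	}

	/* After walking all QPs, fire each queued CQ's handler exactly once. */
	static void notify_all(struct cq *list)
	{
		for (struct cq *mcq = list; mcq; mcq = mcq->reset_notify_next)
			mcq->comp(mcq);
	}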
|
D | qp.c |
   643  struct mlx4_ib_cq *mcq;  in create_qp_common() local
   872  mcq = to_mcq(init_attr->send_cq);  in create_qp_common()
   873  list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);  in create_qp_common()
   874  mcq = to_mcq(init_attr->recv_cq);  in create_qp_common()
   875  list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);  in create_qp_common()
   939  } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx4_ib_lock_cqs()
   954  } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx4_ib_unlock_cqs()
  1706  context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);  in __mlx4_ib_modify_qp()
  1707  context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);  in __mlx4_ib_modify_qp()
|
D | srq.c |
   186  to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;  in mlx4_ib_create_srq()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
D | en_cq.c |
   107  cq->mcq.set_ci_db = cq->wqres.db.db;  in mlx4_en_activate_cq()
   108  cq->mcq.arm_db = cq->wqres.db.db + 1;  in mlx4_en_activate_cq()
   109  *cq->mcq.set_ci_db = 0;  in mlx4_en_activate_cq()
   110  *cq->mcq.arm_db = 0;  in mlx4_en_activate_cq()
   150  &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,  in mlx4_en_activate_cq()
   155  cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;  in mlx4_en_activate_cq()
   156  cq->mcq.event = mlx4_en_cq_event;  in mlx4_en_activate_cq()
   203  mlx4_cq_free(priv->mdev->dev, &cq->mcq);  in mlx4_en_deactivate_cq()
   209  return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,  in mlx4_en_set_cq_moder()
   215  mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,  in mlx4_en_arm_cq()
|
D | cq.c |
    63  struct mlx4_cq *mcq, *temp;  in mlx4_cq_tasklet_cb() local
    69  list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {  in mlx4_cq_tasklet_cb()
    70  list_del_init(&mcq->tasklet_ctx.list);  in mlx4_cq_tasklet_cb()
    71  mcq->tasklet_ctx.comp(mcq);  in mlx4_cq_tasklet_cb()
    72  if (atomic_dec_and_test(&mcq->refcount))  in mlx4_cq_tasklet_cb()
    73  complete(&mcq->free);  in mlx4_cq_tasklet_cb()
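mlx4_cq_tasklet_cb() drains the process list with the _safe iterator (the completion handler may requeue entries) and drops the reference taken when each CQ was queued; the last reference completes &mcq->free so whoever is waiting to free the CQ can proceed. A sketch of that drain, with C11 atomics and a bare next pointer standing in for the kernel's atomic_t, completion, and list_head:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct cq {                       /* simplified stand-in for mlx4_cq */
		struct cq  *next;             /* stand-in for tasklet_ctx.list   */
		atomic_int  refcount;
		bool        freed;            /* stand-in for complete(&free)    */
		void      (*comp)(struct cq *cq);
	};

	static void cq_tasklet_cb(struct cq **process_list)
	{
		struct cq *mcq = *process_list, *temp;

		*process_list = NULL;
		while (mcq) {
			temp = mcq->next;         /* _safe: handler may relink mcq */
			mcq->next = NULL;         /* list_del_init()               */
			mcq->comp(mcq);
			/* Drop the queuing reference; last ref wakes the freer. */
			if (atomic_fetch_sub(&mcq->refcount, 1) == 1)
				mcq->freed = true;
			mcq = temp;
		}
	}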
|
D | en_tx.c |
   389  struct mlx4_cq *mcq = &cq->mcq;  in mlx4_en_process_tx_cq() local
   396  u32 cons_index = mcq->cons_index;  in mlx4_en_process_tx_cq()
   475  mcq->cons_index = cons_index;  in mlx4_en_process_tx_cq()
   476  mlx4_cq_set_ci(mcq);  in mlx4_en_process_tx_cq()
   495  void mlx4_en_tx_irq(struct mlx4_cq *mcq)  in mlx4_en_tx_irq() argument
   497  struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);  in mlx4_en_tx_irq()
|
D | en_rx.c |
   439  ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;  in mlx4_en_activate_rx_rings()
   771  index = cq->mcq.cons_index & ring->size_mask;  in mlx4_en_process_rx_cq()
   776  cq->mcq.cons_index & cq->size)) {  in mlx4_en_process_rx_cq()
  1004  ++cq->mcq.cons_index;  in mlx4_en_process_rx_cq()
  1005  index = (cq->mcq.cons_index) & ring->size_mask;  in mlx4_en_process_rx_cq()
  1013  mlx4_cq_set_ci(&cq->mcq);  in mlx4_en_process_rx_cq()
  1015  ring->cons = cq->mcq.cons_index;  in mlx4_en_process_rx_cq()
  1022  void mlx4_en_rx_irq(struct mlx4_cq *mcq)  in mlx4_en_rx_irq() argument
  1024  struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);  in mlx4_en_rx_irq()
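The mlx4_en_process_rx_cq() hits outline the ownership-bit consumer loop: the low bits of cons_index select a slot, the CQE's owner bit is compared against the current wrap state (cons_index & size) to decide whether the entry belongs to software, and the consumer index doorbell is rung once at the end. A sketch of that loop under simplified assumptions; the CQE layout is a stand-in, and the real mlx4_cq_set_ci() doorbell write is big-endian, which is elided here:

	#include <stdint.h>

	#define OWNER_BIT 0x80   /* stand-in for MLX4_CQE_OWNER_MASK */

	struct cqe { uint8_t owner_sr_opcode; /* payload fields elided */ };

	struct en_cq {                   /* simplified stand-in */
		struct cqe        *buf;
		uint32_t           size;     /* entry count, power of two */
		uint32_t           cons_index;
		volatile uint32_t *set_ci_db;
	};

	static int process_rx_cq(struct en_cq *cq, int budget)
	{
		uint32_t size_mask = cq->size - 1;
		int polled = 0;

		while (polled < budget) {
			struct cqe *cqe = &cq->buf[cq->cons_index & size_mask];

			/* The CQE is software-owned while its owner bit matches
			 * the wrap bit of the consumer index. */
			if (!!(cqe->owner_sr_opcode & OWNER_BIT) !=
			    !!(cq->cons_index & cq->size))
				break;

			/* ... handle the received packet here ... */
			++cq->cons_index;
			++polled;
		}
		*cq->set_ci_db = cq->cons_index & 0xffffff;  /* ring CI doorbell */
		return polled;
	}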
|
D | mlx4_en.h |
   336  struct mlx4_cq mcq;  member
   763  void mlx4_en_tx_irq(struct mlx4_cq *mcq);
   809  void mlx4_en_rx_irq(struct mlx4_cq *mcq);
|
D | en_netdev.c |
  1595  priv->rx_ring[i]->cqn = cq->mcq.cqn;  in mlx4_en_start_port()
  1641  err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,  in mlx4_en_start_port()
|
/linux-4.4.14/drivers/scsi/lpfc/ |
D | lpfc_sli.c |
  6704  struct lpfc_queue *mcq;  in lpfc_sli4_mbox_completions_pending() local
  6713  mcq = phba->sli4_hba.mbx_cq;  in lpfc_sli4_mbox_completions_pending()
  6714  idx = mcq->hba_index;  in lpfc_sli4_mbox_completions_pending()
  6715  while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {  in lpfc_sli4_mbox_completions_pending()
  6716  mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;  in lpfc_sli4_mbox_completions_pending()
  6722  idx = (idx + 1) % mcq->entry_count;  in lpfc_sli4_mbox_completions_pending()
  6723  if (mcq->hba_index == idx)  in lpfc_sli4_mbox_completions_pending()
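Here mcq is an lpfc SLI-4 queue, not a Mellanox CQ: lpfc_sli4_mbox_completions_pending() scans the mailbox CQ from hba_index, stopping at the first CQE the hardware has not yet marked valid, or when the scan wraps back to its starting point. A sketch of that bounded scan; the plain valid flag stands in for the bf_get_le32(lpfc_cqe_valid, ...) bit extraction, and the mailbox-CQE test is left as a hypothetical callback:

	#include <stdbool.h>
	#include <stdint.h>

	/* Simplified CQE: a flag stands in for the valid bitfield. */
	struct mcqe { bool valid; /* payload fields elided */ };

	struct queue {                   /* stand-in for struct lpfc_queue */
		struct mcqe *qe;
		uint32_t     entry_count;
		uint32_t     hba_index;      /* next slot hardware will fill */
	};

	/* is_mbox_cqe is a hypothetical predicate for "this CQE completes a
	 * mailbox command". */
	static bool mbox_completion_pending(const struct queue *mcq,
					    bool (*is_mbox_cqe)(const struct mcqe *))
	{
		uint32_t idx = mcq->hba_index;

		/* Visit only CQEs hardware has marked valid; stop on wrap. */
		while (mcq->qe[idx].valid) {
			if (is_mbox_cqe(&mcq->qe[idx]))
				return true;
			idx = (idx + 1) % mcq->entry_count;
			if (mcq->hba_index == idx)
				break;
		}
		return false;
	}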
|