Searched refs:eqe (Results 1 – 19 of 19) sorted by relevance

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
eq.c
123 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); in next_eqe_sw() local
124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; in next_eqe_sw()
129 struct mlx4_eqe *eqe = in next_slave_event_eqe() local
131 return (!!(eqe->owner & 0x80) ^ in next_slave_event_eqe()
133 eqe : NULL; in next_slave_event_eqe()
146 struct mlx4_eqe *eqe; in mlx4_gen_slave_eqe() local
150 for (eqe = next_slave_event_eqe(slave_eq); eqe; in mlx4_gen_slave_eqe()
151 eqe = next_slave_event_eqe(slave_eq)) { in mlx4_gen_slave_eqe()
152 slave = eqe->slave_id; in mlx4_gen_slave_eqe()
157 if (mlx4_GEN_EQE(dev, i, eqe)) in mlx4_gen_slave_eqe()
[all …]
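
The mlx4 hits above all hinge on the owner-bit convention: hardware flips the top bit of eqe->owner on each pass through the ring, so an entry belongs to software only while that bit matches the wrap state of the consumer index. A minimal sketch of that check, with simplified stand-ins for the driver's struct mlx4_eq and struct mlx4_eqe (the demo_* names and field layout are invented, not the real ones):

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the driver structures; the layout is illustrative only. */
struct demo_eqe {
    uint8_t owner;           /* hardware flips the top bit on each pass through the ring */
    uint8_t payload[31];
};

struct demo_eq {
    struct demo_eqe *ring;   /* nent entries, nent a power of two */
    uint32_t nent;
    uint32_t cons_index;     /* free-running consumer index */
};

/*
 * Return the next software-owned entry, or NULL if the ring is empty.
 * The owner bit has to match the wrap "generation" encoded in
 * cons_index & nent; while they disagree the entry still belongs to hardware.
 */
static struct demo_eqe *demo_next_eqe_sw(struct demo_eq *eq)
{
    struct demo_eqe *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];

    return (!!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}
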
resource_tracker.c
3085 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe) in mlx4_GEN_EQE() argument
3104 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type]; in mlx4_GEN_EQE()
3127 if (eqe->type == MLX4_EVENT_TYPE_CMD) { in mlx4_GEN_EQE()
3129 eqe->event.cmd.token = cpu_to_be16(event_eq->token); in mlx4_GEN_EQE()
3132 memcpy(mailbox->buf, (u8 *) eqe, 28); in mlx4_GEN_EQE()
mlx4.h
1159 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
eq.c
112 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); in next_eqe_sw() local
114 return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; in next_eqe_sw()
199 struct mlx5_eqe *eqe; in mlx5_eq_int() local
206 while ((eqe = next_eqe_sw(eq))) { in mlx5_eq_int()
214 eq->eqn, eqe_type_str(eqe->type)); in mlx5_eq_int()
215 switch (eqe->type) { in mlx5_eq_int()
217 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; in mlx5_eq_int()
229 rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in mlx5_eq_int()
231 eqe_type_str(eqe->type), eqe->type, rsn); in mlx5_eq_int()
232 mlx5_rsc_event(dev, rsn, eqe->type); in mlx5_eq_int()
[all …]
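
The mlx5 hits show the usual drain-and-dispatch shape: poll until next_eqe_sw() returns NULL, switch on eqe->type, and pull the 24-bit CQ or QP/SRQ number out of a big-endian field. A compressed sketch of one dispatch step, with made-up event codes and a small helper standing in for the kernel's be32_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* Illustrative event codes; the real driver uses MLX5_EVENT_TYPE_* values. */
enum { DEMO_EVENT_COMP = 0x00, DEMO_EVENT_QP_ERR = 0x10 };

struct demo_eqe {
    uint8_t  type;      /* event type, as in eqe->type above */
    uint32_t data_be;   /* big-endian CQN or QP/SRQ number field */
};

/* Convert a raw big-endian 32-bit field to host order (stand-in for be32_to_cpu). */
static uint32_t demo_be32_to_cpu(uint32_t be)
{
    const uint8_t *b = (const uint8_t *)&be;

    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
           ((uint32_t)b[2] << 8) | b[3];
}

/* Dispatch one entry the way mlx5_eq_int() does: mask to 24 bits, then fan out. */
static void demo_dispatch(const struct demo_eqe *eqe)
{
    switch (eqe->type) {
    case DEMO_EVENT_COMP: {
        uint32_t cqn = demo_be32_to_cpu(eqe->data_be) & 0xffffff;

        printf("completion on CQ %u\n", (unsigned)cqn);
        break;
    }
    case DEMO_EVENT_QP_ERR: {
        uint32_t rsn = demo_be32_to_cpu(eqe->data_be) & 0xffffff;

        printf("async event on QP/SRQ %u\n", (unsigned)rsn);
        break;
    }
    default:
        printf("unhandled event type 0x%x\n", eqe->type);
        break;
    }
}
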
qp.c
92 void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) in mlx5_eq_pagefault() argument
94 struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault; in mlx5_eq_pagefault()
107 pfault.event_subtype = eqe->sub_type; in mlx5_eq_pagefault()
115 eqe->sub_type, pfault.flags); in mlx5_eq_pagefault()
117 switch (eqe->sub_type) { in mlx5_eq_pagefault()
159 eqe->sub_type, qpn); in mlx5_eq_pagefault()
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_irq.c
206 static void qp_event_callback(struct ehca_shca *shca, u64 eqe, in qp_event_callback() argument
210 u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe); in qp_event_callback()
241 u64 eqe) in cq_event_callback() argument
244 u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe); in cq_event_callback()
263 static void parse_identifier(struct ehca_shca *shca, u64 eqe) in parse_identifier() argument
265 u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe); in parse_identifier()
269 qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0); in parse_identifier()
272 qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0); in parse_identifier()
275 qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0); in parse_identifier()
279 qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1); in parse_identifier()
[all …]
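
The eHCA hits treat the EQE as a plain u64 and extract fields from it with EHCA_BMASK_GET() before fanning out to the IB event callbacks. A small sketch of that style of decode, using invented bit positions rather than the driver's real EQE layout:

#include <stdint.h>
#include <stdio.h>

/* Invented field positions for illustration; the driver defines its own masks. */
#define DEMO_EQE_IDENTIFIER_SHIFT 56
#define DEMO_EQE_IDENTIFIER_MASK  0xffull
#define DEMO_EQE_TOKEN_SHIFT      0
#define DEMO_EQE_TOKEN_MASK       0xffffffffull

/* Stand-in for EHCA_BMASK_GET(): shift and mask one field out of the u64 EQE. */
static uint64_t demo_bmask_get(uint64_t eqe, unsigned int shift, uint64_t mask)
{
    return (eqe >> shift) & mask;
}

/* Decode one 64-bit EQE word and dispatch on its identifier field. */
static void demo_parse_identifier(uint64_t eqe)
{
    uint8_t  identifier = (uint8_t)demo_bmask_get(eqe, DEMO_EQE_IDENTIFIER_SHIFT,
                                                  DEMO_EQE_IDENTIFIER_MASK);
    uint32_t token      = (uint32_t)demo_bmask_get(eqe, DEMO_EQE_TOKEN_SHIFT,
                                                   DEMO_EQE_TOKEN_MASK);

    switch (identifier) {
    case 0x01:
        printf("QP event, token %u\n", (unsigned)token);
        break;
    case 0x02:
        printf("CQ event, token %u\n", (unsigned)token);
        break;
    default:
        printf("unknown identifier 0x%x\n", identifier);
        break;
    }
}
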
ehca_eq.c
158 void *eqe; in ehca_poll_eq() local
161 eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue); in ehca_poll_eq()
164 return eqe; in ehca_poll_eq()
ehca_classes.h
72 struct ehca_eqe *eqe; member
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_eq.c
236 struct mthca_eqe *eqe; in next_eqe_sw() local
237 eqe = get_eqe(eq, eq->cons_index); in next_eqe_sw()
238 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; in next_eqe_sw()
241 static inline void set_eqe_hw(struct mthca_eqe *eqe) in set_eqe_hw() argument
243 eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; in set_eqe_hw()
262 struct mthca_eqe *eqe; in mthca_eq_int() local
267 while ((eqe = next_eqe_sw(eq))) { in mthca_eq_int()
274 switch (eqe->type) { in mthca_eq_int()
276 disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; in mthca_eq_int()
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
[all …]
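
mthca uses an explicit ownership flag instead: software only consumes entries whose owner byte is not marked OWNER_HW, and hands each slot back with set_eqe_hw() once it has been processed. A compact sketch of that poll-and-return cycle over a simplified ring (names and the flag value are placeholders):

#include <stddef.h>
#include <stdint.h>

#define DEMO_EQ_ENTRY_OWNER_HW 0x80   /* placeholder for MTHCA_EQ_ENTRY_OWNER_HW */

struct demo_eqe {
    uint8_t owner;
    uint8_t type;
};

struct demo_eq {
    struct demo_eqe *ring;   /* nent entries, nent a power of two */
    uint32_t nent;
    uint32_t cons_index;
};

/* Entry is software's only while the hardware-owner flag is clear. */
static struct demo_eqe *demo_next_eqe_sw(struct demo_eq *eq)
{
    struct demo_eqe *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];

    return (eqe->owner & DEMO_EQ_ENTRY_OWNER_HW) ? NULL : eqe;
}

/* Hand the slot back to hardware once the event has been handled. */
static void demo_set_eqe_hw(struct demo_eqe *eqe)
{
    eqe->owner = DEMO_EQ_ENTRY_OWNER_HW;
}

/* Drain loop in the shape of mthca_eq_int(): consume, return, advance. */
static void demo_eq_poll(struct demo_eq *eq)
{
    struct demo_eqe *eqe;

    while ((eqe = demo_next_eqe_sw(eq)) != NULL) {
        /* ... dispatch on eqe->type here ... */
        demo_set_eqe_hw(eqe);
        ++eq->cons_index;
    }
}
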
/linux-4.1.27/drivers/infiniband/hw/mlx4/
mad.c
60 #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.bl… argument
61 #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_e… argument
972 struct mlx4_eqe *eqe) in propagate_pkey_ev() argument
974 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe), in propagate_pkey_ev()
975 GET_MASK_FROM_EQE(eqe)); in propagate_pkey_ev()
1035 struct mlx4_eqe *eqe = &(ew->ib_eqe); in handle_port_mgmt_change_event() local
1036 u8 port = eqe->event.port_mgmt_change.port; in handle_port_mgmt_change_event()
1041 switch (eqe->subtype) { in handle_port_mgmt_change_event()
1043 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); in handle_port_mgmt_change_event()
1048 u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); in handle_port_mgmt_change_event()
[all …]
main.c
2728 struct mlx4_eqe *eqe = NULL; in mlx4_ib_event() local
2745 eqe = (struct mlx4_eqe *)param; in mlx4_ib_event()
2781 memcpy(&ew->ib_eqe, eqe, sizeof *eqe); in mlx4_ib_event()
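
main.c shows the deferred side of the mlx4 IB driver: the event callback copies the EQE into a freshly allocated work item so the port-management change can be parsed later, outside the interrupt path. A rough sketch of that copy-and-queue idea, with a plain linked list standing in for the kernel workqueue:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Invented, trimmed-down EQE; the real struct mlx4_eqe is larger. */
struct demo_eqe {
    uint8_t subtype;
    uint8_t port;
    uint8_t payload[30];
};

/* Work item holding a private copy of the EQE, as the memcpy() above does
 * before the event is handed off for deferred processing. */
struct demo_event_work {
    struct demo_event_work *next;
    struct demo_eqe ib_eqe;
};

static struct demo_event_work *demo_pending;   /* stand-in for the workqueue */

static int demo_queue_port_mgmt_event(const struct demo_eqe *eqe)
{
    struct demo_event_work *ew = malloc(sizeof(*ew));

    if (!ew)
        return -1;

    /* Copy the entry: the original EQE slot may be reused by hardware. */
    memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
    ew->next = demo_pending;
    demo_pending = ew;
    return 0;
}
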
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
ehea_main.c
948 struct ehea_eqe *eqe; in ehea_qp_aff_irq_handler() local
954 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
956 while (eqe) { in ehea_qp_aff_irq_handler()
957 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); in ehea_qp_aff_irq_handler()
959 eqe->entry, qp_token); in ehea_qp_aff_irq_handler()
973 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
1161 static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) in ehea_parse_eqe() argument
1169 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); in ehea_parse_eqe()
1170 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); in ehea_parse_eqe()
1182 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { in ehea_parse_eqe()
[all …]
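
ehea's affinity IRQ handler drains its event queue the straightforward way: fetch an entry, pull the QP token out of the 64-bit word, handle it, and fetch again until the poll returns NULL. A skeleton of that loop with a stubbed-out poll function (none of the names or the token field position here are the real ehea API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_eqe {
    uint64_t entry;   /* ehea packs the whole event into one u64 */
};

/* Stub: a real implementation would return the next valid EQE or NULL. */
static struct demo_eqe *demo_poll_eq(void)
{
    return NULL;
}

static void demo_qp_aff_irq_handler(void)
{
    struct demo_eqe *eqe = demo_poll_eq();

    while (eqe) {
        /* Invented field position; ehea uses EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, ...). */
        uint32_t qp_token = (uint32_t)(eqe->entry & 0xffffffffu);

        printf("QP aff event, token %u\n", (unsigned)qp_token);
        eqe = demo_poll_eq();
    }
}
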
ehea_qmr.c
325 struct ehea_eqe *eqe; in ehea_poll_eq() local
329 eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue); in ehea_poll_eq()
332 return eqe; in ehea_poll_eq()
/linux-4.1.27/drivers/scsi/be2iscsi/
be_main.c
841 struct be_eq_entry *eqe = NULL; in be_isr_mcc() local
852 eqe = queue_tail_node(eq); in be_isr_mcc()
856 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] in be_isr_mcc()
858 if (((eqe->dw[offsetof(struct amap_eq_entry, in be_isr_mcc()
865 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); in be_isr_mcc()
867 eqe = queue_tail_node(eq); in be_isr_mcc()
886 struct be_eq_entry *eqe = NULL; in be_isr_msix() local
895 eqe = queue_tail_node(eq); in be_isr_msix()
899 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] in be_isr_msix()
904 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); in be_isr_msix()
[all …]
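
The be2iscsi ISRs walk the EQ by testing a valid bit inside the entry's raw dword array and clearing it once the event is consumed, so the slot reads as empty until hardware rewrites it. A simplified sketch of that valid-bit handshake; the bit position and layout are invented, where the driver derives them from its AMAP_* accessors:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_EQE_VALID_BIT 0x00000001u   /* invented position of the valid flag */

struct demo_eq_entry {
    uint32_t dw[4];                      /* raw dwords, as be2iscsi sees the entry */
};

static bool demo_eqe_is_valid(const struct demo_eq_entry *eqe)
{
    return (eqe->dw[0] & DEMO_EQE_VALID_BIT) != 0;
}

/* Consume every valid entry at the tail and mark each slot empty again,
 * the way the ISRs above clear the valid bit after handling an event. */
static unsigned int demo_drain_eq(struct demo_eq_entry *ring, unsigned int nent,
                                  unsigned int *tail)
{
    unsigned int handled = 0;

    while (demo_eqe_is_valid(&ring[*tail])) {
        /* ... hand the event to the completion path here ... */
        ring[*tail].dw[0] &= ~DEMO_EQE_VALID_BIT;
        *tail = (*tail + 1) % nent;
        ++handled;
    }
    return handled;
}
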
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c
933 struct ocrdma_eqe eqe; in ocrdma_irq_handler() local
941 eqe = *ptr; in ocrdma_irq_handler()
942 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); in ocrdma_irq_handler()
943 mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK) in ocrdma_irq_handler()
947 eq->q.id, eqe.id_valid); in ocrdma_irq_handler()
948 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) in ocrdma_irq_handler()
955 if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { in ocrdma_irq_handler()
956 cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; in ocrdma_irq_handler()
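
ocrdma copies each EQE out of the ring by value, converts it to CPU byte order, and then tests bits in id_valid before shifting out the CQ id. An illustration of that decode step with invented mask and shift values (the real OCRDMA_EQE_* constants live in the driver headers):

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for OCRDMA_EQE_*; do not rely on these values. */
#define DEMO_EQE_VALID_MASK        0x1u
#define DEMO_EQE_FOR_CQE_MASK      0x2u
#define DEMO_EQE_RESOURCE_ID_SHIFT 16

struct demo_eqe {
    uint32_t id_valid;   /* already converted to CPU byte order */
};

/* Passed by value, mirroring how the handler above copies the EQE out of the ring. */
static void demo_handle_eqe(struct demo_eqe eqe)
{
    if ((eqe.id_valid & DEMO_EQE_VALID_MASK) == 0)
        return;   /* entry not valid, nothing to do */

    if ((eqe.id_valid & DEMO_EQE_FOR_CQE_MASK) == 0) {
        /* Same branch shape as the hit above: extract the CQ id by shifting. */
        uint32_t cq_id = eqe.id_valid >> DEMO_EQE_RESOURCE_ID_SHIFT;

        printf("event for CQ %u\n", (unsigned)cq_id);
    } else {
        printf("other event, id_valid=0x%x\n", (unsigned)eqe.id_valid);
    }
}
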
/linux-4.1.27/drivers/scsi/lpfc/
lpfc_sli.c
249 struct lpfc_eqe *eqe; in lpfc_sli4_eq_get() local
255 eqe = q->qe[q->hba_index].eqe; in lpfc_sli4_eq_get()
258 if (!bf_get_le32(lpfc_eqe_valid, eqe)) in lpfc_sli4_eq_get()
276 return eqe; in lpfc_sli4_eq_get()
326 temp_eqe = q->qe[q->host_index].eqe; in lpfc_sli4_eq_release()
6747 struct lpfc_eqe *eqe; in lpfc_sli4_process_missed_mbox_completions() local
6781 while ((eqe = lpfc_sli4_eq_get(fpeq))) { in lpfc_sli4_process_missed_mbox_completions()
6782 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); in lpfc_sli4_process_missed_mbox_completions()
8846 struct lpfc_eqe *eqe; in lpfc_sli_issue_iocb() local
8876 while ((eqe = lpfc_sli4_eq_get(fpeq))) { in lpfc_sli_issue_iocb()
[all …]
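
lpfc keeps its entries in an indexed qe array and only hands one out when its little-endian valid bit is set; a separate release path (lpfc_sli4_eq_release) walks the host index. A trimmed-down sketch of the get-and-drain side, using a plain bool where the driver uses bf_get_le32():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_eqe {
    bool     valid;     /* stands in for the bf_get_le32(lpfc_eqe_valid, eqe) bit */
    uint32_t payload;
};

struct demo_queue {
    struct demo_eqe *qe;     /* entry array */
    uint32_t entry_count;
    uint32_t hba_index;      /* next slot to consume */
};

/* Return the next valid entry, or NULL if the current slot is not valid yet. */
static struct demo_eqe *demo_eq_get(struct demo_queue *q)
{
    struct demo_eqe *eqe = &q->qe[q->hba_index];

    if (!eqe->valid)
        return NULL;

    q->hba_index = (q->hba_index + 1) % q->entry_count;
    return eqe;
}

/* Drain loop in the shape of the lpfc callers above. */
static void demo_process_eq(struct demo_queue *q)
{
    struct demo_eqe *eqe;

    while ((eqe = demo_eq_get(q)) != NULL) {
        /* ... hand eqe to the per-EQ handler here ... */
        eqe->valid = false;   /* simplified release; lpfc releases entries separately */
    }
}
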
lpfc_sli4.h
115 struct lpfc_eqe *eqe; member
/linux-4.1.27/include/linux/mlx5/
driver.h
717 void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
be_main.c
2193 struct be_eq_entry *eqe; in events_get() local
2197 eqe = queue_tail_node(&eqo->q); in events_get()
2198 if (eqe->evt == 0) in events_get()
2202 eqe->evt = 0; in events_get()
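
The benet events_get() hit uses the whole evt word as the validity marker: an entry with evt == 0 is empty, and the driver zeroes evt after counting the event. A short sketch of that counting loop with simplified field names:

#include <stdint.h>

struct demo_eq_entry {
    uint32_t evt;   /* zero means "no event pending in this slot" */
};

/* Count and consume pending events, mirroring the shape of events_get(). */
static int demo_events_get(struct demo_eq_entry *ring, unsigned int nent,
                           unsigned int *tail)
{
    int num = 0;

    for (;;) {
        struct demo_eq_entry *eqe = &ring[*tail];

        if (eqe->evt == 0)
            break;        /* nothing more pending */

        eqe->evt = 0;     /* consume the slot so it is not counted twice */
        *tail = (*tail + 1) % nent;
        ++num;
    }
    return num;
}
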