aenq              660 drivers/infiniband/hw/efa/efa_admin_cmds_defs.h 		struct efa_admin_feature_aenq_desc aenq;
aenq              681 drivers/infiniband/hw/efa/efa_admin_cmds_defs.h 		struct efa_admin_feature_aenq_desc aenq;
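Both efa_admin_cmds_defs.h hits are the aenq member of the feature-descriptor unions, apparently the get-feature response union at line 660 and the set-feature command union at line 681. A minimal sketch of the descriptor those unions carry; the two field names are confirmed by the get_resp.u.aenq.supported_groups and cmd.u.aenq.enabled_groups accesses in the efa_com_cmd.c hits below, while the u32 widths are an assumption, not copied from the header.

    /* Shape implied by the accesses at efa_com_cmd.c lines 603-614;
     * field widths assumed. */
    struct efa_admin_feature_aenq_desc {
        u32 supported_groups;   /* event groups the device can raise */
        u32 enabled_groups;     /* groups currently enabled for delivery */
    };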
aenq              214 drivers/infiniband/hw/efa/efa_com.c 	struct efa_com_aenq *aenq = &edev->aenq;
aenq              223 drivers/infiniband/hw/efa/efa_com.c 	size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
aenq              224 drivers/infiniband/hw/efa/efa_com.c 	aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
aenq              226 drivers/infiniband/hw/efa/efa_com.c 	if (!aenq->entries)
aenq              229 drivers/infiniband/hw/efa/efa_com.c 	aenq->aenq_handlers = aenq_handlers;
aenq              230 drivers/infiniband/hw/efa/efa_com.c 	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
aenq              231 drivers/infiniband/hw/efa/efa_com.c 	aenq->cc = 0;
aenq              232 drivers/infiniband/hw/efa/efa_com.c 	aenq->phase = 1;
aenq              234 drivers/infiniband/hw/efa/efa_com.c 	addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
aenq              235 drivers/infiniband/hw/efa/efa_com.c 	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
aenq              240 drivers/infiniband/hw/efa/efa_com.c 	aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq              244 drivers/infiniband/hw/efa/efa_com.c 	aenq_caps |= (aenq->msix_vector_idx
aenq              253 drivers/infiniband/hw/efa/efa_com.c 	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
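Taken together, the efa_com.c hits at lines 214-253 are the whole AENQ bring-up: allocate a DMA-coherent array of EFA_ASYNC_QUEUE_DEPTH entries, seed the software state (handlers, depth, consumer counter cc = 0, phase = 1), program the base address as two 32-bit register halves, fold the depth and MSI-X vector into a caps register, and ring the consumer doorbell with the initial cc. A runnable userspace model of that sequence, with calloc() standing in for dma_alloc_coherent(), printf() for writel(), and an assumed caps-register layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ASYNC_QUEUE_DEPTH 32u   /* power of two; the masking in the
                                     * handler (lines 857/880) needs it */
    #define DEPTH_MASK 0xffffu      /* caps layout is an assumption */
    #define MSIX_SHIFT 16

    struct aenq_entry {
        uint32_t flags;
        uint32_t data[3];
    };

    struct aenq {
        struct aenq_entry *entries;
        uint16_t depth;
        uint16_t cc;                /* unmasked consumer counter */
        uint8_t  phase;
        uint16_t msix_vector_idx;
    };

    /* stand-in for writel() on the BAR */
    static void reg_write(const char *reg, uint32_t val)
    {
        printf("%-13s <- 0x%08x\n", reg, (unsigned)val);
    }

    static int aenq_init(struct aenq *q, uint16_t msix_idx)
    {
        uint64_t dma_addr;
        uint32_t caps;

        /* the driver uses dma_alloc_coherent(); calloc models it */
        q->entries = calloc(ASYNC_QUEUE_DEPTH, sizeof(*q->entries));
        if (!q->entries)
            return -1;

        q->depth = ASYNC_QUEUE_DEPTH;
        q->cc = 0;
        q->phase = 1;               /* device writes phase 1 on lap 0 */
        q->msix_vector_idx = msix_idx;

        /* base address as two 32-bit halves (lines 234-235) */
        dma_addr = (uint64_t)(uintptr_t)q->entries;
        reg_write("AENQ_BASE_LO", (uint32_t)dma_addr);
        reg_write("AENQ_BASE_HI", (uint32_t)(dma_addr >> 32));

        /* depth and MSI-X vector folded into one register (240-244) */
        caps  = q->depth & DEPTH_MASK;
        caps |= (uint32_t)q->msix_vector_idx << MSIX_SHIFT;
        reg_write("AENQ_CAPS", caps);

        /* initial consumer doorbell (line 253) */
        reg_write("AENQ_CONS_DB", q->cc);
        return 0;
    }

    int main(void)
    {
        struct aenq q;
        return aenq_init(&q, 3);
    }

Teardown (the hits at lines 676-693) is the mirror image: the same depth * sizeof(*entries) size is recomputed and handed back to dma_free_coherent().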
aenq              676 drivers/infiniband/hw/efa/efa_com.c 	struct efa_com_aenq *aenq = &edev->aenq;
aenq              692 drivers/infiniband/hw/efa/efa_com.c 	size = aenq->depth * sizeof(*aenq->entries);
aenq              693 drivers/infiniband/hw/efa/efa_com.c 	dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
aenq              832 drivers/infiniband/hw/efa/efa_com.c 	struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;
aenq              850 drivers/infiniband/hw/efa/efa_com.c 	struct efa_com_aenq *aenq = &edev->aenq;
aenq              857 drivers/infiniband/hw/efa/efa_com.c 	ci = aenq->cc & (aenq->depth - 1);
aenq              858 drivers/infiniband/hw/efa/efa_com.c 	phase = aenq->phase;
aenq              859 drivers/infiniband/hw/efa/efa_com.c 	aenq_e = &aenq->entries[ci]; /* Get first entry */
aenq              880 drivers/infiniband/hw/efa/efa_com.c 		if (ci == aenq->depth) {
aenq              884 drivers/infiniband/hw/efa/efa_com.c 		aenq_e = &aenq->entries[ci];
aenq              888 drivers/infiniband/hw/efa/efa_com.c 	aenq->cc += processed;
aenq              889 drivers/infiniband/hw/efa/efa_com.c 	aenq->phase = phase;
aenq              896 drivers/infiniband/hw/efa/efa_com.c 	writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
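The handler hits at lines 850-896 show phase-bit ring consumption: an entry belongs to the driver only while its phase bit equals aenq->phase, the masked index ci wraps at depth and flips the expected phase, and the unmasked counter cc grows monotonically and is the value written to the doorbell. A compilable model with a simulated device-side producer; in the real handler a dma_rmb() separates the phase check from the payload read:

    #include <stdint.h>
    #include <stdio.h>

    #define DEPTH 8u                /* power of two, like the real queue */
    #define PHASE_BIT 0x1u

    struct entry {
        uint32_t flags;             /* bit 0 carries the phase */
        uint32_t group;
    };

    static struct entry ring[DEPTH];
    static uint16_t cc;             /* unmasked consumer counter */
    static uint8_t phase = 1;

    /* device side: write the payload first, the phase bit last */
    static void produce(uint16_t pc, uint32_t group)
    {
        uint16_t pi = pc & (DEPTH - 1);
        uint8_t p = ((pc / DEPTH) & 1) ? 0 : 1;  /* flips every lap */

        ring[pi].group = group;
        ring[pi].flags = p;
    }

    /* driver side, shaped after efa_com.c lines 857-896 */
    static void aenq_intr_handler(void)
    {
        uint16_t ci = cc & (DEPTH - 1);
        uint32_t processed = 0;
        struct entry *e = &ring[ci];

        while ((e->flags & PHASE_BIT) == phase) {
            /* the real handler issues dma_rmb() here, so the phase
             * check cannot be reordered after the payload read */
            printf("event group %u at ci %u\n",
                   (unsigned)e->group, (unsigned)ci);

            processed++;
            ci++;
            if (ci == DEPTH) {      /* wrap: flip the expected phase */
                ci = 0;
                phase = !phase;
            }
            e = &ring[ci];
        }

        cc += processed;
        /* the driver writel()s cc to EFA_REGS_AENQ_CONS_DB_OFF (896) */
        printf("AENQ_CONS_DB <- %u\n", (unsigned)cc);
    }

    int main(void)
    {
        uint16_t pc;

        for (pc = 0; pc < 6; pc++)  /* first lap, device phase 1 */
            produce(pc, 100 + pc);
        aenq_intr_handler();        /* consumes 6, cc = 6 */

        for (; pc < 10; pc++)       /* wraps into lap 1, phase 0 */
            produce(pc, 100 + pc);
        aenq_intr_handler();        /* consumes 4 more, cc = 10 */
        return 0;
    }

Keeping cc unmasked means the doorbell reports total entries consumed rather than a ring offset, which is presumably how the device tracks free space across wraps.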
aenq              104 drivers/infiniband/hw/efa/efa_com.h 	struct efa_com_aenq aenq;
aenq              603 drivers/infiniband/hw/efa/efa_com_cmd.c 		  get_resp.u.aenq.supported_groups,
aenq              604 drivers/infiniband/hw/efa/efa_com_cmd.c 		  get_resp.u.aenq.enabled_groups);
aenq              606 drivers/infiniband/hw/efa/efa_com_cmd.c 	if ((get_resp.u.aenq.supported_groups & groups) != groups) {
aenq              610 drivers/infiniband/hw/efa/efa_com_cmd.c 			groups, get_resp.u.aenq.supported_groups);
aenq              614 drivers/infiniband/hw/efa/efa_com_cmd.c 	cmd.u.aenq.enabled_groups = groups;
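The efa_com_cmd.c hits at lines 603-614 are the group negotiation: read supported_groups with a get-feature command, refuse when any requested group is missing, otherwise send the requested bitmap back as enabled_groups in a set-feature command. The check condenses to a single mask test; a sketch with the admin-command plumbing elided and the concrete errno an assumption:

    #include <errno.h>
    #include <stdint.h>

    /* Condensed from efa_com_cmd.c lines 606-614. */
    static int aenq_set_groups(uint32_t supported, uint32_t requested,
                               uint32_t *enabled)
    {
        /* every requested bit must also be a supported bit */
        if ((supported & requested) != requested)
            return -EOPNOTSUPP;    /* the driver logs both bitmaps first */

        *enabled = requested;      /* goes out as cmd.u.aenq.enabled_groups */
        return 0;
    }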
aenq              457 drivers/infiniband/hw/efa/efa_main.c 	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;
aenq              908 drivers/net/ethernet/amazon/ena/ena_admin_defs.h 		struct ena_admin_feature_aenq_desc aenq;
aenq              943 drivers/net/ethernet/amazon/ena/ena_admin_defs.h 		struct ena_admin_feature_aenq_desc aenq;
aenq              153 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_com_aenq *aenq = &dev->aenq;
aenq              157 drivers/net/ethernet/amazon/ena/ena_com.c 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
aenq              159 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
aenq              162 drivers/net/ethernet/amazon/ena/ena_com.c 	if (!aenq->entries) {
aenq              167 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->head = aenq->q_depth;
aenq              168 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->phase = 1;
aenq              170 drivers/net/ethernet/amazon/ena/ena_com.c 	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
aenq              171 drivers/net/ethernet/amazon/ena/ena_com.c 	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
aenq              177 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq              188 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->aenq_handlers = aenq_handlers;
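The ENA bring-up at lines 153-188 mirrors the EFA sequence modeled above, with two differences visible in these hits: the allocation size comes from an ADMIN_AENQ_SIZE(q_depth) macro, presumably q_depth * sizeof(struct ena_admin_aenq_entry) by analogy with the EFA computation at efa_com.c line 223, and the software head starts at q_depth instead of 0. Since indexing always masks with q_depth - 1 (line 2021), head = q_depth still points at slot 0; a two-assert demonstration of that invariant:

    #include <assert.h>
    #include <stdint.h>

    #define Q_DEPTH 32    /* power of two, like ENA_ASYNC_QUEUE_DEPTH */

    int main(void)
    {
        uint16_t head = Q_DEPTH;    /* as at ena_com.c line 167 */

        /* masked index begins at slot 0 ... */
        assert((head & (Q_DEPTH - 1)) == 0);
        /* ... while the unmasked doorbell value already reads
         * "Q_DEPTH entries available to the device" */
        assert(head == Q_DEPTH);
        return 0;
    }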
aenq             1509 drivers/net/ethernet/amazon/ena/ena_com.c 	u16 depth = ena_dev->aenq.q_depth;
aenq             1511 drivers/net/ethernet/amazon/ena/ena_com.c 	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
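The WARN at line 1511 belongs to the AENQ-enable step and asserts the consequence of that initialization: nothing may have been consumed before enable, so head must still equal q_depth. As I read the surrounding function, whose other lines contain no lowercase aenq and so do not appear in this search, enable then publishes that value through the head doorbell, handing every entry to the device; the register and offset names below are therefore assumptions:

        u16 depth = ena_dev->aenq.q_depth;

        WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

        /* hand all q_depth entries to the device; the doorbell offset
         * name is assumed, not taken from these hits */
        writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);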
aenq             1533 drivers/net/ethernet/amazon/ena/ena_com.c 	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
aenq             1535 drivers/net/ethernet/amazon/ena/ena_com.c 			get_resp.u.aenq.supported_groups, groups_flag);
aenq             1545 drivers/net/ethernet/amazon/ena/ena_com.c 	cmd.u.aenq.enabled_groups = groups_flag;
aenq             1636 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_com_aenq *aenq = &ena_dev->aenq;
aenq             1654 drivers/net/ethernet/amazon/ena/ena_com.c 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
aenq             1655 drivers/net/ethernet/amazon/ena/ena_com.c 	if (ena_dev->aenq.entries)
aenq             1656 drivers/net/ethernet/amazon/ena/ena_com.c 		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
aenq             1657 drivers/net/ethernet/amazon/ena/ena_com.c 				  aenq->dma_addr);
aenq             1658 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->entries = NULL;
aenq             1951 drivers/net/ethernet/amazon/ena/ena_com.c 	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
aenq             1952 drivers/net/ethernet/amazon/ena/ena_com.c 	       sizeof(get_resp.u.aenq));
aenq             1999 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
aenq             2015 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_com_aenq *aenq = &dev->aenq;
aenq             2021 drivers/net/ethernet/amazon/ena/ena_com.c 	masked_head = aenq->head & (aenq->q_depth - 1);
aenq             2022 drivers/net/ethernet/amazon/ena/ena_com.c 	phase = aenq->phase;
aenq             2023 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
aenq             2049 drivers/net/ethernet/amazon/ena/ena_com.c 		if (unlikely(masked_head == aenq->q_depth)) {
aenq             2053 drivers/net/ethernet/amazon/ena/ena_com.c 		aenq_e = &aenq->entries[masked_head];
aenq             2057 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->head += processed;
aenq             2058 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->phase = phase;
aenq             2066 drivers/net/ethernet/amazon/ena/ena_com.c 	writel_relaxed((u32)aenq->head,
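The ENA consumer loop at lines 2015-2070 is the same phase walk as the EFA one above, with two refinements visible here: the wrap test sits under unlikely(), and the head doorbell uses writel_relaxed() rather than writel(). A relaxed MMIO write is not ordered against earlier normal memory reads on its own, so it is only correct behind an explicit barrier; as I read the surrounding source (the barrier lines contain no lowercase aenq, hence their absence from this listing), the driver pairs it with mb() before the doorbell and a dma_rmb() between the phase check and the payload read. The pattern, sketched in kernel style:

        /* inside the loop, once the expected phase bit is seen: */
        dma_rmb();    /* phase check completes before payload reads */
        /* ... dispatch through aenq->aenq_handlers ... */

        /* after the loop: */
        aenq->head += processed;
        aenq->phase = phase;

        mb();         /* all descriptor reads done before the doorbell */
        writel_relaxed((u32)aenq->head,
                       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);

A plain writel() would imply the ordering itself; splitting it into mb() plus writel_relaxed() makes the required barrier explicit and avoids paying for a second, redundant one.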
aenq              321 drivers/net/ethernet/amazon/ena/ena_com.h 	struct ena_com_aenq aenq;
aenq              358 drivers/net/ethernet/amazon/ena/ena_com.h 	struct ena_admin_feature_aenq_desc aenq;
aenq             2668 drivers/net/ethernet/amazon/ena/ena_netdev.c 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
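The final hit shows the driver-side policy, the inverse of the hard failure in the com-layer checks above: ena_netdev.c first intersects its wanted groups with what the device reported, so the subsequent configuration call can never ask for an unsupported group. Condensed from the surrounding function, with the group names recalled from ena_admin_defs.h rather than taken from these hits:

        u32 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
                          BIT(ENA_ADMIN_FATAL_ERROR) |
                          BIT(ENA_ADMIN_WARNING) |
                          BIT(ENA_ADMIN_NOTIFICATION) |
                          BIT(ENA_ADMIN_KEEP_ALIVE);

        aenq_groups &= get_feat_ctx->aenq.supported_groups;  /* line 2668 */

        rc = ena_com_set_aenq_config(ena_dev, aenq_groups);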