cqes              337 drivers/infiniband/hw/cxgb4/restrack.c 		       struct t4_cqe *cqes)
cqes              342 drivers/infiniband/hw/cxgb4/restrack.c 	if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
cqes              345 drivers/infiniband/hw/cxgb4/restrack.c 	if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
cqes              354 drivers/infiniband/hw/cxgb4/restrack.c 		       struct t4_cqe *cqes)
cqes              362 drivers/infiniband/hw/cxgb4/restrack.c 	if (fill_cqe(msg, cqes, idx, "swcq_idx"))
cqes              367 drivers/infiniband/hw/cxgb4/restrack.c 	if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
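
The six cxgb4 restrack hits above are two sibling helpers, fill_hwcqes() and fill_swcqes(), each dumping a pair of adjacent CQE snapshots (the entry at the current index plus its successor) into a resource-tracking netlink message. A minimal sketch of that pattern, assuming the fill_cqe() helper and the t4_cq/t4_cqe types from the surrounding file; fill_cqe_pair and the index arithmetic are illustrative, not the exact upstream code:

    static int fill_cqe_pair(struct sk_buff *msg, struct t4_cq *cq,
                             struct t4_cqe *cqes, const char *qid_name)
    {
            u16 idx = cq->cidx;

            /* snapshot at the consumer index ... */
            if (fill_cqe(msg, cqes, idx, qid_name))
                    goto err;
            /* ... and the entry after it, wrapping at the queue size */
            idx = (idx + 1 == cq->size) ? 0 : idx + 1;
            if (fill_cqe(msg, cqes + 1, idx, qid_name))
                    goto err;
            return 0;
    err:
            return -EMSGSIZE;
    }
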
cqes             1425 drivers/net/ethernet/broadcom/cnic.c 				   struct kcqe *cqes[], u32 num_cqes)
cqes             1434 drivers/net/ethernet/broadcom/cnic.c 					  cqes, num_cqes);
cqes             1552 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             1582 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             1583 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
cqes             1881 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             1935 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             1936 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
cqes             2000 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             2033 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             2034 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
cqes             2229 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             2235 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             2236 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
cqes             2244 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             2250 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             2251 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
cqes             2354 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             2427 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             2428 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
cqes             2501 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             2534 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = (struct kcqe *) &kcqe;
cqes             2535 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
cqes             2585 drivers/net/ethernet/broadcom/cnic.c 	struct kcqe *cqes[1];
cqes             2650 drivers/net/ethernet/broadcom/cnic.c 	cqes[0] = &kcqe;
cqes             2651 drivers/net/ethernet/broadcom/cnic.c 	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
cqes              369 drivers/net/ethernet/broadcom/cnic_if.h 	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
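
Every cnic.c hit above follows one idiom: a completion built on the stack is published through a one-element pointer array, because cnic_reply_bnx2x_kcqes() and the indicate_kcqes() callback (cnic_if.h:369) take an array of kcqe pointers plus a count. A hedged sketch of that wrapper step, with cnic_reply_one_kcqe as a hypothetical name and the kcqe contents elided:

    static void cnic_reply_one_kcqe(struct cnic_dev *dev, int ulp_type,
                                    struct kcqe *kcqe)
    {
            struct kcqe *cqes[1];

            /* wrap the single completion in the array-plus-count form the
             * ULP's indicate_kcqes() callback expects */
            cqes[0] = kcqe;
            cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
    }
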
cqes              440 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	sq->stats->cqes += i;
cqes              210 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		s->rx_xdp_tx_cqe   += xdpsq_stats->cqes;
cqes              239 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		s->tx_xdp_cqes    += xdpsq_red_stats->cqes;
cqes              265 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 		s->tx_xsk_cqes                   += xsksq_stats->cqes;
cqes              299 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 			s->tx_cqes		+= sq_stats->cqes;
cqes             1492 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
cqes             1504 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
cqes             1514 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
cqes             1545 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c 	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
cqes              290 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h 	u64 cqes ____cacheline_aligned_in_smp;
cqes              303 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h 	u64 cqes ____cacheline_aligned_in_smp;
cqes              515 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	stats->cqes += i;
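
In mlx5, cqes is a per-send-queue u64 counter, cacheline-aligned inside the stats structs (en_stats.h:290/303). The datapath bumps it once per poll by the number of completions handled (en_tx.c:515, en/xdp.c:440), and en_stats.c folds each queue's counter into the software totals and ethtool descriptors. A minimal sketch of that increment-then-aggregate split, with sketch_* as hypothetical names:

    struct sketch_sq_stats {
            u64 cqes ____cacheline_aligned_in_smp;  /* CQEs handled so far */
    };

    /* poll side: one add per completion-handler pass */
    static void sketch_poll_done(struct sketch_sq_stats *stats, int polled)
    {
            stats->cqes += polled;
    }

    /* readout side: fold every queue's counter into one software total */
    static u64 sketch_fold_cqes(const struct sketch_sq_stats *per_sq,
                                int nqueues)
    {
            u64 total = 0;
            int q;

            for (q = 0; q < nqueues; q++)
                    total += per_sq[q].cqes;
            return total;
    }
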
cqes              169 drivers/nvme/host/pci.c 	volatile struct nvme_completion *cqes;
cqes              927 drivers/nvme/host/pci.c 	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
cqes              949 drivers/nvme/host/pci.c 	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
cqes             1003 drivers/nvme/host/pci.c 		if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
cqes             1347 drivers/nvme/host/pci.c 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
cqes             1486 drivers/nvme/host/pci.c 	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
cqes             1488 drivers/nvme/host/pci.c 	if (!nvmeq->cqes)
cqes             1506 drivers/nvme/host/pci.c 	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
cqes             1535 drivers/nvme/host/pci.c 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
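
For the NVMe host driver, cqes is the completion queue itself: a volatile, DMA-coherent array of struct nvme_completion allocated at queue setup (pci.c:1486), zeroed on reset (1535), freed on teardown (1347/1506), and scanned using the NVMe phase-bit convention at pci.c:927. A sketch of that phase check, assuming only the nvme_completion layout from include/linux/nvme.h:

    static inline bool sketch_cqe_pending(volatile struct nvme_completion *cqes,
                                          u16 head, u8 phase)
    {
            /* bit 0 of Status is the Phase Tag; the controller flips it on
             * every wrap of the queue, so a match means a fresh entry */
            return (le16_to_cpu(cqes[head].status) & 1) == phase;
    }
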
cqes              350 drivers/nvme/target/admin-cmd.c 	id->cqes = (0x4 << 4) | 0x4;
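
The constant at admin-cmd.c:350 fills the Identify Controller CQES field, in which the low nibble is the required (minimum) CQ entry size and the high nibble the maximum, both as log2 of the byte size; (0x4 << 4) | 0x4 == 0x44 therefore advertises 2^4 = 16-byte completion entries as both bounds, matching sizeof(struct nvme_completion). The same field appears host-side as the __u8 cqes member of struct nvme_id_ctrl (include/linux/nvme.h:259, a few hits below).
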
cqes              156 fs/io_uring.c  	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
cqes              567 fs/io_uring.c  	return &rings->cqes[tail & ctx->cq_mask];
cqes             3361 fs/io_uring.c  	off = struct_size(rings, cqes, cq_entries);
cqes             3958 fs/io_uring.c  	p->cq_off.cqes = offsetof(struct io_rings, cqes);
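
On the kernel side of io_uring, cqes[] is a flexible array at the tail of struct io_rings (io_uring.c:156): the ring allocation is sized with struct_size() over cq_entries (3361), a slot is chosen by masking the free-running tail (567), and the array's byte offset inside the mapping is exported through cq_off.cqes (3958), mirrored by the __u32 cqes member of the uapi io_cqring_offsets below. A reduced model of the layout and slot arithmetic, with sketch_rings as a hypothetical stand-in for struct io_rings:

    struct sketch_rings {
            u32                     cq_tail;
            u32                     cq_mask;        /* cq_entries - 1 */
            struct io_uring_cqe     cqes[] ____cacheline_aligned_in_smp;
    };

    /* the shared region is allocated with
     * struct_size(rings, cqes, cq_entries), so the header and the CQE
     * array are contiguous; the producer indexes by masking the tail */
    static struct io_uring_cqe *sketch_next_cqe(struct sketch_rings *rings)
    {
            return &rings->cqes[rings->cq_tail & rings->cq_mask];
    }
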
cqes              259 include/linux/nvme.h 	__u8			cqes;
cqes              114 include/uapi/linux/io_uring.h 	__u32 cqes;
cqes               51 tools/io_uring/io_uring-bench.c 	struct io_uring_cqe *cqes;
cqes              257 tools/io_uring/io_uring-bench.c 		cqe = &ring->cqes[head & cq_ring_mask];
cqes              449 tools/io_uring/io_uring-bench.c 	ptr = mmap(0, p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe),
cqes              457 tools/io_uring/io_uring-bench.c 	cring->cqes = ptr + p.cq_off.cqes;
cqes               40 tools/io_uring/liburing.h 	struct io_uring_cqe *cqes;
cqes               31 tools/io_uring/queue.c 			*cqe_ptr = &cq->cqes[head & mask];
cqes               41 tools/io_uring/setup.c 	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
cqes               54 tools/io_uring/setup.c 	cq->cqes = ptr + p->cq_off.cqes;
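
The tools/io_uring hits are the userspace half of the same contract: the CQ ring is mmap()ed over cq_off.cqes + cq_entries * sizeof(struct io_uring_cqe) bytes, the cqes pointer is rebased at cq_off.cqes inside that mapping, and completions are reaped at head & mask (queue.c:31, io_uring-bench.c:257). A condensed sketch of the mapping step, assuming the io_uring_params returned by a prior io_uring_setup() call:

    #include <sys/mman.h>
    #include <linux/io_uring.h>

    static struct io_uring_cqe *sketch_map_cqes(struct io_uring_params *p,
                                                int ring_fd)
    {
            size_t sz = p->cq_off.cqes +
                        p->cq_entries * sizeof(struct io_uring_cqe);
            void *ptr;

            /* one shared mapping covers the CQ header and the CQE array */
            ptr = mmap(0, sz, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_POPULATE, ring_fd,
                       IORING_OFF_CQ_RING);
            if (ptr == MAP_FAILED)
                    return NULL;

            /* cqes[] begins cq_off.cqes bytes into the mapping */
            return (struct io_uring_cqe *)((char *)ptr + p->cq_off.cqes);
    }
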