cqe_sz 149 drivers/crypto/hisilicon/qm.c #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ cqe_sz 153 drivers/crypto/hisilicon/qm.c ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) cqe_sz 155 drivers/crypto/hisilicon/qm.c #define QM_MK_CQC_DW3_V2(cqe_sz) \ cqe_sz 156 drivers/crypto/hisilicon/qm.c ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) cqe_sz 3645 drivers/infiniband/hw/bnxt_re/ib_verbs.c resp.cqe_sz = sizeof(struct cq_base); cqe_sz 83 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; cqe_sz 462 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; cqe_sz 859 drivers/infiniband/hw/mlx5/cq.c cq->mcq.cqe_sz = cqe_size; cqe_sz 970 drivers/infiniband/hw/mlx5/cq.c MLX5_SET(cqc, cqc, cqe_sz, cqe_sz 1060 drivers/infiniband/hw/mlx5/cq.c cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; cqe_sz 1067 drivers/infiniband/hw/mlx5/cq.c dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; cqe_sz 1069 drivers/infiniband/hw/mlx5/cq.c memcpy(dest, cqe, cq->mcq.cqe_sz); cqe_sz 1307 drivers/infiniband/hw/mlx5/cq.c MLX5_SET(cqc, cqc, cqe_sz, cqe_sz 660 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) cqe_sz 662 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h return buf + idx * cqe_sz; cqe_sz 1552 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mcq->cqe_sz = 64; cqe_sz 2235 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); cqe_sz 489 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->cq.mcq.cqe_sz = 64; cqe_sz 767 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cq->mcq.cqe_sz = 64; cqe_sz 171 drivers/net/ethernet/mellanox/mlx5/core/wq.c u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7; cqe_sz 42 include/linux/mlx5/cq.h int cqe_sz; cqe_sz 3526 include/linux/mlx5/mlx5_ifc.h u8 cqe_sz[0x3]; cqe_sz 59 include/uapi/rdma/bnxt_re-abi.h __u32 cqe_sz;