This source file includes following definitions.
- cqe_sz_to_mlx_sz
- mlx5_cq_set_ci
- mlx5_cq_arm
- mlx5_cq_hold
- mlx5_cq_put
- mlx5_dump_err_cqe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33 #ifndef MLX5_CORE_CQ_H
34 #define MLX5_CORE_CQ_H
35
36 #include <rdma/ib_verbs.h>
37 #include <linux/mlx5/driver.h>
38 #include <linux/refcount.h>
39
/* Core representation of a mlx5 Completion Queue (CQ). */
struct mlx5_core_cq {
	u32			cqn;		/* CQ number (hardware identifier) */
	int			cqe_sz;		/* CQE size in bytes */
	__be32		       *set_ci_db;	/* doorbell record: consumer index (big-endian) */
	__be32		       *arm_db;		/* doorbell record: arm state (big-endian) */
	struct mlx5_uars_page  *uar;		/* UAR page used to ring the doorbell */
	refcount_t		refcount;	/* users of this CQ; freed when it drops to 0 */
	struct completion	free;		/* completed by mlx5_cq_put() on last reference */
	unsigned		vector;
	unsigned int		irqn;		/* IRQ number servicing this CQ's EQ */
	/* completion handler; eqe is the event that triggered it */
	void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
	/* asynchronous event handler (errors etc.) */
	void (*event)(struct mlx5_core_cq *, enum mlx5_event);
	u32			cons_index;	/* software consumer index */
	unsigned		arm_sn;		/* arm sequence number (low 2 bits used when arming) */
	struct mlx5_rsc_debug  *dbg;		/* debugfs resource entry */
	int			pid;		/* creating process, for debug */
	struct {
		struct list_head list;		/* linkage on the per-EQ tasklet list */
		/* completion callback invoked from tasklet context */
		void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
		void *priv;
	} tasklet_ctx;
	int			reset_notify_added;	/* nonzero once queued on reset_notify */
	struct list_head	reset_notify;
	struct mlx5_eq_comp    *eq;		/* completion EQ this CQ reports to */
	u16			uid;		/* user index (isolation domain) for commands */
};
66
67
/* Error syndromes reported in error CQEs (see struct mlx5_err_cqe). */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
83
/* CQE ownership bit mask and CQE opcodes. */
enum {
	MLX5_CQE_OWNER_MASK	= 1,	/* ownership (SW/HW) bit in op_own */
	MLX5_CQE_REQ		= 0,	/* requester (send) completion */
	MLX5_CQE_RESP_WR_IMM	= 1,	/* responder: RDMA write with immediate */
	MLX5_CQE_RESP_SEND	= 2,	/* responder: send */
	MLX5_CQE_RESP_SEND_IMM	= 3,	/* responder: send with immediate */
	MLX5_CQE_RESP_SEND_INV	= 4,	/* responder: send with invalidate */
	MLX5_CQE_RESIZE_CQ	= 5,	/* CQ resize marker */
	MLX5_CQE_SIG_ERR	= 12,	/* signature (T10-DIF) error */
	MLX5_CQE_REQ_ERR	= 13,	/* requester error */
	MLX5_CQE_RESP_ERR	= 14,	/* responder error */
	MLX5_CQE_INVALID	= 15,	/* invalid/uninitialized CQE */
};
97
/* Field-select flags for CQ moderation changes; presumably used when
 * building the MODIFY_CQ command — confirm against mlx5_core_modify_cq
 * callers. */
enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
};
103
/* MODIFY_CQ opmod and field-select masks for the resize operation. */
enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};
110
/* Parameters for modifying an existing CQ; @type selects the union arm. */
struct mlx5_cq_modify_params {
	int type;
	union {
		struct {
			u32 page_offset;	/* offset of CQ buffer in its first page */
			u8  log_cq_size;	/* new CQ size, log2 of entries */
		} resize;

		struct {
		} moder;	/* moderation params — none defined yet */

		struct {
		} mapping;	/* page mapping params — none defined yet */
	} params;
};
126
/* Hardware encodings of the CQE stride (size) configured in the CQ context. */
enum {
	CQE_STRIDE_64		= 0,	/* 64-byte CQEs */
	CQE_STRIDE_128		= 1,	/* 128-byte CQEs */
	CQE_STRIDE_128_PAD	= 2,	/* 128-byte CQEs with padding enabled */
};
132
/* Maximum values expressible in the cqc.cq_period / cqc.cq_max_count
 * fields: all-ones for the field's bit width. */
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
#define MLX5_MAX_CQ_COUNT  (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
135
136 static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
137 {
138 return padding_128_en ? CQE_STRIDE_128_PAD :
139 size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
140 }
141
142 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
143 {
144 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
145 }
146
/* Arm commands: request notification on any CQE, or solicited only. */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL	= 1 << 24,	/* notify on solicited completions only */
	MLX5_CQ_DB_REQ_NOT	= 0 << 24	/* notify on next completion */
};
151
/*
 * mlx5_cq_arm() - arm a CQ to raise a completion event.
 * @cq: CQ to arm.
 * @cmd: MLX5_CQ_DB_REQ_NOT or MLX5_CQ_DB_REQ_NOT_SOL.
 * @uar_page: mapped UAR page through which the doorbell is rung.
 * @cons_index: current consumer index; low 24 bits are significant.
 *
 * Updates the arm doorbell record in host memory, then rings the UAR
 * doorbell. The statement order around wmb() is load-bearing; do not
 * reorder.
 */
static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;		/* 2-bit arm sequence number */
	ci = cons_index & 0xffffff;	/* 24-bit consumer index */

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO, so
	 * hardware never observes the doorbell ahead of the record.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
}
175
/*
 * mlx5_cq_hold() - take a reference on a CQ.
 * @cq: CQ to reference.
 *
 * Pair with mlx5_cq_put(); the CQ's free completion fires only when
 * the last reference is dropped.
 */
static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
{
	refcount_inc(&cq->refcount);
}
180
181 static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
182 {
183 if (refcount_dec_and_test(&cq->refcount))
184 complete(&cq->free);
185 }
186
/* Create a CQ from a prebuilt CREATE_CQ command in @in; fills @out. */
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen);
/* Destroy a CQ; presumably waits for cq->free — confirm in the .c file. */
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
/* Query CQ context into @out. */
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen);
/* Issue a MODIFY_CQ command built by the caller in @in. */
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
/* Convenience wrapper to change event moderation (period/count). */
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
/*
 * mlx5_dump_err_cqe() - hex-dump an error CQE at KERN_WARNING level.
 * @dev: device the CQE came from (currently unused; kept for API symmetry).
 * @err_cqe: the error CQE to dump, printed in 16-byte rows.
 */
static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
				     struct mlx5_err_cqe *err_cqe)
{
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
		       sizeof(*err_cqe), false);
}
/* Register / unregister the CQ's debugfs entry (cq->dbg). */
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
205
206 #endif