#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
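
/*
 * Usage sketch (mirrors how the counter tables in en_stats.c are
 * consumed): each helper pulls one counter out of a stats struct at the
 * byte offset recorded in a counter_desc table, e.g.
 *
 *	for (i = 0; i < NUM_SW_COUNTERS; i++)
 *		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
 *						   sw_stats_desc, i);
 */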

#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKRQ_STAT(type, fld) "rx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_XSKSQ_STAT(type, fld) "tx%d_xsk_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	size_t		offset;
};
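
/*
 * The MLX5E_DECLARE_*_STAT macros above expand to exactly a counter_desc
 * initializer: a format string (whose "%d" is later filled in with the
 * ring/channel index) plus the field's byte offset. Illustrative entry,
 * in the style of the tables in en_stats.c:
 *
 *	static const struct counter_desc rq_stats_desc[] = {
 *		{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 *		{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 *	};
 */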

struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 tx_added_vlan_packets;
	u64 tx_nop;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_ecn_mark;
	u64 rx_removed_vlan_packets;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_redirect;
	u64 rx_xdp_tx_xmit;
	u64 rx_xdp_tx_mpwqe;
	u64 rx_xdp_tx_inlnw;
	u64 rx_xdp_tx_nops;
	u64 rx_xdp_tx_full;
	u64 rx_xdp_tx_err;
	u64 rx_xdp_tx_cqe;
	u64 tx_csum_none;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 tx_recover;
	u64 tx_cqes;
	u64 tx_queue_wake;
	u64 tx_cqe_err;
	u64 tx_xdp_xmit;
	u64 tx_xdp_mpwqe;
	u64 tx_xdp_inlnw;
	u64 tx_xdp_nops;
	u64 tx_xdp_full;
	u64 tx_xdp_err;
	u64 tx_xdp_cqes;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler_cqes;
	u64 rx_mpwqe_filler_strides;
	u64 rx_oversize_pkts_sw_drop;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;
	u64 rx_cache_waive;
	u64 rx_congst_umr;
	u64 rx_arfs_err;
	u64 rx_recover;
	u64 ch_events;
	u64 ch_poll;
	u64 ch_arm;
	u64 ch_aff_change;
	u64 ch_force_irq;
	u64 ch_eq_rearm;

#ifdef CONFIG_MLX5_EN_TLS
	u64 tx_tls_encrypted_packets;
	u64 tx_tls_encrypted_bytes;
	u64 tx_tls_ctx;
	u64 tx_tls_ooo;
	u64 tx_tls_dump_packets;
	u64 tx_tls_dump_bytes;
	u64 tx_tls_resync_bytes;
	u64 tx_tls_skip_no_sync_data;
	u64 tx_tls_drop_no_sync_data;
	u64 tx_tls_drop_bypass_req;
#endif

	u64 rx_xsk_packets;
	u64 rx_xsk_bytes;
	u64 rx_xsk_csum_complete;
	u64 rx_xsk_csum_unnecessary;
	u64 rx_xsk_csum_unnecessary_inner;
	u64 rx_xsk_csum_none;
	u64 rx_xsk_ecn_mark;
	u64 rx_xsk_removed_vlan_packets;
	u64 rx_xsk_xdp_drop;
	u64 rx_xsk_xdp_redirect;
	u64 rx_xsk_wqe_err;
	u64 rx_xsk_mpwqe_filler_cqes;
	u64 rx_xsk_mpwqe_filler_strides;
	u64 rx_xsk_oversize_pkts_sw_drop;
	u64 rx_xsk_buff_alloc_err;
	u64 rx_xsk_cqe_compress_blks;
	u64 rx_xsk_cqe_compress_pkts;
	u64 rx_xsk_congst_umr;
	u64 rx_xsk_arfs_err;
	u64 tx_xsk_xmit;
	u64 tx_xsk_mpwqe;
	u64 tx_xsk_inlnw;
	u64 tx_xsk_full;
	u64 tx_xsk_err;
	u64 tx_xsk_cqes;
};
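
/*
 * mlx5e_sw_stats is the software aggregate for the whole netdev. It is
 * not touched on the fast path; the group's update handler re-derives it
 * by summing the per-ring mlx5e_rq_stats/mlx5e_sq_stats/
 * mlx5e_xdpsq_stats/mlx5e_ch_stats instances defined below, roughly:
 *
 *	for (i = 0; i < priv->max_nch; i++) {
 *		struct mlx5e_rq_stats *rq_stats = &priv->channel_stats[i].rq;
 *
 *		s->rx_packets += rq_stats->packets;
 *		s->rx_bytes   += rq_stats->bytes;
 *		...
 *	}
 */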

struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
	u32 rx_if_down_packets;
};
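
/*
 * Sourced from the device Q counters (QUERY_Q_COUNTER):
 * rx_out_of_buffer counts packets dropped because no receive WQE was
 * posted, rx_if_down_packets counts packets received while the
 * interface was down.
 */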

struct mlx5e_vnic_env_stats {
	__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};

#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
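
/*
 * The vport counters are kept as the raw QUERY_VPORT_COUNTER mailbox
 * output and decoded on demand with the accessor above, e.g.:
 *
 *	stats->multicast =
 *		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 */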

#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO				8
#define PPORT_ETH_EXT_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
		   counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
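
/*
 * In the PPCNT register layouts every 64-bit counter is described as a
 * {<name>_high, <name>_low} pair of 32-bit fields, with _high first.
 * Token-pasting "_high" onto the field name and reading it with
 * MLX5_GET64 therefore returns the whole 64-bit counter in one access:
 *
 *	u64 fcs_err = PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
 */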

struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
};

#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

#define PCIE_PERF_GET64(pcie_stats, c) \
	MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		   counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 ecn_mark;
	u64 removed_vlan_packets;
	u64 xdp_drop;
	u64 xdp_redirect;
	u64 wqe_err;
	u64 mpwqe_filler_cqes;
	u64 mpwqe_filler_strides;
	u64 oversize_pkts_sw_drop;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
	u64 cache_waive;
	u64 congst_umr;
	u64 arfs_err;
	u64 recover;
};

struct mlx5e_sq_stats {
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial;
	u64 csum_partial_inner;
	u64 added_vlan_packets;
	u64 nop;
#ifdef CONFIG_MLX5_EN_TLS
	u64 tls_encrypted_packets;
	u64 tls_encrypted_bytes;
	u64 tls_ctx;
	u64 tls_ooo;
	u64 tls_dump_packets;
	u64 tls_dump_bytes;
	u64 tls_resync_bytes;
	u64 tls_skip_no_sync_data;
	u64 tls_drop_no_sync_data;
	u64 tls_drop_bypass_req;
#endif

	u64 csum_none;
	u64 stopped;
	u64 dropped;
	u64 recover;

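	/* The fields below are dirtied on the completion path and start on
	 * their own cacheline so TX submission (fields above) and CQ
	 * polling do not false-share.
	 */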
	u64 cqes ____cacheline_aligned_in_smp;
	u64 wake;
	u64 cqe_err;
};

struct mlx5e_xdpsq_stats {
	u64 xmit;
	u64 mpwqe;
	u64 inlnw;
	u64 nops;
	u64 full;
	u64 err;

	u64 cqes ____cacheline_aligned_in_smp;
};
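
/*
 * One mlx5e_xdpsq_stats instance backs each XDP-style send queue a
 * channel can own: the XDP_REDIRECT SQ ("tx%d_xdp_" prefix,
 * MLX5E_DECLARE_XDPSQ_STAT), the RQ's XDP_TX SQ ("rx%d_xdp_tx_",
 * MLX5E_DECLARE_RQ_XDPSQ_STAT) and the AF_XDP transmit SQ ("tx%d_xsk_",
 * MLX5E_DECLARE_XSKSQ_STAT).
 */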

struct mlx5e_ch_stats {
	u64 events;
	u64 poll;
	u64 arm;
	u64 aff_change;
	u64 force_irq;
	u64 eq_rearm;
};

struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vnic_env_stats vnic;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

enum {
	MLX5E_NDO_UPDATE_STATS = BIT(0x1),
};

struct mlx5e_priv;
struct mlx5e_stats_grp {
	u16 update_stats_mask;
	int (*get_num_stats)(struct mlx5e_priv *priv);
	int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
	int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
	void (*update_stats)(struct mlx5e_priv *priv);
};
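
/*
 * Illustrative group definition (a sketch; the real tables live in
 * en_stats.c). A group whose update_stats_mask contains
 * MLX5E_NDO_UPDATE_STATS is also refreshed from ndo_get_stats64:
 *
 *	static const struct mlx5e_stats_grp sw_stats_grp = {
 *		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
 *		.get_num_stats	   = mlx5e_grp_sw_get_num_stats,
 *		.fill_strings	   = mlx5e_grp_sw_fill_strings,
 *		.fill_stats	   = mlx5e_grp_sw_fill_stats,
 *		.update_stats	   = mlx5e_grp_sw_update_stats,
 *	};
 */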

extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;

void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);

#endif /* __MLX5_EN_STATS_H__ */