This source file includes the following definitions:
- mlx5e_grp_sw_get_num_stats
- mlx5e_grp_sw_fill_strings
- mlx5e_grp_sw_fill_stats
- mlx5e_grp_sw_update_stats
- mlx5e_grp_q_get_num_stats
- mlx5e_grp_q_fill_strings
- mlx5e_grp_q_fill_stats
- mlx5e_grp_q_update_stats
- mlx5e_grp_vnic_env_get_num_stats
- mlx5e_grp_vnic_env_fill_strings
- mlx5e_grp_vnic_env_fill_stats
- mlx5e_grp_vnic_env_update_stats
- mlx5e_grp_vport_get_num_stats
- mlx5e_grp_vport_fill_strings
- mlx5e_grp_vport_fill_stats
- mlx5e_grp_vport_update_stats
- mlx5e_grp_802_3_get_num_stats
- mlx5e_grp_802_3_fill_strings
- mlx5e_grp_802_3_fill_stats
- mlx5e_grp_802_3_update_stats
- mlx5e_grp_2863_get_num_stats
- mlx5e_grp_2863_fill_strings
- mlx5e_grp_2863_fill_stats
- mlx5e_grp_2863_update_stats
- mlx5e_grp_2819_get_num_stats
- mlx5e_grp_2819_fill_strings
- mlx5e_grp_2819_fill_stats
- mlx5e_grp_2819_update_stats
- mlx5e_grp_phy_get_num_stats
- mlx5e_grp_phy_fill_strings
- mlx5e_grp_phy_fill_stats
- mlx5e_grp_phy_update_stats
- mlx5e_grp_eth_ext_get_num_stats
- mlx5e_grp_eth_ext_fill_strings
- mlx5e_grp_eth_ext_fill_stats
- mlx5e_grp_eth_ext_update_stats
- mlx5e_grp_pcie_get_num_stats
- mlx5e_grp_pcie_fill_strings
- mlx5e_grp_pcie_fill_stats
- mlx5e_grp_pcie_update_stats
- mlx5e_grp_per_tc_prio_get_num_stats
- mlx5e_grp_per_port_buffer_congest_fill_strings
- mlx5e_grp_per_port_buffer_congest_fill_stats
- mlx5e_grp_per_tc_prio_update_stats
- mlx5e_grp_per_tc_congest_prio_get_num_stats
- mlx5e_grp_per_tc_congest_prio_update_stats
- mlx5e_grp_per_port_buffer_congest_get_num_stats
- mlx5e_grp_per_port_buffer_congest_update_stats
- mlx5e_grp_per_prio_traffic_get_num_stats
- mlx5e_grp_per_prio_traffic_fill_strings
- mlx5e_grp_per_prio_traffic_fill_stats
- mlx5e_query_pfc_combined
- mlx5e_query_global_pause_combined
- mlx5e_grp_per_prio_pfc_get_num_stats
- mlx5e_grp_per_prio_pfc_fill_strings
- mlx5e_grp_per_prio_pfc_fill_stats
- mlx5e_grp_per_prio_get_num_stats
- mlx5e_grp_per_prio_fill_strings
- mlx5e_grp_per_prio_fill_stats
- mlx5e_grp_per_prio_update_stats
- mlx5e_grp_pme_get_num_stats
- mlx5e_grp_pme_fill_strings
- mlx5e_grp_pme_fill_stats
- mlx5e_grp_ipsec_get_num_stats
- mlx5e_grp_ipsec_fill_strings
- mlx5e_grp_ipsec_fill_stats
- mlx5e_grp_ipsec_update_stats
- mlx5e_grp_tls_get_num_stats
- mlx5e_grp_tls_fill_strings
- mlx5e_grp_tls_fill_stats
- mlx5e_grp_channels_get_num_stats
- mlx5e_grp_channels_fill_strings
- mlx5e_grp_channels_fill_stats
#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"

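/* Each statistics group below follows the same ethtool plumbing: a
 * counter_desc table names the counters and records their offsets,
 * *_get_num_stats() reports how many entries the group contributes,
 * *_fill_strings() and *_fill_stats() append names and values at a
 * running index, and *_update_stats() refreshes the group's backing
 * storage, either from per-ring software counters or from firmware
 * queries.
 */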
static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },

#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
#endif

	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
};

#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)

static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_SW_COUNTERS;
}

static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_SW_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
	return idx;
}

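/* Fold the per-channel ring counters (RQ, per-TC SQs, XDP and XSK
 * queues) into the single struct mlx5e_sw_stats snapshot. The sums run
 * over priv->max_nch channels and priv->max_opened_tc TCs, so rings
 * that are currently closed are still accounted; barrier() at the
 * bottom of the TC loop is a compiler barrier.
 */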
static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	int i;

	memset(s, 0, sizeof(*s));

	for (i = 0; i < priv->max_nch; i++) {
		struct mlx5e_channel_stats *channel_stats =
			&priv->channel_stats[i];
		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
		int j;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes += rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes += rq_stats->lro_bytes;
		s->rx_ecn_mark += rq_stats->ecn_mark;
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_redirect += rq_stats->xdp_redirect;
		s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
		s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
		s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
		s->rx_xdp_tx_nops += xdpsq_stats->nops;
		s->rx_xdp_tx_full += xdpsq_stats->full;
		s->rx_xdp_tx_err += xdpsq_stats->err;
		s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
		s->rx_wqe_err += rq_stats->wqe_err;
		s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;
		s->rx_congst_umr += rq_stats->congst_umr;
		s->rx_arfs_err += rq_stats->arfs_err;
		s->rx_recover += rq_stats->recover;
		s->ch_events += ch_stats->events;
		s->ch_poll += ch_stats->poll;
		s->ch_arm += ch_stats->arm;
		s->ch_aff_change += ch_stats->aff_change;
		s->ch_force_irq += ch_stats->force_irq;
		s->ch_eq_rearm += ch_stats->eq_rearm;

		s->tx_xdp_xmit += xdpsq_red_stats->xmit;
		s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
		s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
		s->tx_xdp_nops += xdpsq_red_stats->nops;
		s->tx_xdp_full += xdpsq_red_stats->full;
		s->tx_xdp_err += xdpsq_red_stats->err;
		s->tx_xdp_cqes += xdpsq_red_stats->cqes;

		s->rx_xsk_packets += xskrq_stats->packets;
		s->rx_xsk_bytes += xskrq_stats->bytes;
		s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
		s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
		s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
		s->rx_xsk_csum_none += xskrq_stats->csum_none;
		s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
		s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
		s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
		s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
		s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
		s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
		s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
		s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
		s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
		s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
		s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
		s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
		s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
		s->tx_xsk_xmit += xsksq_stats->xmit;
		s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
		s->tx_xsk_inlnw += xsksq_stats->inlnw;
		s->tx_xsk_full += xsksq_stats->full;
		s->tx_xsk_err += xsksq_stats->err;
		s->tx_xsk_cqes += xsksq_stats->cqes;

		for (j = 0; j < priv->max_opened_tc; j++) {
			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

			s->tx_packets += sq_stats->packets;
			s->tx_bytes += sq_stats->bytes;
			s->tx_tso_packets += sq_stats->tso_packets;
			s->tx_tso_bytes += sq_stats->tso_bytes;
			s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
			s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
			s->tx_nop += sq_stats->nop;
			s->tx_queue_stopped += sq_stats->stopped;
			s->tx_queue_wake += sq_stats->wake;
			s->tx_queue_dropped += sq_stats->dropped;
			s->tx_cqe_err += sq_stats->cqe_err;
			s->tx_recover += sq_stats->recover;
			s->tx_xmit_more += sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none += sq_stats->csum_none;
			s->tx_csum_partial += sq_stats->csum_partial;
#ifdef CONFIG_MLX5_EN_TLS
			s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
			s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
			s->tx_tls_ctx += sq_stats->tls_ctx;
			s->tx_tls_ooo += sq_stats->tls_ooo;
			s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
			s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
			s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
			s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
#endif
			s->tx_cqes += sq_stats->cqes;

			barrier();
		}
	}
}

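/* Device queue counters, read with the QUERY_Q_COUNTER command:
 * rx_out_of_buffer comes from the Q counter attached to the regular
 * RQs, rx_if_down_packets from the counter attached to the drop RQ.
 * Each is exposed only when the corresponding counter was allocated.
 */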
static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

static const struct counter_desc drop_rq_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
};

#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
#define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)

static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (priv->q_counter)
		num_stats += NUM_Q_COUNTERS;

	if (priv->drop_rq_q_counter)
		num_stats += NUM_DROP_RQ_COUNTERS;

	return num_stats;
}

static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       q_stats_desc[i].format);

	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       drop_rq_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	int i;

	for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   q_stats_desc, i);
	for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
						   drop_rq_stats_desc, i);
	return idx;
}

static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];

	if (priv->q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out,
				       sizeof(out)))
		qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
						  out, out_of_buffer);
	if (priv->drop_rq_q_counter &&
	    !mlx5_core_query_q_counter(priv->mdev, priv->drop_rq_q_counter, 0,
				       out, sizeof(out)))
		qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out, out,
						    out_of_buffer);
}

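/* vNIC environment counters, refreshed with the QUERY_VNIC_ENV command.
 * Each counter is exposed only when the matching general capability bit
 * (nic_receive_steering_discard / vnic_env_int_rq_oob) is set; the
 * steering-discard counter is 64 bits wide, the internal-RQ
 * out-of-buffer counter 32 bits.
 */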
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
static const struct counter_desc vnic_env_stats_steer_desc[] = {
	{ "rx_steer_missed_packets",
	  VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};

static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
	{ "dev_internal_queue_oob",
	  VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
};

#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
	 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
	(MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
	 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)

static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
	       NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}

static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_steer_desc[i].format);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       vnic_env_stats_dev_oob_desc[i].format);
	return idx;
}

static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	int i;

	for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_steer_desc, i);

	for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
		data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
						  vnic_env_stats_dev_oob_desc, i);
	return idx;
}

static void mlx5e_grp_vnic_env_update_stats(struct mlx5e_priv *priv)
{
	u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
	int outlen = MLX5_ST_SZ_BYTES(query_vnic_env_out);
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
		return;

	MLX5_SET(query_vnic_env_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

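/* Per-vport traffic counters (Ethernet and RDMA unicast, multicast and
 * broadcast), refreshed with the QUERY_VPORT_COUNTER command for the
 * local vport.
 */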
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
	  VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
	  VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)

static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_VPORT_COUNTERS;
}

static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
						  vport_stats_desc, i);
	return idx;
}

static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

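/* The pport groups below read the PPCNT access register, one counter
 * set per group. The 64-bit counters are laid out as <name>_high/_low
 * word pairs, so the descriptors record the offset of the _high word
 * and the values are read with MLX5E_READ_CTR64_BE.
 */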
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)

static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_802_3_COUNTERS;
}

static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
						  pport_802_3_stats_desc, i);
	return idx;
}

#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)

void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)

static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2863_COUNTERS;
}

static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
						  pport_2863_stats_desc, i);
	return idx;
}

static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)

static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PPORT_2819_COUNTERS;
}

static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);
	return idx;
}

static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

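/* Physical layer counters. The group always exposes one extra slot for
 * link_down_events_phy, which lives in the phys_layer_cntrs set as a
 * plain 32-bit field rather than a 64-bit high/low pair; the
 * statistical and per-lane error counters depend on PCAM feature bits.
 */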
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc
pport_phy_statistical_err_lanes_stats_desc[] = {
	{ "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
	{ "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
	{ "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
	{ "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
};

#define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
	ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)

static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int num_stats;

	/* "1" for the link_down_events_phy special counter */
	num_stats = 1;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
		     NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;

	num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
		     NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;

	return num_stats;
}

static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_phy_statistical_stats_desc[i].format);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_phy_statistical_err_lanes_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i;

	/* link_down_events_phy is a plain 32-bit field, not a __be64 pair */
	data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
			       counter_set.phys_layer_cntrs.link_down_events);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return idx;

	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
		data[idx++] =
			MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
					    pport_phy_statistical_stats_desc, i);

	if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
		for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
						    pport_phy_statistical_err_lanes_stats_desc,
						    i);
	return idx;
}

static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		return;

	out = pstats->phy_statistical_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

#define PPORT_ETH_EXT_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pport_eth_ext_stats_desc[] = {
	{ "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
};

#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)

static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
{
	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		return NUM_PPORT_ETH_EXT_COUNTERS;

	return 0;
}

static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
					  int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pport_eth_ext_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
					int idx)
{
	int i;

	if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
		for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
						    pport_eth_ext_stats_desc, i);
	return idx;
}

static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;

	if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	out = pstats->eth_ext_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}

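/* PCIe performance counters, read through the MPCNT access register.
 * The signal-integrity and stall counters are 32-bit fields, the
 * buffer-overflow counter a 64-bit high/low pair; each subset is gated
 * on its own MCAM feature bit.
 */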
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

#define PCIE_PERF_OFF64(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
static const struct counter_desc pcie_perf_stats_desc64[] = {
	{ "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
};

static const struct counter_desc pcie_perf_stall_stats_desc[] = {
	{ "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
	{ "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
	{ "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
	{ "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
};

#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)

static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
{
	int num_stats = 0;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		num_stats += NUM_PCIE_PERF_COUNTERS;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		num_stats += NUM_PCIE_PERF_COUNTERS64;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		num_stats += NUM_PCIE_PERF_STALL_COUNTERS;

	return num_stats;
}

static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
				       int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stats_desc64[i].format);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			strcpy(data + (idx++) * ETH_GSTRING_LEN,
			       pcie_perf_stall_stats_desc[i].format);
	return idx;
}

static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
				     int idx)
{
	int i;

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
		for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stats_desc64, i);

	if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
		for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
						    pcie_perf_stall_stats_desc, i);
	return idx;
}

static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

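/* Per-traffic-class and per-TC congestion counters, again PPCNT groups,
 * available only when the sbcam_reg capability is set. The descriptor
 * format strings carry a %d that is expanded with the priority number
 * when the ethtool strings are generated.
 */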
#define PPORT_PER_TC_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
	{ "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
};

#define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)

#define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)

static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
	{ "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
	{ "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
};

#define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
	ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)

static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_port_buffer_congest_fill_strings(struct mlx5e_priv *priv,
							  u8 *data, int idx)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_prio_stats_desc[i].format, prio);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_tc_congest_prio_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_port_buffer_congest_fill_stats(struct mlx5e_priv *priv,
							u64 *data, int idx)
{
	struct mlx5e_pport_stats *pport = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	int i, prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return idx;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
						    pport_per_tc_prio_stats_desc, i);
		for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
						    pport_per_tc_congest_prio_stats_desc, i);
	}

	return idx;
}

static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return 0;

	return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
}

static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	void *out;
	int prio;

	if (!MLX5_CAP_GEN(mdev, sbcam_reg))
		return;

	MLX5_SET(ppcnt_reg, in, pnat, 2);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_tc_congest_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}
}

static int mlx5e_grp_per_port_buffer_congest_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
	       mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
}

static void mlx5e_grp_per_port_buffer_congest_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_grp_per_tc_prio_update_stats(priv);
	mlx5e_grp_per_tc_congest_prio_update_stats(priv);
}

#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)

static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
{
	return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}

static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
						   u8 *data,
						   int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_traffic_stats_desc[i].format, prio);
	}

	return idx;
}

static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
						 u64 *data,
						 int idx)
{
	int i, prio;

	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_traffic_stats_desc, i);
	}

	return idx;
}

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

static const struct counter_desc pport_pfc_stall_stats_desc[] = {
	{ "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
	{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};

#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
					    MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
					    MLX5_CAP_DEBUG((priv)->mdev, stall_detect))

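/* Pause/PFC counters are emitted per priority only for the priorities
 * with PFC enabled (mlx5e_query_pfc_combined() returns that bitmap),
 * plus one "global" set when plain link pause is enabled; the
 * pause-storm stall counters are appended when the pfcc_mask and
 * stall_detect capabilities allow.
 */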
static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 pfc_en_tx;
	u8 pfc_en_rx;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);

	return err ? 0 : pfc_en_tx | pfc_en_rx;
}

static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 rx_pause;
	u32 tx_pause;
	int err;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return false;

	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);

	return err ? false : rx_pause | tx_pause;
}

static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
{
	return (mlx5e_query_global_pause_combined(priv) +
		hweight8(mlx5e_query_pfc_combined(priv))) *
	       NUM_PPORT_PER_PRIO_PFC_COUNTERS +
	       NUM_PPORT_PFC_STALL_COUNTERS(priv);
}

static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
					       u8 *data,
					       int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			char pfc_string[ETH_GSTRING_LEN];

			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				pport_per_prio_pfc_stats_desc[i].format, "global");
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_pfc_stall_stats_desc[i].format);

	return idx;
}

static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
					     u64 *data,
					     int idx)
{
	unsigned long pfc_combined;
	int i, prio;

	pfc_combined = mlx5e_query_pfc_combined(priv);
	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	if (mlx5e_query_global_pause_combined(priv)) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
			data[idx++] =
				MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						    pport_per_prio_pfc_stats_desc, i);
		}
	}

	for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
						  pport_pfc_stall_stats_desc, i);

	return idx;
}

static int mlx5e_grp_per_prio_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_grp_per_prio_traffic_get_num_stats() +
	       mlx5e_grp_per_prio_pfc_get_num_stats(priv);
}

static int mlx5e_grp_per_prio_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
	return idx;
}

static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
	idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
	return idx;
}

static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
		return;

	MLX5_SET(ppcnt_reg, in, local_port, 1);
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}

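/* Port module event (PME) counters maintained by the core driver. The
 * "offset" in these descriptors is an index into the status/error
 * counter arrays of struct mlx5_pme_stats, encoded as sizeof(u64)
 * times the event enum value.
 */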
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};

#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)

static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
{
	return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
}

static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	int i;

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);

	return idx;
}

static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
				    int idx)
{
	struct mlx5_pme_stats pme_stats;
	int i;

	mlx5_get_pme_stats(priv->mdev, &pme_stats);

	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
						   mlx5e_pme_status_desc, i);

	for (i = 0; i < NUM_PME_ERR_STATS; i++)
		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
						   mlx5e_pme_error_desc, i);

	return idx;
}

static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_ipsec_get_count(priv);
}

static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
					int idx)
{
	return idx + mlx5e_ipsec_get_strings(priv,
					     data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
				      int idx)
{
	return idx + mlx5e_ipsec_get_stats(priv, data + idx);
}

static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_update_stats(priv);
}

static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv)
{
	return mlx5e_tls_get_count(priv);
}

static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data,
				      int idx)
{
	return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
}

static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
{
	return idx + mlx5e_tls_get_stats(priv, data + idx);
}

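/* Per-ring descriptors for the channels group: one copy of
 * rq_stats_desc/sq_stats_desc and the XDP/XSK variants is emitted per
 * ring, with the ring index expanded into the %d of each format string
 * (see mlx5e_grp_channels_fill_strings in the index above).
 */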
static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};

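/* Per-SQ (TX queue) counters; the TLS offload entries are compiled in only
 * when CONFIG_MLX5_EN_TLS is enabled.
 */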
static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
#ifdef CONFIG_MLX5_EN_TLS
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
#endif
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
};

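/* Two views of the same mlx5e_xdpsq_stats layout: rq_xdpsq_stats_desc covers
 * the XDP_TX queue owned by an RQ, while xdpsq_stats_desc covers the queue
 * used for XDP_REDIRECT transmits; only the string prefixes produced by the
 * two DECLARE macros differ.
 */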
static const struct counter_desc rq_xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

static const struct counter_desc xdpsq_stats_desc[] = {
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

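/* AF_XDP (XSK) zero-copy queues reuse the generic RQ and XDP-SQ stats
 * structures but are exposed under their own strings; they are reported only
 * once an XSK socket has ever been used on the device (priv->xsk.ever_used).
 */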
static const struct counter_desc xskrq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
	{ MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
};

static const struct counter_desc xsksq_stats_desc[] = {
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
	{ MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
};

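/* Per-channel NAPI/EQ housekeeping counters, shared by all queues of a
 * channel.
 */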
static const struct counter_desc ch_stats_desc[] = {
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
	{ MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
};

#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)

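/* Size the group for the maximum number of channels and TCs that may ever be
 * opened rather than the currently active ones, so the set of reported
 * counters stays stable across channel reconfiguration. XSK queues are
 * counted only if XSK was ever used.
 */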
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
	int max_nch = priv->max_nch;

	return (NUM_RQ_STATS * max_nch) +
	       (NUM_CH_STATS * max_nch) +
	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
	       (NUM_RQ_XDPSQ_STATS * max_nch) +
	       (NUM_XDPSQ_STATS * max_nch) +
	       (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
	       (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
}

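/* The traversal order here must match mlx5e_grp_channels_fill_stats() below
 * exactly, otherwise ethtool pairs counter names with the wrong values.
 */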
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
					   int idx)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				ch_stats_desc[j].format, i);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xskrq_stats_desc[j].format, i);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				rq_xdpsq_stats_desc[j].format, i);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				sprintf(data + (idx++) * ETH_GSTRING_LEN,
					sq_stats_desc[j].format,
					i + tc * max_nch);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xsksq_stats_desc[j].format, i);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
				xdpsq_stats_desc[j].format, i);
	}

	return idx;
}

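/* Values are emitted in exactly the order the strings were laid out in
 * mlx5e_grp_channels_fill_strings() above.
 */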
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
					 int idx)
{
	bool is_xsk = priv->xsk.ever_used;
	int max_nch = priv->max_nch;
	int i, j, tc;

	for (i = 0; i < max_nch; i++)
		for (j = 0; j < NUM_CH_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
						     ch_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_RQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
						     rq_stats_desc, j);
		for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xskrq,
						     xskrq_stats_desc, j);
		for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq,
						     rq_xdpsq_stats_desc, j);
	}

	for (tc = 0; tc < priv->max_opened_tc; tc++)
		for (i = 0; i < max_nch; i++)
			for (j = 0; j < NUM_SQ_STATS; j++)
				data[idx++] =
					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
							     sq_stats_desc, j);

	for (i = 0; i < max_nch; i++) {
		for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xsksq,
						     xsksq_stats_desc, j);
		for (j = 0; j < NUM_XDPSQ_STATS; j++)
			data[idx++] =
				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq,
						     xdpsq_stats_desc, j);
	}

	return idx;
}

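/* Table of all statistics groups. Array order defines the layout of the
 * ethtool strings/values buffers; groups carrying MLX5E_NDO_UPDATE_STATS in
 * update_stats_mask are additionally refreshed on the ndo_get_stats64 path,
 * while groups without an update_stats callback need no refresh at all.
 */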
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
	{
		.get_num_stats = mlx5e_grp_sw_get_num_stats,
		.fill_strings = mlx5e_grp_sw_fill_strings,
		.fill_stats = mlx5e_grp_sw_fill_stats,
		.update_stats = mlx5e_grp_sw_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_q_get_num_stats,
		.fill_strings = mlx5e_grp_q_fill_strings,
		.fill_stats = mlx5e_grp_q_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_q_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vnic_env_get_num_stats,
		.fill_strings = mlx5e_grp_vnic_env_fill_strings,
		.fill_stats = mlx5e_grp_vnic_env_fill_stats,
		.update_stats = mlx5e_grp_vnic_env_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_vport_get_num_stats,
		.fill_strings = mlx5e_grp_vport_fill_strings,
		.fill_stats = mlx5e_grp_vport_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_vport_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_802_3_get_num_stats,
		.fill_strings = mlx5e_grp_802_3_fill_strings,
		.fill_stats = mlx5e_grp_802_3_fill_stats,
		.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
		.update_stats = mlx5e_grp_802_3_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2863_get_num_stats,
		.fill_strings = mlx5e_grp_2863_fill_strings,
		.fill_stats = mlx5e_grp_2863_fill_stats,
		.update_stats = mlx5e_grp_2863_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_2819_get_num_stats,
		.fill_strings = mlx5e_grp_2819_fill_strings,
		.fill_stats = mlx5e_grp_2819_fill_stats,
		.update_stats = mlx5e_grp_2819_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_phy_get_num_stats,
		.fill_strings = mlx5e_grp_phy_fill_strings,
		.fill_stats = mlx5e_grp_phy_fill_stats,
		.update_stats = mlx5e_grp_phy_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
		.fill_strings = mlx5e_grp_eth_ext_fill_strings,
		.fill_stats = mlx5e_grp_eth_ext_fill_stats,
		.update_stats = mlx5e_grp_eth_ext_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pcie_get_num_stats,
		.fill_strings = mlx5e_grp_pcie_fill_strings,
		.fill_stats = mlx5e_grp_pcie_fill_stats,
		.update_stats = mlx5e_grp_pcie_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
		.fill_strings = mlx5e_grp_per_prio_fill_strings,
		.fill_stats = mlx5e_grp_per_prio_fill_stats,
		.update_stats = mlx5e_grp_per_prio_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_pme_get_num_stats,
		.fill_strings = mlx5e_grp_pme_fill_strings,
		.fill_stats = mlx5e_grp_pme_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
		.fill_strings = mlx5e_grp_ipsec_fill_strings,
		.fill_stats = mlx5e_grp_ipsec_fill_stats,
		.update_stats = mlx5e_grp_ipsec_update_stats,
	},
	{
		.get_num_stats = mlx5e_grp_tls_get_num_stats,
		.fill_strings = mlx5e_grp_tls_fill_strings,
		.fill_stats = mlx5e_grp_tls_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_channels_get_num_stats,
		.fill_strings = mlx5e_grp_channels_fill_strings,
		.fill_stats = mlx5e_grp_channels_fill_stats,
	},
	{
		.get_num_stats = mlx5e_grp_per_port_buffer_congest_get_num_stats,
		.fill_strings = mlx5e_grp_per_port_buffer_congest_fill_strings,
		.fill_stats = mlx5e_grp_per_port_buffer_congest_fill_stats,
		.update_stats = mlx5e_grp_per_port_buffer_congest_update_stats,
	},
};

const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);