This source file includes the following definitions:
- mlxsw_sp_qdisc_compare
- mlxsw_sp_qdisc_find
- mlxsw_sp_qdisc_find_by_handle
- mlxsw_sp_qdisc_destroy
- mlxsw_sp_qdisc_replace
- mlxsw_sp_qdisc_get_stats
- mlxsw_sp_qdisc_get_xstats
- mlxsw_sp_xstats_backlog
- mlxsw_sp_xstats_tail_drop
- mlxsw_sp_qdisc_bstats_per_priority_get
- mlxsw_sp_tclass_congestion_enable
- mlxsw_sp_tclass_congestion_disable
- mlxsw_sp_setup_tc_qdisc_red_clean_stats
- mlxsw_sp_qdisc_red_destroy
- mlxsw_sp_qdisc_red_check_params
- mlxsw_sp_qdisc_red_replace
- mlxsw_sp_qdisc_red_unoffload
- mlxsw_sp_qdisc_get_red_xstats
- mlxsw_sp_qdisc_get_red_stats
- mlxsw_sp_setup_tc_red
- mlxsw_sp_qdisc_prio_destroy
- mlxsw_sp_qdisc_prio_check_params
- mlxsw_sp_qdisc_prio_replace
- mlxsw_sp_qdisc_prio_unoffload
- mlxsw_sp_qdisc_get_prio_stats
- mlxsw_sp_setup_tc_qdisc_prio_clean_stats
- mlxsw_sp_qdisc_prio_graft
- mlxsw_sp_setup_tc_prio
- mlxsw_sp_tc_qdisc_init
- mlxsw_sp_tc_qdisc_fini
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "reg.h"

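/* The prio qdisc's band 0 carries the highest-priority traffic, while the
 * device serves higher-numbered traffic classes first, so band 0 is mapped to
 * the highest tclass. Child qdisc minor numbers are 1-based, hence the
 * additional -1 in the child mapping.
 */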
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
        MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))

enum mlxsw_sp_qdisc_type {
        MLXSW_SP_QDISC_NO_QDISC,
        MLXSW_SP_QDISC_RED,
        MLXSW_SP_QDISC_PRIO,
};

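/* Per-qdisc-type operations. Each offloaded qdisc kind (RED, PRIO) supplies
 * callbacks for parameter validation, (re)configuration, teardown and
 * statistics retrieval.
 */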
struct mlxsw_sp_qdisc_ops {
        enum mlxsw_sp_qdisc_type type;
        int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                            void *params);
        int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
        int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
        int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                         struct tc_qopt_offload_stats *stats_ptr);
        int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          void *xstats_ptr);
        void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
        /* unoffload - used for a qdisc that stops being offloaded without
         * being destroyed, so that the related software counters (e.g. the
         * backlog) can be adjusted.
         */
        void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

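/* State of one offloaded qdisc. stats_base and xstats_base record the
 * hardware counter values at the time the qdisc was installed (or its stats
 * were last cleaned), so the statistics reported to the kernel are deltas
 * against that base.
 */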
struct mlxsw_sp_qdisc {
        u32 handle;
        u8 tclass_num;
        u8 prio_bitmap;
        union {
                struct red_stats red;
        } xstats_base;
        struct mlxsw_sp_qdisc_stats {
                u64 tx_bytes;
                u64 tx_packets;
                u64 drops;
                u64 overlimits;
                u64 backlog;
        } stats_base;

        struct mlxsw_sp_qdisc_ops *ops;
};

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
                       enum mlxsw_sp_qdisc_type type)
{
        return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
               mlxsw_sp_qdisc->ops->type == type &&
               mlxsw_sp_qdisc->handle == handle;
}

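/* Resolve a tc parent handle to the qdisc it refers to: TC_H_ROOT selects the
 * port's root qdisc; otherwise the major number must match the root qdisc's
 * handle and the minor number (a 1-based band) selects a child tclass qdisc.
 */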
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
                    bool root_only)
{
        int tclass, child_index;

        if (parent == TC_H_ROOT)
                return mlxsw_sp_port->root_qdisc;

        if (root_only || !mlxsw_sp_port->root_qdisc ||
            !mlxsw_sp_port->root_qdisc->ops ||
            TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
            TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
                return NULL;

        child_index = TC_H_MIN(parent);
        tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
        return &mlxsw_sp_port->tclass_qdiscs[tclass];
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
        int i;

        if (mlxsw_sp_port->root_qdisc->handle == handle)
                return mlxsw_sp_port->root_qdisc;

        if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
                return NULL;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
                        return &mlxsw_sp_port->tclass_qdiscs[i];

        return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        int err = 0;

        if (!mlxsw_sp_qdisc)
                return 0;

        if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
                err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
                                                   mlxsw_sp_qdisc);

        mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
        mlxsw_sp_qdisc->ops = NULL;
        return err;
}

static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
                       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
        int err;

        if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
                /* A qdisc of a different type is already offloaded at this
                 * location; erase its configuration before setting up the
                 * new one.
                 */
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

        err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
        if (err)
                goto err_bad_param;

        err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
        if (err)
                goto err_config;

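        /* A changed handle means a new qdisc was created at this location
         * rather than an existing one being reconfigured, so start its
         * statistics from a clean base.
         */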
        if (mlxsw_sp_qdisc->handle != handle) {
                mlxsw_sp_qdisc->ops = ops;
                if (ops->clean_stats)
                        ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
        }

        mlxsw_sp_qdisc->handle = handle;
        return 0;

err_bad_param:
err_config:
        if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
                ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

        mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                         struct tc_qopt_offload_stats *stats_ptr)
{
        if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
            mlxsw_sp_qdisc->ops->get_stats)
                return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
                                                      mlxsw_sp_qdisc,
                                                      stats_ptr);

        return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          void *xstats_ptr)
{
        if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
            mlxsw_sp_qdisc->ops->get_xstats)
                return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
                                                       mlxsw_sp_qdisc,
                                                       xstats_ptr);

        return -EOPNOTSUPP;
}

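/* The per-tclass counter arrays hold two entries per traffic class: TC n and
 * TC n + 8 (the latter presumably carrying the multicast counterpart in
 * MC-aware mode), so both halves are summed here.
 */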
static u64
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
        return xstats->backlog[tclass_num] +
               xstats->backlog[tclass_num + 8];
}

static u64
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
        return xstats->tail_drop[tclass_num] +
               xstats->tail_drop[tclass_num + 8];
}

static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
                                       u8 prio_bitmap, u64 *tx_packets,
                                       u64 *tx_bytes)
{
        int i;

        *tx_packets = 0;
        *tx_bytes = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                if (prio_bitmap & BIT(i)) {
                        *tx_packets += xstats->tx_packets[i];
                        *tx_bytes += xstats->tx_bytes[i];
                }
        }
}

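/* Program the RED/ECN profile for one traffic class: the CWTP register sets
 * the min/max thresholds and drop probability of the default profile, and
 * CWTPM binds that profile to the traffic class and selects drop vs. ECN
 * marking.
 */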
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                  int tclass_num, u32 min, u32 max,
                                  u32 probability, bool is_ecn)
{
        char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
        char cwtp_cmd[MLXSW_REG_CWTP_LEN];
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        int err;

        mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
        mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
                                    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
                                    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
                                    probability);

        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
        if (err)
                return err;

        mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
                                   int tclass_num)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

        mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
                             MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct red_stats *red_base;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats_base = &mlxsw_sp_qdisc->stats_base;
        red_base = &mlxsw_sp_qdisc->xstats_base.red;

        mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
                                               mlxsw_sp_qdisc->prio_bitmap,
                                               &stats_base->tx_packets,
                                               &stats_base->tx_bytes);
        red_base->prob_mark = xstats->ecn;
        red_base->prob_drop = xstats->wred_drop[tclass_num];
        red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

        stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
        stats_base->drops = red_base->prob_drop + red_base->pdrop;

        stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;

        if (root_qdisc != mlxsw_sp_qdisc)
                root_qdisc->stats_base.backlog -=
                                mlxsw_sp_qdisc->stats_base.backlog;

        return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
                                                  mlxsw_sp_qdisc->tclass_num);
}

static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
                                struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                                void *params)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct tc_red_qopt_offload_params *p = params;

        if (p->min > p->max) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: min %u is bigger than max %u\n",
                        p->min, p->max);
                return -EINVAL;
        }
        if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: max value %u is too big\n", p->max);
                return -EINVAL;
        }
        if (p->min == 0 || p->max == 0) {
                dev_err(mlxsw_sp->bus_info->dev,
                        "spectrum: RED: 0 value is illegal for min and max\n");
                return -EINVAL;
        }
        return 0;
}

static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                           void *params)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct tc_red_qopt_offload_params *p = params;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        u32 min, max;
        u64 prob;

        /* p->probability is a fixed-point fraction scaled by 2^32; convert it
         * to the percentage the device expects, rounding up.
         */
        prob = p->probability;
        prob *= 100;
        prob = DIV_ROUND_UP(prob, 1 << 16);
        prob = DIV_ROUND_UP(prob, 1 << 16);
        min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
        max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
        return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
                                                 min, max, prob, p->is_ecn);
}

static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             void *params)
{
        struct tc_red_qopt_offload_params *p = params;
        u64 backlog;

        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                       mlxsw_sp_qdisc->stats_base.backlog);
        p->qstats->backlog -= backlog;
        mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              void *xstats_ptr)
{
        struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_port_xstats *xstats;
        struct red_stats *res = xstats_ptr;
        int early_drops, marks, pdrops;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

        early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
        marks = xstats->ecn - xstats_base->prob_mark;
        pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
                 xstats_base->pdrop;

        res->pdrop += pdrops;
        res->prob_drop += early_drops;
        res->prob_mark += marks;

        xstats_base->pdrop += pdrops;
        xstats_base->prob_drop += early_drops;
        xstats_base->prob_mark += marks;
        return 0;
}

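/* Report RED statistics as deltas of the periodically updated hardware
 * counters against the recorded base, then advance the base so that
 * subsequent calls do not double count.
 */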
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                             struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                             struct tc_qopt_offload_stats *stats_ptr)
{
        u64 tx_bytes, tx_packets, overlimits, drops, backlog;
        u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
                                               mlxsw_sp_qdisc->prio_bitmap,
                                               &tx_packets, &tx_bytes);
        tx_bytes = tx_bytes - stats_base->tx_bytes;
        tx_packets = tx_packets - stats_base->tx_packets;

        overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
                     stats_base->overlimits;
        drops = xstats->wred_drop[tclass_num] +
                mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
                stats_base->drops;
        backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);

        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->overlimits += overlimits;
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             backlog) -
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             stats_base->backlog);

        stats_base->backlog = backlog;
        stats_base->drops += drops;
        stats_base->overlimits += overlimits;
        stats_base->tx_bytes += tx_bytes;
        stats_base->tx_packets += tx_packets;
        return 0;
}

#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
        .type = MLXSW_SP_QDISC_RED,
        .check_params = mlxsw_sp_qdisc_red_check_params,
        .replace = mlxsw_sp_qdisc_red_replace,
        .unoffload = mlxsw_sp_qdisc_red_unoffload,
        .destroy = mlxsw_sp_qdisc_red_destroy,
        .get_stats = mlxsw_sp_qdisc_get_red_stats,
        .get_xstats = mlxsw_sp_qdisc_get_red_xstats,
        .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_red_qopt_offload *p)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

        mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
        if (!mlxsw_sp_qdisc)
                return -EOPNOTSUPP;

        if (p->command == TC_RED_REPLACE)
                return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
                                              mlxsw_sp_qdisc,
                                              &mlxsw_sp_qdisc_ops_red,
                                              &p->set);

        if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
                                    MLXSW_SP_QDISC_RED))
                return -EOPNOTSUPP;

        switch (p->command) {
        case TC_RED_DESTROY:
                return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        case TC_RED_XSTATS:
                return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                 p->xstats);
        case TC_RED_STATS:
                return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                &p->stats);
        default:
                return -EOPNOTSUPP;
        }
}

static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        int i;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
                                          MLXSW_SP_PORT_DEFAULT_TCLASS);
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
                                       &mlxsw_sp_port->tclass_qdiscs[i]);
                mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
        }

        return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                                 void *params)
{
        struct tc_prio_qopt_offload_params *p = params;

        if (p->bands > IEEE_8021QAZ_MAX_TCS)
                return -EOPNOTSUPP;

        return 0;
}

static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                            void *params)
{
        struct tc_prio_qopt_offload_params *p = params;
        struct mlxsw_sp_qdisc *child_qdisc;
        int tclass, i, band, backlog;
        u8 old_priomap;
        int err;

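        /* Rebuild each band's priority bitmap from the new priomap and
         * reprogram only the priorities whose traffic class actually changed.
         * If a band's membership changed, restart its stats base while
         * preserving the accumulated backlog.
         */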
        for (band = 0; band < p->bands; band++) {
                tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
                child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
                old_priomap = child_qdisc->prio_bitmap;
                child_qdisc->prio_bitmap = 0;
                for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                        if (p->priomap[i] == band) {
                                child_qdisc->prio_bitmap |= BIT(i);
                                if (BIT(i) & old_priomap)
                                        continue;
                                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
                                                                i, tclass);
                                if (err)
                                        return err;
                        }
                }
                if (old_priomap != child_qdisc->prio_bitmap &&
                    child_qdisc->ops && child_qdisc->ops->clean_stats) {
                        backlog = child_qdisc->stats_base.backlog;
                        child_qdisc->ops->clean_stats(mlxsw_sp_port,
                                                      child_qdisc);
                        child_qdisc->stats_base.backlog = backlog;
                }
        }
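        /* Any remaining bands are not used by this prio instance; clear their
         * priority bitmaps and destroy whatever qdisc may still be offloaded
         * on the corresponding traffic classes.
         */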
        for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
                tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
                child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
                child_qdisc->prio_bitmap = 0;
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
        }
        return 0;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              void *params)
{
        struct tc_prio_qopt_offload_params *p = params;
        u64 backlog;

        backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                       mlxsw_sp_qdisc->stats_base.backlog);
        p->qstats->backlog -= backlog;
}

static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                              struct tc_qopt_offload_stats *stats_ptr)
{
        u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        int i;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
        tx_packets = stats->tx_packets - stats_base->tx_packets;

        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                drops += mlxsw_sp_xstats_tail_drop(xstats, i);
                drops += xstats->wred_drop[i];
                backlog += mlxsw_sp_xstats_backlog(xstats, i);
        }
        drops = drops - stats_base->drops;

        _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
        stats_ptr->qstats->drops += drops;
        stats_ptr->qstats->backlog +=
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             backlog) -
                        mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
                                             stats_base->backlog);
        stats_base->backlog = backlog;
        stats_base->drops += drops;
        stats_base->tx_bytes += tx_bytes;
        stats_base->tx_packets += tx_packets;
        return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
                                         struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
        struct mlxsw_sp_qdisc_stats *stats_base;
        struct mlxsw_sp_port_xstats *xstats;
        struct rtnl_link_stats64 *stats;
        int i;

        xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
        stats = &mlxsw_sp_port->periodic_hw_stats.stats;
        stats_base = &mlxsw_sp_qdisc->stats_base;

        stats_base->tx_packets = stats->tx_packets;
        stats_base->tx_bytes = stats->tx_bytes;

        stats_base->drops = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
                stats_base->drops += xstats->wred_drop[i];
        }

        mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
        .type = MLXSW_SP_QDISC_PRIO,
        .check_params = mlxsw_sp_qdisc_prio_check_params,
        .replace = mlxsw_sp_qdisc_prio_replace,
        .unoffload = mlxsw_sp_qdisc_prio_unoffload,
        .destroy = mlxsw_sp_qdisc_prio_destroy,
        .get_stats = mlxsw_sp_qdisc_get_prio_stats,
        .clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Grafting is not supported by the device. Except for the cases handled
 * below, a graft therefore results in un-offloading of the grafted qdisc as
 * well as of any qdisc previously offloaded at the target band.
 */
static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
                          struct tc_prio_qopt_offload_graft_params *p)
{
        int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
        struct mlxsw_sp_qdisc *old_qdisc;

        /* If the grafted qdisc is already offloaded at its new location,
         * nothing needs to be done.
         */
        if (p->band < IEEE_8021QAZ_MAX_TCS &&
            mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
                return 0;

        if (!p->child_handle) {
                /* This is an invisible FIFO replacing the original qdisc.
                 * Ignore it: the original qdisc's destroy will follow.
                 */
                return 0;
        }

        /* See if the grafted qdisc is already offloaded on some traffic
         * class; if so, un-offload it first, then un-offload whatever is
         * currently at the target traffic class.
         */
        old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
                                                  p->child_handle);
        if (old_qdisc)
                mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

        mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
                               &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
        return -EOPNOTSUPP;
}

int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct tc_prio_qopt_offload *p)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

        mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
        if (!mlxsw_sp_qdisc)
                return -EOPNOTSUPP;

        if (p->command == TC_PRIO_REPLACE)
                return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
                                              mlxsw_sp_qdisc,
                                              &mlxsw_sp_qdisc_ops_prio,
                                              &p->replace_params);

        if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
                                    MLXSW_SP_QDISC_PRIO))
                return -EOPNOTSUPP;

        switch (p->command) {
        case TC_PRIO_DESTROY:
                return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
        case TC_PRIO_STATS:
                return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                &p->stats);
        case TC_PRIO_GRAFT:
                return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
                                                 &p->graft_params);
        default:
                return -EOPNOTSUPP;
        }
}

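/* Allocate the per-port qdisc state: one root qdisc plus one child qdisc per
 * traffic class. Each child is permanently bound to its traffic class number.
 */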
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
        int i;

        mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
        if (!mlxsw_sp_qdisc)
                goto err_root_qdisc_init;

        mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
        mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
        mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

        mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
                                 sizeof(*mlxsw_sp_qdisc),
                                 GFP_KERNEL);
        if (!mlxsw_sp_qdisc)
                goto err_tclass_qdiscs_init;

        mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
                mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;

        return 0;

err_tclass_qdiscs_init:
        kfree(mlxsw_sp_port->root_qdisc);
err_root_qdisc_init:
        return -ENOMEM;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
        kfree(mlxsw_sp_port->tclass_qdiscs);
        kfree(mlxsw_sp_port->root_qdisc);
}