This source file includes the following definitions:
- bcm_sf2_get_num_udf_slices
- udf_upper_bits
- udf_lower_bits
- bcm_sf2_get_slice_number
- bcm_sf2_cfp_udf_set
- bcm_sf2_cfp_op
- bcm_sf2_cfp_rule_addr_set
- bcm_sf2_cfp_rule_size
- bcm_sf2_cfp_act_pol_set
- bcm_sf2_cfp_slice_ipv4
- bcm_sf2_cfp_ipv4_rule_set
- bcm_sf2_cfp_slice_ipv6
- bcm_sf2_cfp_rule_find
- bcm_sf2_cfp_rule_cmp
- bcm_sf2_cfp_ipv6_rule_set
- bcm_sf2_cfp_rule_insert
- bcm_sf2_cfp_rule_set
- bcm_sf2_cfp_rule_del_one
- bcm_sf2_cfp_rule_remove
- bcm_sf2_cfp_rule_del
- bcm_sf2_invert_masks
- bcm_sf2_cfp_rule_get
- bcm_sf2_cfp_rule_get_all
- bcm_sf2_get_rxnfc
- bcm_sf2_set_rxnfc
- bcm_sf2_cfp_rst
- bcm_sf2_cfp_exit
- bcm_sf2_cfp_resume
- bcm_sf2_cfp_get_strings
- bcm_sf2_cfp_get_ethtool_stats
- bcm_sf2_cfp_get_sset_count
// SPDX-License-Identifier: GPL-2.0
/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 */

#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"

struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};

static const u8 zero_slice[UDFS_PER_SLICE] = { };

/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};

/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};

static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}

static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
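
/* Editor's illustration of the two helpers above, with values derived from
 * the layouts in this file (which hold UDFS_PER_SLICE == 9 entries per
 * slice, so the shift amount is 8):
 *
 *   num_udf = 6 (IPv4 layout): GENMASK(5, 0) = 0x3f
 *     udf_upper_bits(6) = 0x3f >> 8  = 0    -> UDF_Valid[8] clear
 *     udf_lower_bits(6) = (u8)0x3f   = 0x3f -> UDF_Valid[5:0] set
 *
 *   num_udf = 9 (IPv6 layout): GENMASK(8, 0) = 0x1ff
 *     udf_upper_bits(9) = 0x1ff >> 8 = 1    -> UDF_Valid[8] set
 *     udf_lower_bits(9) = (u8)0x1ff  = 0xff -> UDF_Valid[7:0] set
 */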

static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}

static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}

static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}

static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}

static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}
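
/* Editor's illustration of the CORE_ACT_POL_DATA0 packing above, assuming
 * the register layout implied by the shift/bit macros: forwarding a matched
 * frame to port 1, queue 2 with fwd_map_change == true yields
 *
 *   reg = CHANGE_FWRD_MAP_IB_REP_ARL      (replace ARL-derived destination)
 *       | BIT(1 + DST_MAP_IB_SHIFT)       (in-band destination map: port 1)
 *       | CHANGE_TC | 2 << NEW_TC_SHIFT   (new traffic class: queue 2)
 *
 * plus LOOP_BK_EN when the matched ingress port equals the egress port.
 */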

static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
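
/* Editor's note: one IPv4 match (src/dst address plus src/dst port) is
 * spread across five 32-bit TCAM words, DATA_PORT(4) down to DATA_PORT(0),
 * each holding byte-swizzled UDF fields as commented above; DATA_PORT(0)
 * also carries the slice ID and the valid bits. The mask view is written to
 * the matching CORE_CFP_MASK_PORT(n) registers with the same swizzling,
 * which is why this helper is called twice, once with mask == false and
 * once with mask == true.
 */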

static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;

	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* The flow rule translation is not used past this point, free it
	 * here as well instead of only on the error path.
	 */
	ethtool_rx_flow_rule_destroy(flow);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}
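
/* Editor's usage sketch (hypothetical interface name and values): a rule
 * such as
 *
 *   ethtool -N lan1 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 16
 *
 * reaches this function through bcm_sf2_set_rxnfc() and
 * bcm_sf2_cfp_rule_set(), with an ethtool_rx_flow_spec whose ring_cookie
 * (16 here) encodes the destination port/queue pair that
 * bcm_sf2_cfp_rule_insert() below decodes.
 */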

static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* TCAM word 4: L4 port in [23:8], bits 15:8 of the lowest
	 * address word (ip6_addr[3]) in [7:0]
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* TCAM word 3: remaining bytes of ip6_addr[3] in [31:24] and
	 * [23:8], bits 15:8 of ip6_addr[2] in [7:0]
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* TCAM word 2: remaining bytes of ip6_addr[2] in [31:24] and
	 * [23:8], bits 15:8 of ip6_addr[1] in [7:0]
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* TCAM word 1: remaining bytes of ip6_addr[1] in [31:24] and
	 * [23:8], bits 15:8 of ip6_addr[0] in [7:0]
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* TCAM word 0: remaining bytes of ip6_addr[0] in [31:24] and
	 * [23:8], slice ID in [3:2], slice valid in [1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
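
/* Editor's note: an IPv6 match needs 128 address bits plus a 16-bit port,
 * more than the five 32-bit words one slice provides, so the source half
 * and the destination half are written as two chained TCAM entries (see
 * bcm_sf2_cfp_ipv6_rule_set() below). Within a word, bytes of consecutive
 * address words overlap, which is why each write above carries one byte of
 * the next address word in bits [7:0].
 */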

static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			return rule;
	}

	/* Return NULL on no match; breaking out of list_for_each_entry()
	 * would leave "rule" pointing at the list head container, and the
	 * callers all test for NULL.
	 */
	return NULL;
}

static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.h_ext.data[0] != fs->h_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		if (ret == 0)
			break;
	}

	return ret;
}

static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so check whether both can be
	 * obtained before touching the hardware.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on the error path) so that the search
	 * for the first half below cannot return the same index.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/* CHAIN ID		[31:24]
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except the chain ID and the UDF valid bits */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 destination address and port; note the mask view
	 * must be built from ports.mask, not ports.key.
	 */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to.
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* The flow rule translation is not used past this point, free it
	 * here as well instead of only on the error path.
	 */
	ethtool_rx_flow_rule_destroy(flow);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules.
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
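
/* Editor's note on the chaining scheme above: the first TCAM entry
 * (rule_index[0]) matches the IPv6 source half and its action only sets a
 * classification/chain ID; the second entry (rule_index[1]) matches the
 * destination half plus that chain ID in word 6 instead of a source port
 * map, so a frame reaches the final forwarding action only when both
 * halves matched. Only rule_index[1] is exposed to user-space, which is
 * why just that index is marked in priv->cfp.unique.
 */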

static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	unsigned int queue_num, port_num;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;

	/* We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
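
/* Editor's illustration of the ring_cookie decoding above, assuming
 * SF2_NUM_EGRESS_QUEUES == 8: a ring_cookie of 18 selects
 *
 *   port_num  = 18 / 8 = 2
 *   queue_num = 18 % 8 = 2
 *
 * i.e. egress queue 2 of switch port 2; indices of 7 and above are then
 * shifted down by one because port 6 has no valid bit in the map.
 */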

static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	     fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}

static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}

static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}

static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	if (loc > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	/* Refuse deleting unused rules, and those that are not unique, since
	 * they could be the first half of an IPv6 rule pair and deleting
	 * them directly would leave the other half dangling in the TCAM.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}

static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
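
/* Editor's note: the stored rule and the ETHTOOL_GRXCLSRULE reply use
 * complementary mask conventions, so every mask byte of m_u (and each
 * m_ext field) is flipped bit-for-bit before the rule is handed back to
 * user-space, e.g. a mask byte of 0x00 becomes 0xff.
 */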

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}

/* Report all rule locations by walking the bitmap of unique rules */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}

int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}

int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device;
	 * if the master rejects the rule, undo the CFP change as well.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}

int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}

void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}

int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}

static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};

void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}
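
/* Editor's note: with the three colors above, the strings come out as
 * "CFP001_GreenCntr", "CFP001_YellowCntr", "CFP001_RedCntr",
 * "CFP002_GreenCntr", and so on: one triplet per CFP rule index, skipping
 * the reserved entry #0.
 */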

void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}

int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}
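
/* Editor's illustration: on a switch exposing 256 CFP rules (a value for
 * priv->num_cfp_rules assumed here purely for the example), the count is
 * (256 - 1) * 3 = 765 strings/counters, matching what
 * bcm_sf2_cfp_get_strings() and bcm_sf2_cfp_get_ethtool_stats() emit.
 */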