// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

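/* Translate the flow_action entries of a flower rule into mlxsw ACL rule
 * actions: accept/drop/trap/goto-chain/redirect/mirror/vlan-mangle are
 * handled; any other action is rejected with -EOPNOTSUPP. Illustrative
 * userspace command (an assumption, not taken from this file) that would
 * end up here:
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *      dst_ip 192.0.2.1 action drop
 */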
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP:
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

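/* Match on the ingress port carried in the META dissector key. Only an
 * exact ifindex mask (0xFFFFFFFF) is accepted, and the netdevice must be
 * an mlxsw port belonging to the same device as the block; the match is
 * then programmed as an exact SRC_SYS_PORT key.
 */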
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

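/* IPv4 source/destination addresses map directly onto the 32-bit
 * SRC_IP_0_31/DST_IP_0_31 flexible-key elements.
 */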
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

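/* A 128-bit IPv6 address does not fit in a single flexible-key element,
 * so each address is split into four 32-bit chunks (bits 96-127 down to
 * bits 0-31), most-significant chunk first.
 */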
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

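/* L4 port keys are accepted only when the rule also matches on ip_proto
 * TCP or UDP; for any other protocol the key is rejected.
 */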
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

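/* TCP flags keys require the rule to match on ip_proto TCP. Bits 0x0E00
 * of the flags word correspond to reserved TCP header bits, which the
 * error message below says cannot be matched, so such masks are rejected.
 */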
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

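/* The IP dissector key carries ttl and tos; the tos byte is split for the
 * hardware key, the two low bits going to the ECN element and the six
 * high bits to the DSCP element. IP keys only make sense for IPv4/IPv6
 * ethertypes.
 */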
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

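/* Main parsing entry point: validate that the rule uses only dissector
 * keys this driver can offload, translate each present key into
 * flexible-key element matches, then parse the actions.
 */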
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

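/* FLOW_CLS_REPLACE handler: take a reference on the ruleset for the
 * chain, create a rule keyed by the flower cookie, parse matches and
 * actions into it, commit the resulting rule info and insert the rule.
 * On any failure the rule is destroyed and the ruleset reference dropped.
 */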
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

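/* FLOW_CLS_DESTROY handler: look up the ruleset and the rule by the
 * flower cookie, remove the rule from hardware and free it.
 */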
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

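/* FLOW_CLS_STATS handler: fetch packet/byte counters and the last-use
 * timestamp for the rule and report them back via flow_stats_update().
 */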
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

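/* FLOW_CLS_TMPLT_CREATE handler: parse the template into a throwaway
 * on-stack rule info only to learn which key elements it uses, then take
 * a ruleset reference sized for that element usage.
 */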
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset - its destruction is handled
	 * by flow_block callback */
	return PTR_ERR_OR_ZERO(ruleset);
}

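/* FLOW_CLS_TMPLT_DESTROY handler: drop both references on the chain's
 * ruleset - the one taken by the lookup here and the one kept by
 * mlxsw_sp_flower_tmplt_create() above.
 */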
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}