This source file includes the following definitions.
- nfp_fl_push_mpls
- nfp_fl_pop_mpls
- nfp_fl_set_mpls
- nfp_fl_pop_vlan
- nfp_fl_push_vlan
- nfp_fl_pre_lag
- nfp_fl_output
- nfp_flower_tun_is_gre
- nfp_fl_get_tun_from_act
- nfp_fl_pre_tunnel
- nfp_fl_push_geneve_options
- nfp_fl_set_ipv4_tun
- nfp_fl_set_helper32
- nfp_fl_set_eth
- nfp_fl_set_ip4
- nfp_fl_set_ip6_helper
- nfp_fl_set_ip6_hop_limit_flow_label
- nfp_fl_set_ip6
- nfp_fl_set_tport
- nfp_fl_csum_l4_to_flag
- nfp_fl_commit_mangle
- nfp_fl_pedit
- nfp_flower_output_action
- nfp_flower_loop_action
- nfp_fl_check_mangle_start
- nfp_fl_check_mangle_end
- nfp_flower_compile_action
#include <linux/bitfield.h>
#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

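/* Build a firmware push MPLS action from a TC MPLS push action, assembling
 * the label stack entry from the individual TC fields with the MPLS_LS_*
 * shifts from <linux/mpls.h>.
 */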
static int
nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
		 const struct flow_action_entry *act,
		 struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_push_mpls);
	u32 mpls_lse = 0;

	push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
	push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* BOS field must explicitly be set for all supported cases. */
	if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
		return -EOPNOTSUPP;
	}

	/* Leave MPLS TC as a default value of 0 if not explicitly set. */
	if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
		mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;

	/* Proto, label and TTL are enforced by the kernel caller. */
	mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
	mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
	push_mpls->ethtype = act->mpls_push.proto;
	push_mpls->lse = cpu_to_be32(mpls_lse);

	return 0;
}

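/* Build a firmware pop MPLS action; only the EtherType revealed by the pop
 * needs to be communicated to the firmware.
 */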
static void
nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_pop_mpls);

	pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
	pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_mpls->ethtype = act->mpls_pop.proto;
}

static void
nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
		const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_set_mpls);
	u32 mpls_lse = 0, mpls_mask = 0;

	set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
	set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
		mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
		mpls_mask |= MPLS_LS_LABEL_MASK;
	}
	if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
		mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
		mpls_mask |= MPLS_LS_TC_MASK;
	}
	if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
		mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
		mpls_mask |= MPLS_LS_S_MASK;
	}
	if (act->mpls_mangle.ttl) {
		mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
		mpls_mask |= MPLS_LS_TTL_MASK;
	}

	set_mpls->lse = cpu_to_be32(mpls_lse);
	set_mpls->lse_mask = cpu_to_be32(mpls_mask);
}

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct flow_action_entry *act)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = act->vlan.proto;

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

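/* Prepend a pre-LAG action when the egress device is a LAG/bond master.
 * Returns the size of the action added, 0 if the egress device is not a
 * LAG master, or a negative errno on failure.
 */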
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
	       struct nfp_fl_payload *nfp_flow, int act_len,
	       struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = act->dev;
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
		return -EOPNOTSUPP;
	}

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

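/* Build a firmware output action for a mirred/redirect TC action. The
 * encoding of the destination port depends on whether the egress device is
 * a tunnel netdev, a LAG master, an internal port or a plain NFP repr.
 */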
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct flow_action_entry *act,
	      struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
	      bool pkt_host, struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = act->dev;
	if (!out_dev) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
		return -EOPNOTSUPP;
	}

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
			return -EOPNOTSUPP;
		}

		if (*tun_out_cnt) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
			return -EOPNOTSUPP;
		}
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0) {
			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
			return gid;
		}
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
			return -EOPNOTSUPP;
		}

		if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
			return -EOPNOTSUPP;
		}

		nfp_flow->pre_tun_rule.dev = out_dev;

		return 0;
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		if (nfp_netdev_is_nfp_repr(in_dev)) {
			/* Confirm ingress and egress are on same device. */
			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
				return -EOPNOTSUPP;
			}
		}

		if (!nfp_netdev_is_nfp_repr(out_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
			return -EOPNOTSUPP;
		}

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
			return -EOPNOTSUPP;
		}
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

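/* GRE tunnels carry no L4 destination port, so the tunnel type has to be
 * inferred from the netdev targeted by a later redirect/mirred action.
 */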
static bool
nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
{
	struct flow_action_entry *act = flow->rule->action.entries;
	int num_act = flow->rule->action.num_entries;
	int act_idx;

	/* Preparse action list for next mirred or redirect action. */
	for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
		if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
		    act[act_idx].id == FLOW_ACTION_MIRRED)
			return netif_is_gretap(act[act_idx].dev);

	return false;
}

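/* Map a TC tunnel_key set action to a firmware tunnel type, using the UDP
 * destination port for VXLAN/GENEVE and the egress netdev for GRE.
 */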
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act(struct nfp_app *app,
			struct flow_cls_offload *flow,
			const struct flow_action_entry *act, int act_idx)
{
	const struct ip_tunnel_info *tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;

	/* Determine the tunnel type based on the egress netdev
	 * in the mirred action for tunnels without l4.
	 */
	if (nfp_flower_tun_is_gre(flow, act_idx))
		return NFP_FL_TUNNEL_GRE;

	switch (tun->key.tp_dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(GENEVE_UDP_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* fall through */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

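/* Insert a pre-tunnel action at the head of the action list, shifting any
 * previously compiled actions to make room for it.
 */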
static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

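/* Compile TC GENEVE option metadata into one push-GENEVE firmware action
 * per option, enforcing the firmware limits on option count and total size.
 */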
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
			return -EOPNOTSUPP;
		}

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
			return -EOPNOTSUPP;
		}

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
		return -EOPNOTSUPP;
	}

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

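/* Build the set-IPv4-tunnel firmware action from the TC tunnel key. If the
 * key carries no TTL a route lookup supplies one, falling back to the netns
 * default.
 */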
static int
nfp_fl_set_ipv4_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_tun *set_tun,
		    const struct flow_action_entry *act,
		    struct nfp_fl_pre_tunnel *pre_tun,
		    enum nfp_flower_tun_type tun_type,
		    struct net_device *netdev, struct netlink_ext_ack *extack)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_tun);
	const struct ip_tunnel_info *ip_tun = act->tunnel;
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
		return -EOPNOTSUPP;
	}

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
		return -EOPNOTSUPP;
	}
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

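/* Merge a 32-bit pedit value/mask pair into the accumulated exact and mask
 * bytes of a set action, preserving previously written bits.
 */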
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

struct ipv4_ttl_word {
	__u8 ttl;
	__u8 protocol;
	__sum16 check;
};

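/* Translate a 32-bit IPv4 header pedit into the appropriate set action:
 * source/destination address words map to a set-addresses action, while the
 * TTL and TOS words map to a combined TTL/TOS action.
 */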
static int
nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
	       struct netlink_ext_ack *extack)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
		return -EOPNOTSUPP;
	}

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
			return -EOPNOTSUPP;
		}

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

struct ipv6_hop_limit_word {
	__be16 payload_len;
	u8 nexthdr;
	u8 hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
				    struct netlink_ext_ack *extack)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOW_LABEL_MASK ||
		    exact & ~IPV6_FLOW_LABEL_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
			return -EOPNOTSUPP;
		}

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}

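/* Dispatch a 32-bit IPv6 header pedit by offset: words before the source
 * address cover hop limit/flow label, the rest select a source or
 * destination address word.
 */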
static int
nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
	       struct netlink_ext_ack *extack)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~act->mangle.mask;
	exact = (__force __be32)act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
		return -EOPNOTSUPP;
	}

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl, extack);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		   sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
		return -EOPNOTSUPP;
	}

	return err;
}

static int
nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode,
		 struct netlink_ext_ack *extack)
{
	u32 exact, mask;

	if (off) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
		return -EOPNOTSUPP;
	}

	mask = ~act->mangle.mask;
	exact = act->mangle.val;

	if (exact & ~mask) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
		return -EOPNOTSUPP;
	}

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

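/* Map an IP protocol to the TC csum flags the hardware will implicitly
 * recalculate for a set action on that flow.
 */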
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match,
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

struct nfp_flower_pedit_acts {
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
};

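/* Flush all pedit actions accumulated in set_act into the action list in a
 * fixed order and record which checksums the hardware will fix as a result.
 */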
static int
nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
		     int *a_len, struct nfp_flower_pedit_acts *set_act,
		     u32 *csum_updated)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	size_t act_size = 0;
	u8 ip_proto = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;
	}

	if (set_act->set_eth.head.len_lw) {
		act_size = sizeof(set_act->set_eth);
		memcpy(nfp_action, &set_act->set_eth, act_size);
		*a_len += act_size;
	}

	if (set_act->set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_ttl_tos);
		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip_addr);
		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				 nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	if (set_act->set_ip6_dst.head.len_lw &&
	    set_act->set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
		       &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_dst);
		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_act->set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_ip6_src);
		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_act->set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_act->set_tport);
		memcpy(nfp_action, &set_act->set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

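/* Route a single TC pedit mangle to the per-header compile helper; results
 * are accumulated in set_act and only emitted by nfp_fl_commit_mangle().
 */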
static int
nfp_fl_pedit(const struct flow_action_entry *act,
	     struct flow_cls_offload *flow, char *nfp_action, int *a_len,
	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
	     struct netlink_ext_ack *extack)
{
	enum flow_action_mangle_base htype;
	u32 offset;

	htype = act->mangle.htype;
	offset = act->mangle.offset;

	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
				      &set_act->set_ip_ttl_tos, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
				      &set_act->set_ip6_src,
				      &set_act->set_ip6_tc_hl_fl, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_TCP, extack);
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
					NFP_FL_ACTION_OPCODE_SET_UDP, extack);
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
		return -EOPNOTSUPP;
	}
}

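/* Append an output action for a redirect/mirred TC action, then add a
 * pre-LAG action if the firmware supports LAG and the egress device needs
 * one.
 */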
static int
nfp_flower_output_action(struct nfp_app *app,
			 const struct flow_action_entry *act,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated, bool pkt_host,
			 struct netlink_ext_ack *extack)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
		return -EOPNOTSUPP;
	}

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
		return -EOPNOTSUPP;
	}

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt, pkt_host, extack);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
		if (prelag_size < 0) {
			return prelag_size;
		} else if (prelag_size > 0 && (!last || *out_cnt)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
			return -EOPNOTSUPP;
		}

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

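/* Compile a single TC action into zero or more firmware actions appended at
 * *a_len, rejecting anything the firmware cannot express.
 */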
static int
nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
		       struct flow_cls_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated,
		       struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
		       struct netlink_ext_ack *extack, int act_idx)
{
	struct nfp_fl_set_ipv4_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_push_mpls *psh_m;
	struct nfp_fl_pop_vlan *pop_v;
	struct nfp_fl_pop_mpls *pop_m;
	struct nfp_fl_set_mpls *set_m;
	int err;

	switch (act->id) {
	case FLOW_ACTION_DROP:
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
		break;
	case FLOW_ACTION_REDIRECT_INGRESS:
	case FLOW_ACTION_REDIRECT:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, *pkt_host,
					       extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_MIRRED_INGRESS:
	case FLOW_ACTION_MIRRED:
		err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated, *pkt_host,
					       extack);
		if (err)
			return err;
		break;
	case FLOW_ACTION_VLAN_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
			return -EOPNOTSUPP;
		}

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
		break;
	case FLOW_ACTION_VLAN_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
			return -EOPNOTSUPP;
		}

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, act);
		*a_len += sizeof(struct nfp_fl_push_vlan);
		break;
	case FLOW_ACTION_TUNNEL_ENCAP: {
		const struct ip_tunnel_info *ip_tun = act->tunnel;

		*tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
		if (*tun_type == NFP_FL_TUNNEL_NONE) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
			return -EOPNOTSUPP;
		}

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
			return -EOPNOTSUPP;
		}

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_tun) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
			return -EOPNOTSUPP;
		}

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_tun(app, set_tun, act, pre_tun,
					  *tun_type, netdev, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_tun);
		}
		break;
	case FLOW_ACTION_TUNNEL_DECAP:
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	case FLOW_ACTION_MANGLE:
		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated, set_act, extack))
			return -EOPNOTSUPP;
		break;
	case FLOW_ACTION_CSUM:
		/* csum action requests recalc of something we have not fixed */
		if (act->csum_flags & ~*csum_updated) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
			return -EOPNOTSUPP;
		}
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list. Which will later be used to check support.
		 */
		*csum_updated &= ~act->csum_flags;
		break;
	case FLOW_ACTION_MPLS_PUSH:
		if (*a_len +
		    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
			return -EOPNOTSUPP;
		}

		psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		err = nfp_fl_push_mpls(psh_m, act, extack);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_push_mpls);
		break;
	case FLOW_ACTION_MPLS_POP:
		if (*a_len +
		    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
			return -EOPNOTSUPP;
		}

		pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_pop_mpls(pop_m, act);
		*a_len += sizeof(struct nfp_fl_pop_mpls);
		break;
	case FLOW_ACTION_MPLS_MANGLE:
		if (*a_len +
		    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
			return -EOPNOTSUPP;
		}

		set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_set_mpls(set_m, act);
		*a_len += sizeof(struct nfp_fl_set_mpls);
		break;
	case FLOW_ACTION_PTYPE:
		/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
		if (act->ptype != PACKET_HOST)
			return -EOPNOTSUPP;

		*pkt_host = true;
		break;
	default:
		/* Currently we do not handle any other actions. */
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
		return -EOPNOTSUPP;
	}

	return 0;
}

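/* A run of consecutive mangle actions is compiled as one batch: these
 * helpers detect the first and last mangle of such a run.
 */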
static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
				      int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry prev_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	if (current_act_idx == 0)
		return true;

	prev_act = flow_act->entries[current_act_idx - 1];

	return prev_act.id != FLOW_ACTION_MANGLE;
}

static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
				    int current_act_idx)
{
	struct flow_action_entry current_act;
	struct flow_action_entry next_act;

	current_act = flow_act->entries[current_act_idx];
	if (current_act.id != FLOW_ACTION_MANGLE)
		return false;

	/* The last entry has no successor to inspect; comparing against
	 * num_entries here would read one entry past the array.
	 */
	if (current_act_idx == flow_act->num_entries - 1)
		return true;

	next_act = flow_act->entries[current_act_idx + 1];

	return next_act.id != FLOW_ACTION_MANGLE;
}

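/* Entry point: walk the TC action list and compile it into the firmware
 * action list of nfp_flow. As an illustration (device names are
 * hypothetical, not taken from this file), a rule along these lines:
 *
 *   tc filter add dev nfp_p0 ingress protocol ip flower ip_proto tcp \
 *       action pedit ex munge ip ttl set 64 pipe \
 *       action csum iph and tcp pipe \
 *       action mirred egress redirect dev nfp_p1
 *
 * would be compiled here into a set-TTL/TOS action followed by an output
 * action, with the csum action consumed by the *csum_updated bookkeeping.
 */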
int nfp_flower_compile_action(struct nfp_app *app,
			      struct flow_cls_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow,
			      struct netlink_ext_ack *extack)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	struct nfp_flower_pedit_acts set_act;
	enum nfp_flower_tun_type tun_type;
	struct flow_action_entry *act;
	bool pkt_host = false;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
			memset(&set_act, 0, sizeof(set_act));
		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated,
					     &set_act, &pkt_host, extack, i);
		if (err)
			return err;
		act_cnt++;
		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
			nfp_fl_commit_mangle(flow,
					     &nfp_flow->action_data[act_len],
					     &act_len, &set_act, &csum_updated);
	}

	/* The firmware shortcut can only represent a single action, so it
	 * must be cleared whenever more than one action is compiled.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}