This source file includes the following definitions:
- mlx5_cmd_stub_update_root_ft
- mlx5_cmd_stub_create_flow_table
- mlx5_cmd_stub_destroy_flow_table
- mlx5_cmd_stub_modify_flow_table
- mlx5_cmd_stub_create_flow_group
- mlx5_cmd_stub_destroy_flow_group
- mlx5_cmd_stub_create_fte
- mlx5_cmd_stub_update_fte
- mlx5_cmd_stub_delete_fte
- mlx5_cmd_stub_packet_reformat_alloc
- mlx5_cmd_stub_packet_reformat_dealloc
- mlx5_cmd_stub_modify_header_alloc
- mlx5_cmd_stub_modify_header_dealloc
- mlx5_cmd_stub_set_peer
- mlx5_cmd_stub_create_ns
- mlx5_cmd_stub_destroy_ns
- mlx5_cmd_update_root_ft
- mlx5_cmd_create_flow_table
- mlx5_cmd_destroy_flow_table
- mlx5_cmd_modify_flow_table
- mlx5_cmd_create_flow_group
- mlx5_cmd_destroy_flow_group
- mlx5_set_extended_dest
- mlx5_cmd_set_fte
- mlx5_cmd_create_fte
- mlx5_cmd_update_fte
- mlx5_cmd_delete_fte
- mlx5_cmd_fc_bulk_alloc
- mlx5_cmd_fc_alloc
- mlx5_cmd_fc_free
- mlx5_cmd_fc_query
- mlx5_cmd_fc_get_bulk_query_out_len
- mlx5_cmd_fc_bulk_query
- mlx5_cmd_packet_reformat_alloc
- mlx5_cmd_packet_reformat_dealloc
- mlx5_cmd_modify_header_alloc
- mlx5_cmd_modify_header_dealloc
- mlx5_fs_cmd_get_fw_cmds
- mlx5_fs_cmd_get_stub_cmds
- mlx5_fs_cmd_get_default
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
#include "eswitch.h"

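/*
 * Stub command callbacks: no-op implementations that issue no firmware
 * command. All of them simply return success, except update_fte, which
 * returns -EOPNOTSUPP. They back mlx5_flow_cmd_stubs below, which
 * mlx5_fs_cmd_get_default() hands out for flow table types that are not
 * driven through firmware commands.
 */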
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		u32 underlay_qpn,
		bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		unsigned int log_size,
		struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		u32 *in,
		struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_group *group,
		struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_group *group,
		int modify_mask,
		struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
		int reformat_type,
		size_t size,
		void *reformat_data,
		enum mlx5_flow_namespace_type namespace,
		struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
		struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
		u8 namespace, u8 num_actions,
		void *modify_actions,
		struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
		struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

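/*
 * Firmware-backed command callbacks: each one fills in the corresponding
 * mlx5_ifc command layout and issues it through mlx5_cmd_exec().
 *
 * SET_FLOW_TABLE_ROOT points the steering root at @ft, or detaches it when
 * @disconnect is set. On IB ports a zero underlay QPN means there is
 * nothing to program, so the command is skipped.
 */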
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft, u32 underlay_qpn,
		bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect) {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
		MLX5_SET(set_flow_table_root_in, in, table_id, 0);
	} else {
		MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
	}

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	if (ft->vport) {
		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

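/* CREATE_FLOW_TABLE: programs the table level, log_size, encap/decap and
 * termination flags, and the miss behaviour: forward to @next_ft when one
 * is given, otherwise the table's default miss action. LAG demux tables
 * (op_mod 0x1) chain through lag_master_next_table_id instead.
 */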
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		unsigned int log_size,
		struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
	if (ft->vport) {
		MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_table_in, in, other_vport, 1);
	}

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

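/* MODIFY_FLOW_TABLE: after creation only the miss target can change. For
 * LAG demux tables the lag_master_next_table_id is updated (or cleared to 0);
 * for all other tables the miss action either forwards to @next_ft or falls
 * back to the table's default miss action.
 */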
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		if (ft->vport) {
			MLX5_SET(modify_flow_table_in, in, vport_number,
				 ft->vport);
			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
		}
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		u32 *in,
		struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	if (ft->vport) {
		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(create_flow_group_in, in, other_vport, 1);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	if (ft->vport) {
		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

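/* Decide whether the FTE must use the extended destination format: it is
 * needed when a forwarding rule has more than one destination and at least
 * one of them carries a per-vport packet reformat. Fails with -EOPNOTSUPP
 * if the firmware does not advertise the capability or the number of encap
 * destinations exceeds what it supports.
 */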
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
		struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			continue;
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

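/* SET_FLOW_TABLE_ENTRY: translate a fs_fte into the flow_context layout
 * (action bits, flow tag and source, push-VLAN headers, match value,
 * forwarding destination list and flow counter list) and program it at
 * @fte->index within @group_id.
 */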
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
		int opmod, int modify_mask,
		struct mlx5_flow_table *ft,
		unsigned group_id,
		struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
		MLX5_SET(set_fte_in, in, other_vport, 1);
	}

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			unsigned int id, type = dst->dest_attr.type;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				id = dst->dest_attr.vport.num;
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			default:
				id = dst->dest_attr.tir_num;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_group *group,
		struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

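/* In-place FTE update: requires the flow_modify_en capability; op_mod 1
 * selects the modify flavour of SET_FLOW_TABLE_ENTRY, with @modify_mask
 * naming the fields to change.
 */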
static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct mlx5_flow_group *fg,
		int modify_mask,
		struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
				flow_table_properties_nic_receive.
				flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
		struct mlx5_flow_table *ft,
		struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	if (ft->vport) {
		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
		MLX5_SET(delete_fte_in, in, other_vport, 1);
	}

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

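/* Flow counter commands: allocation (single and bulk), deallocation and
 * query of the packet/byte counters.
 */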
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
		enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
		u32 *id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

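/* A bulk query returns one traffic_counter per counter after the regular
 * query_flow_counter_out header, so the output buffer must be sized with
 * mlx5_cmd_fc_get_bulk_query_out_len() for the requested @bulk_len.
 */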
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
		u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

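/* ALLOC_PACKET_REFORMAT_CONTEXT: validates the reformat (encap) header size
 * against the per-namespace max_encap_header_size capability, copies it into
 * the command and returns the firmware-assigned id in @pkt_reformat.
 */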
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
		int reformat_type,
		size_t size,
		void *reformat_data,
		enum mlx5_flow_namespace_type namespace,
		struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)];
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
		     GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + size;

	memset(in, 0, inlen);
	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, reformat_type);
	memcpy(reformat, reformat_data, size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
		struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)];
	struct mlx5_core_dev *dev = ns->dev;

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

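/* ALLOC_MODIFY_HEADER_CONTEXT: maps the flow namespace to a table type,
 * checks @num_actions against the matching max_modify_header_actions
 * capability, and copies the action list into the command.
 */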
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
		u8 namespace, u8 num_actions,
		void *modify_actions,
		struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
		struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
	struct mlx5_core_dev *dev = ns->dev;

	memset(in, 0, sizeof(in));
	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

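/* Firmware command set. set_peer, create_ns and destroy_ns have no firmware
 * counterpart, so the stub callbacks are reused for them.
 */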
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
};

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

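/* Flow table types that are programmed through firmware get the firmware
 * command set; any other type falls back to the stubs.
 */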
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}