#ifndef __NFP_BPF_H__
#define __NFP_BPF_H__ 1

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"

#define cmsg_warn(bpf, msg...)	nn_dp_warn(&(bpf)->app->ctrl->dp, msg)

/* For relocation logic use up-most byte of branch instruction as scratch
 * area.  Remember to clear this before sending instructions to HW!
 */
#define OP_RELO_TYPE	0xff00000000000000ULL

enum nfp_relo_type {
	RELO_NONE = 0,
	/* standard internal jumps */
	RELO_BR_REL,
	/* internal jumps to parts of the outro */
	RELO_BR_GO_OUT,
	RELO_BR_GO_ABORT,
	RELO_BR_GO_CALL_PUSH_REGS,
	RELO_BR_GO_CALL_POP_REGS,
	/* external jumps to fixed addresses */
	RELO_BR_NEXT_PKT,
	RELO_BR_HELPER,
	/* immediate relocation against load address */
	RELO_IMMED_REL,
};
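
/* Illustrative sketch (assumed usage, not spelled out in this header): the
 * JIT stashes the relocation type in the scratch byte with FIELD_PREP() and
 * the relocation pass recovers it with FIELD_GET(), both from
 * <linux/bitfield.h> included above, e.g.:
 *
 *	u64 insn = raw_insn | FIELD_PREP(OP_RELO_TYPE, RELO_BR_HELPER);
 *	enum nfp_relo_type type = FIELD_GET(OP_RELO_TYPE, insn);
 */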

/* To make absolute relocated branches (branches other than RELO_BR_REL)
 * distinguishable in user space dumps from normal jumps, add a large offset
 * to them.
 */
#define BR_OFF_RELO		15000

enum static_regs {
	STATIC_REG_IMMA		= 20, /* Bank AB */
	STATIC_REG_IMM		= 21, /* Bank AB */
	STATIC_REG_STACK	= 22, /* Bank A */
	STATIC_REG_PKT_LEN	= 22, /* Bank B */
};

enum pkt_vec {
	PKT_VEC_PKT_LEN		= 0,
	PKT_VEC_PKT_PTR		= 2,
	PKT_VEC_QSEL_SET	= 4,
	PKT_VEC_QSEL_VAL	= 6,
};

#define PKT_VEL_QSEL_SET_BIT	4

#define pv_len(np)	reg_lm(1, PKT_VEC_PKT_LEN)
#define pv_ctm_ptr(np)	reg_lm(1, PKT_VEC_PKT_PTR)
#define pv_qsel_set(np)	reg_lm(1, PKT_VEC_QSEL_SET)
#define pv_qsel_val(np)	reg_lm(1, PKT_VEC_QSEL_VAL)

#define stack_reg(np)	reg_a(STATIC_REG_STACK)
#define stack_imm(np)	imm_b(np)
#define plen_reg(np)	reg_b(STATIC_REG_PKT_LEN)
#define pptr_reg(np)	pv_ctm_ptr(np)
#define imm_a(np)	reg_a(STATIC_REG_IMM)
#define imm_b(np)	reg_b(STATIC_REG_IMM)
#define imma_a(np)	reg_a(STATIC_REG_IMMA)
#define imma_b(np)	reg_b(STATIC_REG_IMMA)
#define imm_both(np)	reg_both(STATIC_REG_IMM)
#define ret_reg(np)	imm_a(np)

#define NFP_BPF_ABI_FLAGS	reg_imm(0)
#define   NFP_BPF_ABI_FLAG_MARK	1

/**
 * struct nfp_app_bpf - bpf app priv structure
 * @app:		backpointer to the app
 * @ccm:		common control message handler data
 *
 * @bpf_dev:		BPF offload device handle
 *
 * @cmsg_key_sz:	size of key in cmsg element array
 * @cmsg_val_sz:	size of value in cmsg element array
 *
 * @cmsg_cache_cnt:	number of entries returned when cache miss
 *
 * @map_list:		list of offloaded maps
 * @maps_in_use:	number of currently offloaded maps
 * @map_elems_in_use:	number of elements allocated to offloaded maps
 *
 * @maps_neutral:	hash table of offload-neutral maps (on pointer)
 *
 * @abi_version:	global BPF ABI version
 *
 * @adjust_head:	adjust head capability
 * @adjust_head.flags:		extra flags for adjust head
 * @adjust_head.off_min:	minimal packet offset within buffer required
 * @adjust_head.off_max:	maximum packet offset within buffer allowed
 * @adjust_head.guaranteed_sub:	negative adjustment guaranteed possible
 * @adjust_head.guaranteed_add:	positive adjustment guaranteed possible
 *
 * @maps:		map capability
 * @maps.types:			supported map types
 * @maps.max_maps:		max number of maps supported
 * @maps.max_elems:		max number of entries in each map
 * @maps.max_key_sz:		max size of map key
 * @maps.max_val_sz:		max size of map value
 * @maps.max_elem_sz:		max size of map entry (key + value)
 *
 * @helpers:		helper function IDs on the device
 * @helpers.map_lookup:		map lookup helper address
 * @helpers.map_update:		map update helper address
 * @helpers.map_delete:		map delete helper address
 * @helpers.perf_event_output:	output perf event to a ring buffer
 *
 * @pseudo_random:	FW initialized the pseudo-random machinery (CSRs)
 * @queue_select:	BPF can set the RX queue ID in packet vector
 * @adjust_tail:	BPF can simply trunc packet size for adjust tail
 * @cmsg_multi_ent:	FW can pack multiple map entries in a single cmsg
 */
struct nfp_app_bpf {
	struct nfp_app *app;
	struct nfp_ccm ccm;

	struct bpf_offload_dev *bpf_dev;

	unsigned int cmsg_key_sz;
	unsigned int cmsg_val_sz;

	unsigned int cmsg_cache_cnt;

	struct list_head map_list;
	unsigned int maps_in_use;
	unsigned int map_elems_in_use;

	struct rhashtable maps_neutral;

	u32 abi_version;

	struct nfp_bpf_cap_adjust_head {
		u32 flags;
		int off_min;
		int off_max;
		int guaranteed_sub;
		int guaranteed_add;
	} adjust_head;

	struct {
		u32 types;
		u32 max_maps;
		u32 max_elems;
		u32 max_key_sz;
		u32 max_val_sz;
		u32 max_elem_sz;
	} maps;

	struct {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
	bool queue_select;
	bool adjust_tail;
	bool cmsg_multi_ent;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

struct nfp_bpf_map_word {
	unsigned char type		:4;
	unsigned char non_zero_update	:1;
};

#define NFP_BPF_MAP_CACHE_CNT		4U
#define NFP_BPF_MAP_CACHE_TIME_NS	(250 * 1000)
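
/* Descriptive note (semantics assumed from the constants' names and the
 * cache fields below): a cached control message response holds up to
 * NFP_BPF_MAP_CACHE_CNT map entries and is considered valid for
 * NFP_BPF_MAP_CACHE_TIME_NS nanoseconds.
 */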

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap:	pointer to the offloaded BPF map
 * @bpf:	back pointer to bpf app private structure
 * @tid:	table id identifying map on datapath
 *
 * @cache_lock:	protects @cache_blockers, @cache_to, @cache, @cache_gen
 * @cache_blockers:	number of ops in flight which block caching
 * @cache_gen:	counter incremented by every blocker on exit
 * @cache_to:	time when cache will no longer be valid (ns)
 * @cache:	skb with cached response
 *
 * @l:		link on the nfp_app_bpf->map_list list
 * @use_map:	map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;

	spinlock_t cache_lock;
	u32 cache_blockers;
	u32 cache_gen;
	u64 cache_to;
	struct sk_buff *cache;

	struct list_head l;
	struct nfp_bpf_map_word use_map[];
};

struct nfp_bpf_neutral_map {
	struct rhash_head l;
	struct bpf_map *ptr;
	u32 map_id;
	u32 count;
};

extern const struct rhashtable_params nfp_bpf_maps_neutral_params;

struct nfp_prog;
struct nfp_insn_meta;
typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);

#define nfp_prog_first_meta(nfp_prog)					\
	list_first_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_prog_last_meta(nfp_prog)					\
	list_last_entry(&(nfp_prog)->insns, struct nfp_insn_meta, l)
#define nfp_meta_next(meta)	list_next_entry(meta, l)
#define nfp_meta_prev(meta)	list_prev_entry(meta, l)
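
/* Illustrative sketch of walking the instruction list with the accessors
 * above (nfp_prog and process() are assumed here for the example):
 *
 *	struct nfp_insn_meta *meta;
 *
 *	for (meta = nfp_prog_first_meta(nfp_prog);
 *	     &meta->l != &nfp_prog->insns;
 *	     meta = nfp_meta_next(meta))
 *		process(meta);
 */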

/**
 * struct nfp_bpf_reg_state - register state for calls
 * @reg: BPF register state from latest path
 * @var_off: for stack arg - changes stack offset on different paths
 */
struct nfp_bpf_reg_state {
	struct bpf_reg_state reg;
	bool var_off;
};

#define FLAG_INSN_IS_JUMP_DST			BIT(0)
#define FLAG_INSN_IS_SUBPROG_START		BIT(1)
#define FLAG_INSN_PTR_CALLER_STACK_FRAME	BIT(2)
/* Instruction is pointless, noop even on its own */
#define FLAG_INSN_SKIP_NOOP			BIT(3)
/* Instruction is optimized out based on preceding instructions */
#define FLAG_INSN_SKIP_PREC_DEPENDENT		BIT(4)
/* Instruction is redundant based on preceding instructions */
#define FLAG_INSN_SKIP_VERIFIER_OPT		BIT(5)
/* Instruction needs to zero extend to high 32-bit */
#define FLAG_INSN_DO_ZEXT			BIT(6)

#define FLAG_INSN_SKIP_MASK		(FLAG_INSN_SKIP_NOOP | \
					 FLAG_INSN_SKIP_PREC_DEPENDENT | \
					 FLAG_INSN_SKIP_VERIFIER_OPT)
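
/* Illustrative: a translation pass would typically skip instructions
 * carrying any of the skip flags, e.g.:
 *
 *	if (meta->flags & FLAG_INSN_SKIP_MASK)
 *		continue;
 */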

/**
 * struct nfp_insn_meta - BPF instruction wrapper
 * @insn: BPF instruction
 * @ptr: pointer type for memory operations
 * @ldst_gather_len: memcpy length gathered from load/store sequence
 * @paired_st: the paired store insn at the head of the sequence
 * @ptr_not_const: pointer is not always constant
 * @pkt_cache: packet data cache information
 * @pkt_cache.range_start: start offset for associated packet data cache
 * @pkt_cache.range_end: end offset for associated packet data cache
 * @pkt_cache.do_init: this read needs to initialize packet data cache
 * @xadd_over_16bit: 16bit immediate is not guaranteed
 * @xadd_maybe_16bit: 16bit immediate is possible
 * @jmp_dst: destination info for jump instructions
 * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
 * @num_insns_after_br: number of insns following a branch jump, used for fixup
 * @func_id: function id for call instructions
 * @arg1: arg1 for call instructions
 * @arg2: arg2 for call instructions
 * @umin_src: copy of core verifier umin_value for src operand
 * @umax_src: copy of core verifier umax_value for src operand
 * @umin_dst: copy of core verifier umin_value for dst operand
 * @umax_dst: copy of core verifier umax_value for dst operand
 * @off: index of first generated machine instruction (in nfp_prog.prog)
 * @n: eBPF instruction number
 * @flags: eBPF instruction extra optimization flags
 * @subprog_idx: index of subprogram to which the instruction belongs
 * @double_cb: callback for second part of the instruction
 * @l: link on nfp_prog->insns list
 */
struct nfp_insn_meta {
	struct bpf_insn insn;
	union {
		/* pointer ops (ld/st/xadd) */
		struct {
			struct bpf_reg_state ptr;
			struct bpf_insn *paired_st;
			s16 ldst_gather_len;
			bool ptr_not_const;
			struct {
				s16 range_start;
				s16 range_end;
				bool do_init;
			} pkt_cache;
			bool xadd_over_16bit;
			bool xadd_maybe_16bit;
		};
		/* jump */
		struct {
			struct nfp_insn_meta *jmp_dst;
			bool jump_neg_op;
			u32 num_insns_after_br; /* only for BPF-to-BPF calls */
		};
		/* function calls */
		struct {
			u32 func_id;
			struct bpf_reg_state arg1;
			struct nfp_bpf_reg_state arg2;
		};
		/* We are interested in range info for operands of ALU
		 * operations, e.g. shift amount, multiplicand and multiplier.
		 */
		struct {
			u64 umin_src;
			u64 umax_src;
			u64 umin_dst;
			u64 umax_dst;
		};
	};
	unsigned int off;
	unsigned short n;
	unsigned short flags;
	unsigned short subprog_idx;

	instr_cb_t double_cb;

	struct list_head l;
};

#define BPF_SIZE_MASK	0x18
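
/* Clearing the size bits lets the helpers below match memory accesses of
 * any width, e.g. both (BPF_LDX | BPF_MEM | BPF_W) and
 * (BPF_LDX | BPF_MEM | BPF_DW) reduce to (BPF_LDX | BPF_MEM) once
 * BPF_SIZE_MASK is masked out.
 */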

static inline u8 mbpf_class(const struct nfp_insn_meta *meta)
{
	return BPF_CLASS(meta->insn.code);
}

static inline u8 mbpf_src(const struct nfp_insn_meta *meta)
{
	return BPF_SRC(meta->insn.code);
}

static inline u8 mbpf_op(const struct nfp_insn_meta *meta)
{
	return BPF_OP(meta->insn.code);
}

static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
{
	return BPF_MODE(meta->insn.code);
}

static inline bool is_mbpf_alu(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_ALU64 || mbpf_class(meta) == BPF_ALU;
}

static inline bool is_mbpf_load(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM);
}

static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP32;
}

static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta)
{
	return mbpf_class(meta) == BPF_JMP;
}

static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta)
{
	return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta);
}

static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM);
}

static inline bool is_mbpf_load_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_load(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_classic_load(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_LD &&
	       (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND);
}

static inline bool is_mbpf_classic_store(const struct nfp_insn_meta *meta)
{
	u8 code = meta->insn.code;

	return BPF_CLASS(code) == BPF_ST && BPF_MODE(code) == BPF_MEM;
}

static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
{
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}

static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_MUL;
}

static inline bool is_mbpf_div(const struct nfp_insn_meta *meta)
{
	return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV;
}

static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta)
{
	u8 op;

	if (is_mbpf_jmp32(meta))
		return true;

	if (!is_mbpf_jmp64(meta))
		return false;

	op = mbpf_op(meta);
	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg != BPF_PSEUDO_CALL;
}

static inline bool is_mbpf_pseudo_call(const struct nfp_insn_meta *meta)
{
	struct bpf_insn insn = meta->insn;

	return insn.code == (BPF_JMP | BPF_CALL) &&
	       insn.src_reg == BPF_PSEUDO_CALL;
}
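
/* Illustrative example of the classification helpers (the insn is a
 * made-up 64-bit register add, built with BPF_ALU64_REG() from
 * <linux/filter.h>):
 *
 *	struct nfp_insn_meta meta = {
 *		.insn = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
 *	};
 *
 *	is_mbpf_alu(&meta);	// true
 *	is_mbpf_jmp(&meta);	// false
 *	mbpf_op(&meta);		// BPF_ADD
 */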

#define STACK_FRAME_ALIGN	64

/**
 * struct nfp_bpf_subprog_info - nfp BPF sub-program (a.k.a. function) info
 * @stack_depth:	maximum stack depth used by this sub-program
 * @needs_reg_push:	whether sub-program uses callee-saved registers
 */
struct nfp_bpf_subprog_info {
	u16 stack_depth;
	u8 needs_reg_push : 1;
};

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
 * @prog: machine code
 * @prog_len: number of valid instructions in @prog array
 * @__prog_alloc_len: alloc size of @prog array
 * @stack_size: total amount of stack used
 * @verifier_meta: temporary storage for verifier's insn meta
 * @type: BPF program type
 * @last_bpf_off: address of the last instruction translated from BPF
 * @tgt_out: jump target for normal exit
 * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
 * @tgt_call_push_regs: jump target for subroutine for saving R6~R9 to stack
 * @tgt_call_pop_regs: jump target for subroutine used for restoring R6~R9
 * @n_translated: number of successfully translated instructions (for errors)
 * @error: error code if something went wrong
 * @stack_frame_depth: max stack depth for current frame
 * @adjust_head_location: if program has single adjust head call - the insn no.
 * @map_records_cnt: the number of map pointers recorded for this prog
 * @subprog_cnt: number of sub-programs, including main function
 * @map_records: the map record pointers from bpf->maps_neutral
 * @subprog: pointer to an array of objects holding info about sub-programs
 * @n_insns: number of instructions on @insns list
 * @insns: list of BPF instruction wrappers (struct nfp_insn_meta)
 */
struct nfp_prog {
	struct nfp_app_bpf *bpf;

	u64 *prog;
	unsigned int prog_len;
	unsigned int __prog_alloc_len;

	unsigned int stack_size;

	struct nfp_insn_meta *verifier_meta;

	enum bpf_prog_type type;

	unsigned int last_bpf_off;
	unsigned int tgt_out;
	unsigned int tgt_abort;
	unsigned int tgt_call_push_regs;
	unsigned int tgt_call_pop_regs;

	unsigned int n_translated;
	int error;

	unsigned int stack_frame_depth;
	unsigned int adjust_head_location;

	unsigned int map_records_cnt;
	unsigned int subprog_cnt;
	struct nfp_bpf_neutral_map **map_records;
	struct nfp_bpf_subprog_info *subprog;

	unsigned int n_insns;
	struct list_head insns;
};

/**
 * struct nfp_bpf_vnic - per-vNIC BPF priv structure
 * @tc_prog:	currently loaded cls_bpf program
 * @start_off:	address of the first instruction in the memory
 * @tgt_done:	jump target to get the next packet
 */
struct nfp_bpf_vnic {
	struct bpf_prog *tc_prog;
	unsigned int start_off;
	unsigned int tgt_done;
};

bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
		    int prev_insn_idx);
int nfp_bpf_finalize(struct bpf_verifier_env *env);

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
			     struct bpf_insn *insn);
int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;

struct netdev_bpf;
struct nfp_app;
struct nfp_net;

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
		struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack);

struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx);

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);

unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data,
			unsigned int len);
#endif