This source file includes the following definitions:
- tcf_bpf_act
- tcf_bpf_is_ebpf
- tcf_bpf_dump_bpf_info
- tcf_bpf_dump_ebpf_info
- tcf_bpf_dump
- tcf_bpf_init_from_ops
- tcf_bpf_init_from_efd
- tcf_bpf_cfg_cleanup
- tcf_bpf_prog_fill_cfg
- tcf_bpf_init
- tcf_bpf_cleanup
- tcf_bpf_walker
- tcf_bpf_search
- bpf_init_net
- bpf_exit_net
- bpf_init_module
- bpf_cleanup_module
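
Example of attaching this action with iproute2 (an illustrative sketch; the
device name, object file, and ELF section are assumptions, not taken from
this file):

    # tc qdisc add dev eth0 clsact
    # tc filter add dev eth0 ingress matchall action bpf obj act_prog.o sec action
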
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define ACT_BPF_NAME_LEN        256

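/* Parsed configuration for one act_bpf instance. Exactly one program
 * flavour is populated: classic BPF keeps a copy of its instructions in
 * bpf_ops/bpf_num_ops, while eBPF carries an optional name and a program
 * reference obtained from a user-supplied fd; is_ebpf records which
 * flavour owns the filter reference.
 */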
struct tcf_bpf_cfg {
        struct bpf_prog *filter;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        u16 bpf_num_ops;
        bool is_ebpf;
};

static unsigned int bpf_net_id;
static struct tc_action_ops act_bpf_ops;

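/* Fast-path handler: run the attached BPF program on the skb and translate
 * its return value into a TC verdict. The program pointer is read under
 * RCU so it can be replaced without stopping traffic; stats are per-CPU.
 */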
static int tcf_bpf_act(struct sk_buff *skb, const struct tc_action *act,
                       struct tcf_result *res)
{
        bool at_ingress = skb_at_tc_ingress(skb);
        struct tcf_bpf *prog = to_bpf(act);
        struct bpf_prog *filter;
        int action, filter_res;

        tcf_lastuse_update(&prog->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

        rcu_read_lock();
        filter = rcu_dereference(prog->filter);
        if (at_ingress) {
                __skb_push(skb, skb->mac_len);
                bpf_compute_data_pointers(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
                __skb_pull(skb, skb->mac_len);
        } else {
                bpf_compute_data_pointers(skb);
                filter_res = BPF_PROG_RUN(filter, skb);
        }
        rcu_read_unlock();

        /* The program's return code selects the verdict: recognized TC
         * opcodes are passed through unchanged, TC_ACT_UNSPEC defers to
         * the action's configured default, and any unrecognized value is
         * normalized to TC_ACT_UNSPEC.
         */
        switch (filter_res) {
        case TC_ACT_PIPE:
        case TC_ACT_RECLASSIFY:
        case TC_ACT_OK:
        case TC_ACT_REDIRECT:
                action = filter_res;
                break;
        case TC_ACT_SHOT:
                action = filter_res;
                qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
                break;
        case TC_ACT_UNSPEC:
                action = prog->tcf_action;
                break;
        default:
                action = TC_ACT_UNSPEC;
                break;
        }

        return action;
}

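/* An instance is eBPF iff no classic instruction copy was stored. */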
static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
        return !prog->bpf_ops;
}

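/* Dump helpers: emit the flavour-specific netlink attributes. Classic BPF
 * dumps the instruction count and raw instructions; eBPF dumps the
 * optional name plus the program's id and tag.
 */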
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}

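/* Serialize one action instance to netlink. tcf_lock keeps the dumped
 * parameters consistent with a concurrent replace; on overflow the
 * partially written attributes are trimmed and -1 is returned.
 */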
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
                        int bind, int ref)
{
        unsigned char *tp = skb_tail_pointer(skb);
        struct tcf_bpf *prog = to_bpf(act);
        struct tc_act_bpf opt = {
                .index   = prog->tcf_index,
                .refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
        };
        struct tcf_t tm;
        int ret;

        spin_lock_bh(&prog->tcf_lock);
        opt.action = prog->tcf_action;
        if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        if (tcf_bpf_is_ebpf(prog))
                ret = tcf_bpf_dump_ebpf_info(prog, skb);
        else
                ret = tcf_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        tcf_tm_dump(&tm, &prog->tcf_tm);
        if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
                          TCA_ACT_BPF_PAD))
                goto nla_put_failure;

        spin_unlock_bh(&prog->tcf_lock);
        return skb->len;

nla_put_failure:
        spin_unlock_bh(&prog->tcf_lock);
        nlmsg_trim(skb, tp);
        return -1;
}

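/* Netlink policy: bounds-check user-supplied attributes before init runs,
 * e.g. capping classic instruction arrays at BPF_MAXINSNS entries.
 */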
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
        [TCA_ACT_BPF_PARMS]     = { .len = sizeof(struct tc_act_bpf) },
        [TCA_ACT_BPF_FD]        = { .type = NLA_U32 },
        [TCA_ACT_BPF_NAME]      = { .type = NLA_NUL_STRING,
                                    .len = ACT_BPF_NAME_LEN },
        [TCA_ACT_BPF_OPS_LEN]   = { .type = NLA_U16 },
        [TCA_ACT_BPF_OPS]       = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

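/* Build a config from raw classic BPF instructions. The declared length
 * must match the attribute payload exactly; the instructions are copied
 * and handed to bpf_prog_create(), which validates and JITs them. For
 * illustration, a one-instruction classic program such as
 *      BPF_STMT(BPF_RET | BPF_K, TC_ACT_SHOT)
 * would unconditionally drop every packet it sees.
 */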
static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        cfg->bpf_ops = bpf_ops;
        cfg->bpf_num_ops = bpf_num_ops;
        cfg->filter = fp;
        cfg->is_ebpf = false;

        return 0;
}

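/* Build a config from an eBPF program fd. The fd must refer to a program
 * of type BPF_PROG_TYPE_SCHED_ACT; an optional name is duplicated for
 * later dumps.
 */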
static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_ACT_BPF_NAME]) {
                name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        cfg->bpf_name = name;
        cfg->filter = fp;
        cfg->is_ebpf = true;

        return 0;
}

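/* Release whatever a config owns: drop the eBPF reference or destroy the
 * classic program, then free the instruction copy and name (kfree(NULL)
 * is a no-op, so untouched fields are fine).
 */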
static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
        struct bpf_prog *filter = cfg->filter;

        if (filter) {
                if (cfg->is_ebpf)
                        bpf_prog_put(filter);
                else
                        bpf_prog_destroy(filter);
        }

        kfree(cfg->bpf_ops);
        kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
                                  struct tcf_bpf_cfg *cfg)
{
        cfg->is_ebpf = tcf_bpf_is_ebpf(prog);

        /* Updates to prog->filter are prevented here: this is called
         * either with tcf_lock held or from the final cleanup path, so
         * plain rcu_dereference_protected() is safe.
         */
        cfg->filter = rcu_dereference_protected(prog->filter, 1);

        cfg->bpf_ops = prog->bpf_ops;
        cfg->bpf_name = prog->bpf_name;
}

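/* Create or replace an action instance from netlink attributes. Exactly
 * one of the two flavours (classic ops or eBPF fd) must be supplied. On
 * replace, the old program is captured first and only torn down after an
 * RCU grace period, so the fast path never sees a freed program.
 */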
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
                        int replace, int bind, bool rtnl_held,
                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tcf_bpf_cfg cfg, old;
        struct tc_act_bpf *parm;
        struct tcf_bpf *prog;
        bool is_bpf, is_ebpf;
        int ret, res = 0;
        u32 index;

        if (!nla)
                return -EINVAL;

        ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
                                          act_bpf_policy, NULL);
        if (ret < 0)
                return ret;

        if (!tb[TCA_ACT_BPF_PARMS])
                return -EINVAL;

        parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
        index = parm->index;
        ret = tcf_idr_check_alloc(tn, &index, act, bind);
        if (!ret) {
                ret = tcf_idr_create(tn, index, est, act,
                                     &act_bpf_ops, bind, true);
                if (ret < 0) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }

                res = ACT_P_CREATED;
        } else if (ret > 0) {
                /* Don't override defaults. */
                if (bind)
                        return 0;

                if (!replace) {
                        tcf_idr_release(*act, bind);
                        return -EEXIST;
                }
        } else {
                return ret;
        }

        ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (ret < 0)
                goto release_idr;

        is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
        is_ebpf = tb[TCA_ACT_BPF_FD];

        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                ret = -EINVAL;
                goto put_chain;
        }

        memset(&cfg, 0, sizeof(cfg));

        ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
                       tcf_bpf_init_from_efd(tb, &cfg);
        if (ret < 0)
                goto put_chain;

        prog = to_bpf(*act);

        spin_lock_bh(&prog->tcf_lock);
        if (res != ACT_P_CREATED)
                tcf_bpf_prog_fill_cfg(prog, &old);

        prog->bpf_ops = cfg.bpf_ops;
        prog->bpf_name = cfg.bpf_name;

        if (cfg.bpf_num_ops)
                prog->bpf_num_ops = cfg.bpf_num_ops;

        goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
        rcu_assign_pointer(prog->filter, cfg.filter);
        spin_unlock_bh(&prog->tcf_lock);

        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        if (res == ACT_P_CREATED) {
                tcf_idr_insert(tn, *act);
        } else {
                /* Make sure the program being replaced is no longer executing. */
                synchronize_rcu();
                tcf_bpf_cfg_cleanup(&old);
        }

        return res;

put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

release_idr:
        tcf_idr_release(*act, bind);
        return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act)
{
        struct tcf_bpf_cfg tmp;

        tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
        tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
                          struct netlink_callback *cb, int type,
                          const struct tc_action_ops *ops,
                          struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);

        return tcf_idr_search(tn, a, index);
}

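/* Ops table wiring this action into the TC action core under the "bpf"
 * kind; the callbacks above handle execution, dump, init, and lookup.
 */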
static struct tc_action_ops act_bpf_ops __read_mostly = {
        .kind           = "bpf",
        .id             = TCA_ID_BPF,
        .owner          = THIS_MODULE,
        .act            = tcf_bpf_act,
        .dump           = tcf_bpf_dump,
        .cleanup        = tcf_bpf_cleanup,
        .init           = tcf_bpf_init,
        .walk           = tcf_bpf_walker,
        .lookup         = tcf_bpf_search,
        .size           = sizeof(struct tcf_bpf),
};

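/* Per-network-namespace plumbing and module registration: each netns gets
 * its own tc_action_net instance, keyed by bpf_net_id, so action indices
 * are namespaced.
 */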
static __net_init int bpf_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, bpf_net_id);

        return tc_action_net_init(net, tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, bpf_net_id);
}

static struct pernet_operations bpf_net_ops = {
        .init       = bpf_init_net,
        .exit_batch = bpf_exit_net,
        .id         = &bpf_net_id,
        .size       = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
        return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
        tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");