This source file includes the following definitions; a short illustrative sketch of the expression ->offload() callback that these helpers drive follows the list.
- nft_flow_rule_alloc
- nft_flow_rule_create
- nft_flow_rule_destroy
- nft_offload_set_dependency
- nft_offload_update_dependency
- nft_flow_offload_common_init
- nft_setup_cb_call
- nft_chain_offload_priority
- nft_flow_offload_rule
- nft_flow_offload_bind
- nft_flow_offload_unbind
- nft_block_setup
- nft_block_offload_cmd
- nft_indr_block_ing_cmd
- nft_indr_block_offload_cmd
- nft_flow_offload_chain
- nft_flow_rule_offload_commit
- __nft_offload_get_chain
- nft_indr_block_cb
- nft_offload_chain_clean
- nft_offload_netdev_event
- nft_offload_init
- nft_offload_exit
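
nft_flow_rule_create() below walks a rule's expressions and calls each expression's ->offload() callback to populate the flow match and action list. As a rough illustration of that interface, here is a minimal, hypothetical callback for an expression that only emits a drop action. The name nft_example_offload is invented, and the use of ctx->num_actions and FLOW_ACTION_DROP follows the in-tree expression implementations and the headers this file includes; treat this as a sketch, not part of the file itself.

/* Hypothetical expression offload callback: emits one hardware drop action. */
static int nft_example_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	struct flow_action_entry *entry;

	/* Claim the next pre-allocated action slot; nft_flow_rule_create()
	 * sized the array from the number of NFT_OFFLOAD_F_ACTION expressions.
	 */
	entry = &flow->rule->action.entries[ctx->num_actions++];
	entry->id = FLOW_ACTION_DROP;

	return 0;
}

Such an expression would advertise .offload = nft_example_offload and .offload_flags = NFT_OFFLOAD_F_ACTION in its ops, so that the counting pass in nft_flow_rule_create() reserves an action entry for it.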
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector = &flow->match.dissector;
	flow->rule->match.mask = &flow->match.mask;
	flow->rule->match.key = &flow->match.key;

	return flow;
}

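/*
 * Walk the rule's expressions twice: a first pass counts the expressions
 * that contribute flow actions, a second pass lets each expression's
 * ->offload() callback fill in the match and action list. Returns the new
 * flow rule or an ERR_PTR() on failure.
 */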
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (expr->ops && expr != nft_expr_last(rule)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	if (num_actions == 0)
		return ERR_PTR(-EOPNOTSUPP);

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (expr->ops && expr != nft_expr_last(rule)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

static int nft_setup_cb_call(struct nft_base_chain *basechain,
			     enum tc_setup_type type, void *type_data)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

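/*
 * Build a flow_cls_offload request for one rule and dispatch it via
 * TC_SETUP_CLSFLOWER to every block callback bound to the base chain.
 */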
static int nft_flow_offload_rule(struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow = {};
	struct nft_base_chain *basechain;
	struct netlink_ext_ack extack = {};
	__be16 proto = ETH_P_ALL;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow.common, proto,
				     basechain->ops.priority, &extack);
	cls_flow.command = command;
	cls_flow.cookie = (unsigned long) rule;
	if (flow)
		cls_flow.rule = flow->rule;

	return nft_setup_cb_call(basechain, TC_SETUP_CLSFLOWER, &cls_flow);
}

static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.block = &chain->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

static void nft_indr_block_ing_cmd(struct net_device *dev,
				   struct nft_base_chain *chain,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_priv,
				   enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	if (!chain)
		return;

	bo.net = dev_net(dev);
	bo.block = &chain->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	nft_block_setup(chain, &bo, cmd);
}

static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct flow_block_offload bo = {};
	struct netlink_ext_ack extack = {};

	bo.net = dev_net(dev);
	bo.block = &chain->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, cmd);

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(chain, &bo, cmd);
}

#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK

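/*
 * Bind or unbind the base chain's flow block on its device: via
 * ndo_setup_tc() when the driver implements it, otherwise through the
 * indirect block infrastructure.
 */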
static int nft_flow_offload_chain(struct nft_chain *chain,
				  u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	struct net_device *dev;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	dev = basechain->ops.dev;
	if (!dev)
		return -EOPNOTSUPP;

	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only the default accept policy is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	if (dev->netdev_ops->ndo_setup_tc)
		return nft_block_offload_cmd(basechain, dev, cmd);
	else
		return nft_indr_block_offload_cmd(basechain, dev, cmd);
}

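/*
 * Called from the nf_tables commit path. The first pass over the commit
 * list pushes chain bind/unbind and rule replace/destroy requests to the
 * hardware; the second pass releases the software flow rule copies.
 */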
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND)) {
				err = -EOPNOTSUPP;
				break;
			}
			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_DESTROY);
			break;
		}

		if (err)
			break;
	}

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		default:
			break;
		}
	}

	return err;
}

static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct net *net = dev_net(dev);
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			basechain = nft_base_chain(chain);
			if (strncmp(basechain->dev_name, dev->name, IFNAMSIZ))
				continue;

			return chain;
		}
	}

	return NULL;
}

static void nft_indr_block_cb(struct net_device *dev,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command cmd)
{
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain && chain->flags & NFT_CHAIN_HW_OFFLOAD) {
		struct nft_base_chain *basechain;

		basechain = nft_base_chain(chain);
		nft_indr_block_ing_cmd(dev, basechain, cb, cb_priv, cmd);
	}
	mutex_unlock(&net->nft.commit_mutex);
}

static void nft_offload_chain_clean(struct nft_chain *chain)
{
	struct nft_rule *rule;

	list_for_each_entry(rule, &chain->rules, list) {
		nft_flow_offload_rule(chain, rule,
				      NULL, FLOW_CLS_DESTROY);
	}

	nft_flow_offload_chain(chain, NULL, FLOW_BLOCK_UNBIND);
}

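/*
 * On NETDEV_UNREGISTER, drop any offload state that still references the
 * departing device: destroy the offloaded rules and unbind the chain's
 * flow block.
 */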
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain)
		nft_offload_chain_clean(chain);
	mutex_unlock(&net->nft.commit_mutex);

	return NOTIFY_DONE;
}

static struct flow_indr_block_entry block_ing_entry = {
	.cb = nft_indr_block_cb,
	.list = LIST_HEAD_INIT(block_ing_entry.list),
};

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call = nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	int err;

	err = register_netdevice_notifier(&nft_offload_netdev_notifier);
	if (err < 0)
		return err;

	flow_indr_add_block_cb(&block_ing_entry);

	return 0;
}

void nft_offload_exit(void)
{
	flow_indr_del_block_cb(&block_ing_entry);
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}
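
For context on how the requests built above are consumed, here is a minimal, hypothetical driver-side sketch. The foo_* names are invented; flow_block_cb_alloc()/flow_block_cb_add() and the flow_cls_offload layout belong to the generic flow offload API this file targets, and the same headers this file includes are assumed. FLOW_BLOCK_UNBIND handling is omitted for brevity.

/* Hypothetical block callback: receives the TC_SETUP_CLSFLOWER requests
 * that nft_setup_cb_call() dispatches for each offloaded rule.
 */
static int foo_block_cb(enum tc_setup_type type, void *type_data,
			void *cb_priv)
{
	struct flow_cls_offload *cls_flow = type_data;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	switch (cls_flow->command) {
	case FLOW_CLS_REPLACE:
		/* cls_flow->rule is the match/action list built by
		 * nft_flow_rule_create(); cls_flow->cookie identifies the
		 * nft rule so the FLOW_CLS_DESTROY below can find it again.
		 */
		return 0;	/* program the hardware here */
	case FLOW_CLS_DESTROY:
		return 0;	/* remove the hardware entry here */
	default:
		return -EOPNOTSUPP;
	}
}

/* Hypothetical part of the driver's ndo_setup_tc(TC_SETUP_BLOCK) handling:
 * register the callback on the flow block offload request.
 */
static int foo_setup_block(struct net_device *dev,
			   struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb;

	if (bo->command != FLOW_BLOCK_BIND)
		return -EOPNOTSUPP;

	block_cb = flow_block_cb_alloc(foo_block_cb, dev, dev, NULL);
	if (IS_ERR(block_cb))
		return PTR_ERR(block_cb);

	flow_block_cb_add(block_cb, bo);
	return 0;
}

The bind path corresponds to nft_block_offload_cmd() above: the callback the driver adds here ends up on the base chain's cb_list via nft_flow_offload_bind(), and nft_setup_cb_call() later invokes it for each rule.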