This source file includes the following definitions.
- destroy_obj_hashfn
- tcf_proto_signal_destroying
- tcf_proto_cmp
- tcf_proto_exists_destroying
- tcf_proto_signal_destroyed
- __tcf_proto_lookup_ops
- tcf_proto_lookup_ops
- register_tcf_proto_ops
- unregister_tcf_proto_ops
- tcf_queue_work
- tcf_auto_prio
- tcf_proto_check_kind
- tcf_proto_is_unlocked
- tcf_proto_create
- tcf_proto_get
- tcf_proto_destroy
- tcf_proto_put
- tcf_proto_check_delete
- tcf_proto_mark_delete
- tcf_proto_is_deleting
- tcf_chain_create
- tcf_chain_head_change_item
- tcf_chain0_head_change
- tcf_chain_detach
- tcf_block_destroy
- tcf_chain_destroy
- tcf_chain_hold
- tcf_chain_held_by_acts_only
- tcf_chain_lookup
- __tcf_chain_get
- tcf_chain_get
- tcf_chain_get_by_act
- __tcf_chain_put
- tcf_chain_put
- tcf_chain_put_by_act
- tcf_chain_put_explicitly_created
- tcf_chain_flush
- tc_indr_block_cmd
- tc_dev_block
- tc_indr_block_get_and_cmd
- tc_indr_block_call
- tcf_block_offload_in_use
- tcf_block_offload_cmd
- tcf_block_offload_bind
- tcf_block_offload_unbind
- tcf_chain0_head_change_cb_add
- tcf_chain0_head_change_cb_del
- tcf_block_insert
- tcf_block_remove
- tcf_block_create
- tcf_block_lookup
- tcf_block_refcnt_get
- __tcf_get_next_chain
- tcf_get_next_chain
- __tcf_get_next_proto
- tcf_get_next_proto
- tcf_block_flush_all_chains
- __tcf_qdisc_find
- __tcf_qdisc_cl_find
- __tcf_block_find
- __tcf_block_put
- tcf_block_refcnt_put
- tcf_block_find
- tcf_block_release
- tcf_block_owner_netif_keep_dst
- tcf_block_netif_keep_dst
- tcf_block_owner_add
- tcf_block_owner_del
- tcf_block_get_ext
- tcf_chain_head_change_dflt
- tcf_block_get
- tcf_block_put_ext
- tcf_block_put
- tcf_block_playback_offloads
- tcf_block_bind
- tcf_block_unbind
- tcf_block_setup
- tcf_classify
- tcf_chain_tp_prev
- tcf_chain_tp_insert
- tcf_chain_tp_remove
- tcf_chain_tp_insert_unique
- tcf_chain_tp_delete_empty
- tcf_chain_tp_find
- tcf_fill_node
- tfilter_notify
- tfilter_del_notify
- tfilter_notify_chain
- tfilter_put
- tc_new_tfilter
- tc_del_tfilter
- tc_get_tfilter
- tcf_node_dump
- tcf_chain_dump
- tc_dump_tfilter
- tc_chain_fill_node
- tc_chain_notify
- tc_chain_notify_delete
- tc_chain_tmplt_add
- tc_chain_tmplt_del
- tc_ctl_chain
- tc_dump_chain
- tcf_exts_destroy
- tcf_exts_validate
- tcf_exts_change
- tcf_exts_first_act
- tcf_exts_dump
- tcf_exts_dump_stats
- tcf_block_offload_inc
- tcf_block_offload_dec
- tc_cls_offload_cnt_update
- tc_cls_offload_cnt_reset
- __tc_setup_cb_call
- tc_setup_cb_call
- tc_setup_cb_add
- tc_setup_cb_replace
- tc_setup_cb_destroy
- tc_setup_cb_reoffload
- tc_cleanup_flow_action
- tcf_mirred_get_dev
- tcf_tunnel_encap_put_tunnel
- tcf_tunnel_encap_get_tunnel
- tcf_sample_get_group
- tc_setup_flow_action
- tcf_exts_num_actions
- tcf_net_init
- tcf_net_exit
- tc_filter_init
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_api.c	Packet classifier API.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  *
9  * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
10  */
11
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/skbuff.h>
19 #include <linux/init.h>
20 #include <linux/kmod.h>
21 #include <linux/slab.h>
22 #include <linux/idr.h>
23 #include <linux/rhashtable.h>
24 #include <linux/jhash.h>
25 #include <net/net_namespace.h>
26 #include <net/sock.h>
27 #include <net/netlink.h>
28 #include <net/pkt_sched.h>
29 #include <net/pkt_cls.h>
30 #include <net/tc_act/tc_pedit.h>
31 #include <net/tc_act/tc_mirred.h>
32 #include <net/tc_act/tc_vlan.h>
33 #include <net/tc_act/tc_tunnel_key.h>
34 #include <net/tc_act/tc_csum.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_police.h>
37 #include <net/tc_act/tc_sample.h>
38 #include <net/tc_act/tc_skbedit.h>
39 #include <net/tc_act/tc_ct.h>
40 #include <net/tc_act/tc_mpls.h>
41 #include <net/flow_offload.h>
42
43 extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
44
45 /* The list of all installed classifier types */
46 static LIST_HEAD(tcf_proto_base);
47
48 /* Protects list of registered TC modules. It is pure SMP lock. */
49 static DEFINE_RWLOCK(cls_mod_lock);
50
51 static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
52 {
53 return jhash_3words(tp->chain->index, tp->prio,
54 (__force __u32)tp->protocol, 0);
55 }
56
57 static void tcf_proto_signal_destroying(struct tcf_chain *chain,
58 struct tcf_proto *tp)
59 {
60 struct tcf_block *block = chain->block;
61
62 mutex_lock(&block->proto_destroy_lock);
63 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
64 destroy_obj_hashfn(tp));
65 mutex_unlock(&block->proto_destroy_lock);
66 }
67
68 static bool tcf_proto_cmp(const struct tcf_proto *tp1,
69 const struct tcf_proto *tp2)
70 {
71 return tp1->chain->index == tp2->chain->index &&
72 tp1->prio == tp2->prio &&
73 tp1->protocol == tp2->protocol;
74 }
75
76 static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
77 struct tcf_proto *tp)
78 {
79 u32 hash = destroy_obj_hashfn(tp);
80 struct tcf_proto *iter;
81 bool found = false;
82
83 rcu_read_lock();
84 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
85 destroy_ht_node, hash) {
86 if (tcf_proto_cmp(tp, iter)) {
87 found = true;
88 break;
89 }
90 }
91 rcu_read_unlock();
92
93 return found;
94 }
95
96 static void
97 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
98 {
99 struct tcf_block *block = chain->block;
100
101 mutex_lock(&block->proto_destroy_lock);
102 if (hash_hashed(&tp->destroy_ht_node))
103 hash_del_rcu(&tp->destroy_ht_node);
104 mutex_unlock(&block->proto_destroy_lock);
105 }
106
107 /* Find classifier type by string name */
108
109 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
110 {
111 const struct tcf_proto_ops *t, *res = NULL;
112
113 if (kind) {
114 read_lock(&cls_mod_lock);
115 list_for_each_entry(t, &tcf_proto_base, head) {
116 if (strcmp(kind, t->kind) == 0) {
117 if (try_module_get(t->owner))
118 res = t;
119 break;
120 }
121 }
122 read_unlock(&cls_mod_lock);
123 }
124 return res;
125 }
126
127 static const struct tcf_proto_ops *
128 tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
129 struct netlink_ext_ack *extack)
130 {
131 const struct tcf_proto_ops *ops;
132
133 ops = __tcf_proto_lookup_ops(kind);
134 if (ops)
135 return ops;
136 #ifdef CONFIG_MODULES
137 if (rtnl_held)
138 rtnl_unlock();
139 request_module("cls_%s", kind);
140 if (rtnl_held)
141 rtnl_lock();
142 ops = __tcf_proto_lookup_ops(kind);
143 /* We dropped the RTNL semaphore in order to perform
144  * the module load. So, even if we succeeded in loading
145  * the module we have to replay the request. We indicate
146  * this using -EAGAIN.
147  */
148 if (ops) {
149 module_put(ops->owner);
150 return ERR_PTR(-EAGAIN);
151 }
152 #endif
153 NL_SET_ERR_MSG(extack, "TC classifier not found");
154 return ERR_PTR(-ENOENT);
155 }
156
157 /* Register(unregister) new classifier type */
158
159 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
160 {
161 struct tcf_proto_ops *t;
162 int rc = -EEXIST;
163
164 write_lock(&cls_mod_lock);
165 list_for_each_entry(t, &tcf_proto_base, head)
166 if (!strcmp(ops->kind, t->kind))
167 goto out;
168
169 list_add_tail(&ops->head, &tcf_proto_base);
170 rc = 0;
171 out:
172 write_unlock(&cls_mod_lock);
173 return rc;
174 }
175 EXPORT_SYMBOL(register_tcf_proto_ops);
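/* A minimal registration sketch (illustrative, not part of this file):
 * a classifier module registers its ops from module init and removes
 * them on exit. "cls_foo" and the foo_* callbacks are hypothetical.
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 */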
176
177 static struct workqueue_struct *tc_filter_wq;
178
179 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
180 {
181 struct tcf_proto_ops *t;
182 int rc = -ENOENT;
183
184 /* Wait for outstanding call_rcu()s, if any, from a
185  * tcf_proto_ops's destroy() handler.
186  */
187 rcu_barrier();
188 flush_workqueue(tc_filter_wq);
189
190 write_lock(&cls_mod_lock);
191 list_for_each_entry(t, &tcf_proto_base, head) {
192 if (t == ops) {
193 list_del(&t->head);
194 rc = 0;
195 break;
196 }
197 }
198 write_unlock(&cls_mod_lock);
199 return rc;
200 }
201 EXPORT_SYMBOL(unregister_tcf_proto_ops);
202
203 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
204 {
205 INIT_RCU_WORK(rwork, func);
206 return queue_rcu_work(tc_filter_wq, rwork);
207 }
208 EXPORT_SYMBOL(tcf_queue_work);
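/* tcf_queue_work() only runs 'func' on tc_filter_wq after an RCU grace
 * period has elapsed, so readers still traversing filter lists under
 * rcu_read_lock() are done before the work executes. A typical
 * (illustrative) call from a classifier's ->destroy(), where 'f' embeds
 * a struct rcu_work and foo_delete_filter_work() is hypothetical:
 *
 *	tcf_queue_work(&f->rwork, foo_delete_filter_work);
 */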
209
210 /* Select new prio value from the range, managed by kernel. */
211
212 static inline u32 tcf_auto_prio(struct tcf_proto *tp)
213 {
214 u32 first = TC_H_MAKE(0xC0000000U, 0U);
215
216 if (tp)
217 first = tp->prio - 1;
218
219 return TC_H_MAJ(first);
220 }
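/* Worked example: with no filter installed (tp == NULL), the allocated
 * value is TC_H_MAJ(0xC0000000), i.e. preference 0xC000 (49152 as
 * printed by tc). With an existing tp, the new filter gets a priority
 * just below it, so auto-allocated preferences count downwards.
 */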
221
222 static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
223 {
224 if (kind)
225 return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
226 memset(name, 0, IFNAMSIZ);
227 return false;
228 }
229
230 static bool tcf_proto_is_unlocked(const char *kind)
231 {
232 const struct tcf_proto_ops *ops;
233 bool ret;
234
235 if (strlen(kind) == 0)
236 return false;
237
238 ops = tcf_proto_lookup_ops(kind, false, NULL);
239 /* Lookup may fail, e.g. when the module could not be loaded;
240  * in that case conservatively report the classifier as locked.
241  */
242 if (IS_ERR(ops))
243 return false;
244
245 ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
246 module_put(ops->owner);
247 return ret;
248 }
249
250 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
251 u32 prio, struct tcf_chain *chain,
252 bool rtnl_held,
253 struct netlink_ext_ack *extack)
254 {
255 struct tcf_proto *tp;
256 int err;
257
258 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
259 if (!tp)
260 return ERR_PTR(-ENOBUFS);
261
262 tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
263 if (IS_ERR(tp->ops)) {
264 err = PTR_ERR(tp->ops);
265 goto errout;
266 }
267 tp->classify = tp->ops->classify;
268 tp->protocol = protocol;
269 tp->prio = prio;
270 tp->chain = chain;
271 spin_lock_init(&tp->lock);
272 refcount_set(&tp->refcnt, 1);
273
274 err = tp->ops->init(tp);
275 if (err) {
276 module_put(tp->ops->owner);
277 goto errout;
278 }
279 return tp;
280
281 errout:
282 kfree(tp);
283 return ERR_PTR(err);
284 }
285
286 static void tcf_proto_get(struct tcf_proto *tp)
287 {
288 refcount_inc(&tp->refcnt);
289 }
290
291 static void tcf_chain_put(struct tcf_chain *chain);
292
293 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
294 bool sig_destroy, struct netlink_ext_ack *extack)
295 {
296 tp->ops->destroy(tp, rtnl_held, extack);
297 if (sig_destroy)
298 tcf_proto_signal_destroyed(tp->chain, tp);
299 tcf_chain_put(tp->chain);
300 module_put(tp->ops->owner);
301 kfree_rcu(tp, rcu);
302 }
303
304 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
305 struct netlink_ext_ack *extack)
306 {
307 if (refcount_dec_and_test(&tp->refcnt))
308 tcf_proto_destroy(tp, rtnl_held, true, extack);
309 }
310
311 static bool tcf_proto_check_delete(struct tcf_proto *tp)
312 {
313 if (tp->ops->delete_empty)
314 return tp->ops->delete_empty(tp);
315
316 tp->deleting = true;
317 return tp->deleting;
318 }
319
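/* The 'deleting' flag managed below bridges concurrent deletion and
 * the unlocked iterators: when __tcf_get_next_proto() sees
 * tcf_proto_is_deleting(tp), it restarts from the chain head and skips
 * to the first live tp above the deleted priority, since the deleted
 * tp's next pointer may no longer be valid.
 */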
320 static void tcf_proto_mark_delete(struct tcf_proto *tp)
321 {
322 spin_lock(&tp->lock);
323 tp->deleting = true;
324 spin_unlock(&tp->lock);
325 }
326
327 static bool tcf_proto_is_deleting(struct tcf_proto *tp)
328 {
329 bool deleting;
330
331 spin_lock(&tp->lock);
332 deleting = tp->deleting;
333 spin_unlock(&tp->lock);
334
335 return deleting;
336 }
337
338 #define ASSERT_BLOCK_LOCKED(block) \
339 lockdep_assert_held(&(block)->lock)
340
341 struct tcf_filter_chain_list_item {
342 struct list_head list;
343 tcf_chain_head_change_t *chain_head_change;
344 void *chain_head_change_priv;
345 };
346
347 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
348 u32 chain_index)
349 {
350 struct tcf_chain *chain;
351
352 ASSERT_BLOCK_LOCKED(block);
353
354 chain = kzalloc(sizeof(*chain), GFP_KERNEL);
355 if (!chain)
356 return NULL;
357 list_add_tail(&chain->list, &block->chain_list);
358 mutex_init(&chain->filter_chain_lock);
359 chain->block = block;
360 chain->index = chain_index;
361 chain->refcnt = 1;
362 if (!chain->index)
363 block->chain0.chain = chain;
364 return chain;
365 }
366
367 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
368 struct tcf_proto *tp_head)
369 {
370 if (item->chain_head_change)
371 item->chain_head_change(tp_head, item->chain_head_change_priv);
372 }
373
374 static void tcf_chain0_head_change(struct tcf_chain *chain,
375 struct tcf_proto *tp_head)
376 {
377 struct tcf_filter_chain_list_item *item;
378 struct tcf_block *block = chain->block;
379
380 if (chain->index)
381 return;
382
383 mutex_lock(&block->lock);
384 list_for_each_entry(item, &block->chain0.filter_chain_list, list)
385 tcf_chain_head_change_item(item, tp_head);
386 mutex_unlock(&block->lock);
387 }
388
389
390 /* Returns true if block can be safely freed. */
391 static bool tcf_chain_detach(struct tcf_chain *chain)
392 {
393 struct tcf_block *block = chain->block;
394
395 ASSERT_BLOCK_LOCKED(block);
396
397 list_del(&chain->list);
398 if (!chain->index)
399 block->chain0.chain = NULL;
400
401 if (list_empty(&block->chain_list) &&
402 refcount_read(&block->refcnt) == 0)
403 return true;
404
405 return false;
406 }
407
408 static void tcf_block_destroy(struct tcf_block *block)
409 {
410 mutex_destroy(&block->lock);
411 mutex_destroy(&block->proto_destroy_lock);
412 kfree_rcu(block, rcu);
413 }
414
415 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
416 {
417 struct tcf_block *block = chain->block;
418
419 mutex_destroy(&chain->filter_chain_lock);
420 kfree_rcu(chain, rcu);
421 if (free_block)
422 tcf_block_destroy(block);
423 }
424
425 static void tcf_chain_hold(struct tcf_chain *chain)
426 {
427 ASSERT_BLOCK_LOCKED(chain->block);
428
429 ++chain->refcnt;
430 }
431
432 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
433 {
434 ASSERT_BLOCK_LOCKED(chain->block);
435
436 /* In case all the references are action references, this
437  * chain should not be shown to the user.
438  */
439 return chain->refcnt == chain->action_refcnt;
440 }
441
442 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
443 u32 chain_index)
444 {
445 struct tcf_chain *chain;
446
447 ASSERT_BLOCK_LOCKED(block);
448
449 list_for_each_entry(chain, &block->chain_list, list) {
450 if (chain->index == chain_index)
451 return chain;
452 }
453 return NULL;
454 }
455
456 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
457 u32 seq, u16 flags, int event, bool unicast);
458
459 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
460 u32 chain_index, bool create,
461 bool by_act)
462 {
463 struct tcf_chain *chain = NULL;
464 bool is_first_reference;
465
466 mutex_lock(&block->lock);
467 chain = tcf_chain_lookup(block, chain_index);
468 if (chain) {
469 tcf_chain_hold(chain);
470 } else {
471 if (!create)
472 goto errout;
473 chain = tcf_chain_create(block, chain_index);
474 if (!chain)
475 goto errout;
476 }
477
478 if (by_act)
479 ++chain->action_refcnt;
480 is_first_reference = chain->refcnt - chain->action_refcnt == 1;
481 mutex_unlock(&block->lock);
482
483 /* Send notification only in case we got the first
484  * non-action reference. Until then, chain acts only as
485  * placeholder for actions pointing to it and user should not
486  * see them.
487  */
488 if (is_first_reference && !by_act)
489 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
490 RTM_NEWCHAIN, false);
491
492 return chain;
493
494 errout:
495 mutex_unlock(&block->lock);
496 return chain;
497 }
498
499 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
500 bool create)
501 {
502 return __tcf_chain_get(block, chain_index, create, false);
503 }
504
505 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
506 {
507 return __tcf_chain_get(block, chain_index, true, true);
508 }
509 EXPORT_SYMBOL(tcf_chain_get_by_act);
510
511 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
512 void *tmplt_priv);
513 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
514 void *tmplt_priv, u32 chain_index,
515 struct tcf_block *block, struct sk_buff *oskb,
516 u32 seq, u16 flags, bool unicast);
517
518 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
519 bool explicitly_created)
520 {
521 struct tcf_block *block = chain->block;
522 const struct tcf_proto_ops *tmplt_ops;
523 bool free_block = false;
524 unsigned int refcnt;
525 void *tmplt_priv;
526
527 mutex_lock(&block->lock);
528 if (explicitly_created) {
529 if (!chain->explicitly_created) {
530 mutex_unlock(&block->lock);
531 return;
532 }
533 chain->explicitly_created = false;
534 }
535
536 if (by_act)
537 chain->action_refcnt--;
538
539 /* tc_chain_notify_delete can't be called while holding block lock.
540  * However, when block is unlocked chain can be changed concurrently, so
541  * save these to temporary variables.
542  */
543 refcnt = --chain->refcnt;
544 tmplt_ops = chain->tmplt_ops;
545 tmplt_priv = chain->tmplt_priv;
546
547 /* The last dropped non-action reference will trigger notification. */
548 if (refcnt - chain->action_refcnt == 0 && !by_act) {
549 tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
550 block, NULL, 0, 0, false);
551 /* Last reference to chain, no need to lock. */
552 chain->flushing = false;
553 }
554
555 if (refcnt == 0)
556 free_block = tcf_chain_detach(chain);
557 mutex_unlock(&block->lock);
558
559 if (refcnt == 0) {
560 tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
561 tcf_chain_destroy(chain, free_block);
562 }
563 }
564
565 static void tcf_chain_put(struct tcf_chain *chain)
566 {
567 __tcf_chain_put(chain, false, false);
568 }
569
570 void tcf_chain_put_by_act(struct tcf_chain *chain)
571 {
572 __tcf_chain_put(chain, true, false);
573 }
574 EXPORT_SYMBOL(tcf_chain_put_by_act);
575
576 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
577 {
578 __tcf_chain_put(chain, false, true);
579 }
580
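/* Flush all filters from the chain in two steps: first, under
 * filter_chain_lock, every tp is marked as being destroyed and the
 * chain head is cleared; then, outside the lock, the references are
 * dropped so tp->ops->destroy() can run without the chain mutex held.
 */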
581 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
582 {
583 struct tcf_proto *tp, *tp_next;
584
585 mutex_lock(&chain->filter_chain_lock);
586 tp = tcf_chain_dereference(chain->filter_chain, chain);
587 while (tp) {
588 tp_next = rcu_dereference_protected(tp->next, 1);
589 tcf_proto_signal_destroying(chain, tp);
590 tp = tp_next;
591 }
592 tp = tcf_chain_dereference(chain->filter_chain, chain);
593 RCU_INIT_POINTER(chain->filter_chain, NULL);
594 tcf_chain0_head_change(chain, NULL);
595 chain->flushing = true;
596 mutex_unlock(&chain->filter_chain_lock);
597
598 while (tp) {
599 tp_next = rcu_dereference_protected(tp->next, 1);
600 tcf_proto_put(tp, rtnl_held, NULL);
601 tp = tp_next;
602 }
603 }
604
605 static int tcf_block_setup(struct tcf_block *block,
606 struct flow_block_offload *bo);
607
608 static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
609 flow_indr_block_bind_cb_t *cb, void *cb_priv,
610 enum flow_block_command command, bool ingress)
611 {
612 struct flow_block_offload bo = {
613 .command = command,
614 .binder_type = ingress ?
615 FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
616 FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
617 .net = dev_net(dev),
618 .block_shared = tcf_block_non_null_shared(block),
619 };
620 INIT_LIST_HEAD(&bo.cb_list);
621
622 if (!block)
623 return;
624
625 bo.block = &block->flow_block;
626
627 down_write(&block->cb_lock);
628 cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);
629
630 tcf_block_setup(block, &bo);
631 up_write(&block->cb_lock);
632 }
633
634 static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
635 {
636 const struct Qdisc_class_ops *cops;
637 const struct Qdisc_ops *ops;
638 struct Qdisc *qdisc;
639
640 if (!dev_ingress_queue(dev))
641 return NULL;
642
643 qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
644 if (!qdisc)
645 return NULL;
646
647 ops = qdisc->ops;
648 if (!ops)
649 return NULL;
650
651 if (!ingress && !strcmp("ingress", ops->id))
652 return NULL;
653
654 cops = ops->cl_ops;
655 if (!cops)
656 return NULL;
657
658 if (!cops->tcf_block)
659 return NULL;
660
661 return cops->tcf_block(qdisc,
662 ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
663 NULL);
664 }
665
666 static void tc_indr_block_get_and_cmd(struct net_device *dev,
667 flow_indr_block_bind_cb_t *cb,
668 void *cb_priv,
669 enum flow_block_command command)
670 {
671 struct tcf_block *block;
672
673 block = tc_dev_block(dev, true);
674 tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);
675
676 block = tc_dev_block(dev, false);
677 tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
678 }
679
680 static void tc_indr_block_call(struct tcf_block *block,
681 struct net_device *dev,
682 struct tcf_block_ext_info *ei,
683 enum flow_block_command command,
684 struct netlink_ext_ack *extack)
685 {
686 struct flow_block_offload bo = {
687 .command = command,
688 .binder_type = ei->binder_type,
689 .net = dev_net(dev),
690 .block = &block->flow_block,
691 .block_shared = tcf_block_shared(block),
692 .extack = extack,
693 };
694 INIT_LIST_HEAD(&bo.cb_list);
695
696 flow_indr_block_call(dev, &bo, command);
697 tcf_block_setup(block, &bo);
698 }
699
700 static bool tcf_block_offload_in_use(struct tcf_block *block)
701 {
702 return atomic_read(&block->offloadcnt);
703 }
704
705 static int tcf_block_offload_cmd(struct tcf_block *block,
706 struct net_device *dev,
707 struct tcf_block_ext_info *ei,
708 enum flow_block_command command,
709 struct netlink_ext_ack *extack)
710 {
711 struct flow_block_offload bo = {};
712 int err;
713
714 bo.net = dev_net(dev);
715 bo.command = command;
716 bo.binder_type = ei->binder_type;
717 bo.block = &block->flow_block;
718 bo.block_shared = tcf_block_shared(block);
719 bo.extack = extack;
720 INIT_LIST_HEAD(&bo.cb_list);
721
722 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
723 if (err < 0)
724 return err;
725
726 return tcf_block_setup(block, &bo);
727 }
728
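/* Bind the block to the device for hardware offload. A device without
 * ndo_setup_tc (or one returning -EOPNOTSUPP) is still bound, but
 * nooffloaddevcnt is bumped so filters that must be offloaded can be
 * refused later; binding such a device to a block that already has
 * offloaded filters fails outright.
 */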
729 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
730 struct tcf_block_ext_info *ei,
731 struct netlink_ext_ack *extack)
732 {
733 struct net_device *dev = q->dev_queue->dev;
734 int err;
735
736 down_write(&block->cb_lock);
737 if (!dev->netdev_ops->ndo_setup_tc)
738 goto no_offload_dev_inc;
739
740 /* If tc offload feature is disabled and the block we try to bind
741  * to already has some offloaded filters, forbid to bind.
742  */
743 if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
744 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
745 err = -EOPNOTSUPP;
746 goto err_unlock;
747 }
748
749 err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
750 if (err == -EOPNOTSUPP)
751 goto no_offload_dev_inc;
752 if (err)
753 goto err_unlock;
754
755 tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
756 up_write(&block->cb_lock);
757 return 0;
758
759 no_offload_dev_inc:
760 if (tcf_block_offload_in_use(block)) {
761 err = -EOPNOTSUPP;
762 goto err_unlock;
763 }
764 err = 0;
765 block->nooffloaddevcnt++;
766 tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
767 err_unlock:
768 up_write(&block->cb_lock);
769 return err;
770 }
771
772 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
773 struct tcf_block_ext_info *ei)
774 {
775 struct net_device *dev = q->dev_queue->dev;
776 int err;
777
778 down_write(&block->cb_lock);
779 tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
780
781 if (!dev->netdev_ops->ndo_setup_tc)
782 goto no_offload_dev_dec;
783 err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
784 if (err == -EOPNOTSUPP)
785 goto no_offload_dev_dec;
786 up_write(&block->cb_lock);
787 return;
788
789 no_offload_dev_dec:
790 WARN_ON(block->nooffloaddevcnt-- == 0);
791 up_write(&block->cb_lock);
792 }
793
794 static int
795 tcf_chain0_head_change_cb_add(struct tcf_block *block,
796 struct tcf_block_ext_info *ei,
797 struct netlink_ext_ack *extack)
798 {
799 struct tcf_filter_chain_list_item *item;
800 struct tcf_chain *chain0;
801
802 item = kmalloc(sizeof(*item), GFP_KERNEL);
803 if (!item) {
804 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
805 return -ENOMEM;
806 }
807 item->chain_head_change = ei->chain_head_change;
808 item->chain_head_change_priv = ei->chain_head_change_priv;
809
810 mutex_lock(&block->lock);
811 chain0 = block->chain0.chain;
812 if (chain0)
813 tcf_chain_hold(chain0);
814 else
815 list_add(&item->list, &block->chain0.filter_chain_list);
816 mutex_unlock(&block->lock);
817
818 if (chain0) {
819 struct tcf_proto *tp_head;
820
821 mutex_lock(&chain0->filter_chain_lock);
822
823 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
824 if (tp_head)
825 tcf_chain_head_change_item(item, tp_head);
826
827 mutex_lock(&block->lock);
828 list_add(&item->list, &block->chain0.filter_chain_list);
829 mutex_unlock(&block->lock);
830
831 mutex_unlock(&chain0->filter_chain_lock);
832 tcf_chain_put(chain0);
833 }
834
835 return 0;
836 }
837
838 static void
839 tcf_chain0_head_change_cb_del(struct tcf_block *block,
840 struct tcf_block_ext_info *ei)
841 {
842 struct tcf_filter_chain_list_item *item;
843
844 mutex_lock(&block->lock);
845 list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
846 if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
847 (item->chain_head_change == ei->chain_head_change &&
848 item->chain_head_change_priv == ei->chain_head_change_priv)) {
849 if (block->chain0.chain)
850 tcf_chain_head_change_item(item, NULL);
851 list_del(&item->list);
852 mutex_unlock(&block->lock);
853
854 kfree(item);
855 return;
856 }
857 }
858 mutex_unlock(&block->lock);
859 WARN_ON(1);
860 }
861
862 struct tcf_net {
863 spinlock_t idr_lock;
864 struct idr idr;
865 };
866
867 static unsigned int tcf_net_id;
868
869 static int tcf_block_insert(struct tcf_block *block, struct net *net,
870 struct netlink_ext_ack *extack)
871 {
872 struct tcf_net *tn = net_generic(net, tcf_net_id);
873 int err;
874
875 idr_preload(GFP_KERNEL);
876 spin_lock(&tn->idr_lock);
877 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
878 GFP_NOWAIT);
879 spin_unlock(&tn->idr_lock);
880 idr_preload_end();
881
882 return err;
883 }
884
885 static void tcf_block_remove(struct tcf_block *block, struct net *net)
886 {
887 struct tcf_net *tn = net_generic(net, tcf_net_id);
888
889 spin_lock(&tn->idr_lock);
890 idr_remove(&tn->idr, block->index);
891 spin_unlock(&tn->idr_lock);
892 }
893
894 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
895 u32 block_index,
896 struct netlink_ext_ack *extack)
897 {
898 struct tcf_block *block;
899
900 block = kzalloc(sizeof(*block), GFP_KERNEL);
901 if (!block) {
902 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
903 return ERR_PTR(-ENOMEM);
904 }
905 mutex_init(&block->lock);
906 mutex_init(&block->proto_destroy_lock);
907 init_rwsem(&block->cb_lock);
908 flow_block_init(&block->flow_block);
909 INIT_LIST_HEAD(&block->chain_list);
910 INIT_LIST_HEAD(&block->owner_list);
911 INIT_LIST_HEAD(&block->chain0.filter_chain_list);
912
913 refcount_set(&block->refcnt, 1);
914 block->net = net;
915 block->index = block_index;
916
917 /* Don't store q pointer for blocks which are shared */
918 if (!tcf_block_shared(block))
919 block->q = q;
920 return block;
921 }
922
923 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
924 {
925 struct tcf_net *tn = net_generic(net, tcf_net_id);
926
927 return idr_find(&tn->idr, block_index);
928 }
929
930 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
931 {
932 struct tcf_block *block;
933
934 rcu_read_lock();
935 block = tcf_block_lookup(net, block_index);
936 if (block && !refcount_inc_not_zero(&block->refcnt))
937 block = NULL;
938 rcu_read_unlock();
939
940 return block;
941 }
942
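/* tcf_block_refcnt_get() above pairs RCU with refcount_inc_not_zero():
 * a block found in the idr may be concurrently freed once its refcount
 * hits zero, so a reference is taken only while the count is still
 * nonzero and a failed increment is treated as "not found".
 */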
943 static struct tcf_chain *
944 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
945 {
946 mutex_lock(&block->lock);
947 if (chain)
948 chain = list_is_last(&chain->list, &block->chain_list) ?
949 NULL : list_next_entry(chain, list);
950 else
951 chain = list_first_entry_or_null(&block->chain_list,
952 struct tcf_chain, list);
953
954 /* skip all action-only chains */
955 while (chain && tcf_chain_held_by_acts_only(chain))
956 chain = list_is_last(&chain->list, &block->chain_list) ?
957 NULL : list_next_entry(chain, list);
958
959 if (chain)
960 tcf_chain_hold(chain);
961 mutex_unlock(&block->lock);
962
963 return chain;
964 }
965
966
967 /* Function to be used by all clients that want to iterate over
968  * all chains on block. It properly obtains block->lock and takes
969  * reference to chain before returning it. Users of this function
970  * must be tolerant to concurrent chain insertion/deletion or
971  * ensure that no concurrent chain modification takes place. The
972  * reference to the previous chain, if any, is released before the
973  * next chain is returned.
974  */
975 struct tcf_chain *
976 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
977 {
978 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
979
980 if (chain)
981 tcf_chain_put(chain);
982
983 return chain_next;
984 }
985 EXPORT_SYMBOL(tcf_get_next_chain);
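/* Iteration sketch (illustrative), mirroring
 * tcf_block_flush_all_chains() below:
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...;
 *
 * Each call releases the reference on the chain passed in and returns
 * the next chain with a fresh reference taken.
 */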
986
987 static struct tcf_proto *
988 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
989 {
990 u32 prio = 0;
991
992 ASSERT_RTNL();
993 mutex_lock(&chain->filter_chain_lock);
994
995 if (!tp) {
996 tp = tcf_chain_dereference(chain->filter_chain, chain);
997 } else if (tcf_proto_is_deleting(tp)) {
998 /* 'deleting' flag is set and chain->filter_chain_lock was
999  * unlocked, which means next pointer could be invalid. Restart
1000  * search.
1001  */
1002 prio = tp->prio + 1;
1003 tp = tcf_chain_dereference(chain->filter_chain, chain);
1004
1005 for (; tp; tp = tcf_chain_dereference(tp->next, chain))
1006 if (!tp->deleting && tp->prio >= prio)
1007 break;
1008 } else {
1009 tp = tcf_chain_dereference(tp->next, chain);
1010 }
1011
1012 if (tp)
1013 tcf_proto_get(tp);
1014
1015 mutex_unlock(&chain->filter_chain_lock);
1016
1017 return tp;
1018 }
1019
1020
1021 /* Function to be used by all clients that want to iterate over
1022  * all tp's on chain. Users of this function must be tolerant to
1023  * concurrent tp insertion/deletion or ensure that no concurrent
1024  * chain modification takes place. The rtnl lock requirement is
1025  * indicated by the rtnl_held argument, and the reference to the
1026  * previous tp, if any, is released before the next is returned.
1027  */
1028 struct tcf_proto *
1029 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
1030 bool rtnl_held)
1031 {
1032 struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);
1033
1034 if (tp)
1035 tcf_proto_put(tp, rtnl_held, NULL);
1036
1037 return tp_next;
1038 }
1039 EXPORT_SYMBOL(tcf_get_next_proto);
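/* Iteration sketch (illustrative), mirroring tfilter_notify_chain():
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp, rtnl_held))
 *		...;
 *
 * As with chains, the tp passed in is released and the returned tp
 * carries its own reference.
 */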
1040
1041 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1042 {
1043 struct tcf_chain *chain;
1044
1045 /* Last reference to block. At this point chains cannot be added or
1046  * removed concurrently.
1047  */
1048 for (chain = tcf_get_next_chain(block, NULL);
1049 chain;
1050 chain = tcf_get_next_chain(block, chain)) {
1051 tcf_chain_put_explicitly_created(chain);
1052 tcf_chain_flush(chain, rtnl_held);
1053 }
1054 }
1055
1056
1057 /* Look up the qdisc referenced by a filter request and take a
1058  * reference on it; *q and *parent are set on success.
1059  */
1060 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
1061 u32 *parent, int ifindex, bool rtnl_held,
1062 struct netlink_ext_ack *extack)
1063 {
1064 const struct Qdisc_class_ops *cops;
1065 struct net_device *dev;
1066 int err = 0;
1067
1068 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1069 return 0;
1070
1071 rcu_read_lock();
1072
1073 /* Find link */
1074 dev = dev_get_by_index_rcu(net, ifindex);
1075 if (!dev) {
1076 rcu_read_unlock();
1077 return -ENODEV;
1078 }
1079
1080 /* Find qdisc */
1081 if (!*parent) {
1082 *q = dev->qdisc;
1083 *parent = (*q)->handle;
1084 } else {
1085 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
1086 if (!*q) {
1087 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
1088 err = -EINVAL;
1089 goto errout_rcu;
1090 }
1091 }
1092
1093 *q = qdisc_refcount_inc_nz(*q);
1094 if (!*q) {
1095 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
1096 err = -EINVAL;
1097 goto errout_rcu;
1098 }
1099
1100 /* Is it classful? */
1101 cops = (*q)->ops->cl_ops;
1102 if (!cops) {
1103 NL_SET_ERR_MSG(extack, "Qdisc not classful");
1104 err = -EINVAL;
1105 goto errout_qdisc;
1106 }
1107
1108 if (!cops->tcf_block) {
1109 NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
1110 err = -EOPNOTSUPP;
1111 goto errout_qdisc;
1112 }
1113
1114 errout_rcu:
1115 /* At this point we know that qdisc is not noop_qdisc,
1116  * which means that qdisc holds a reference to net_device
1117  * and we hold a reference to qdisc, so it is safe to release
1118  * rcu read lock.
1119  */
1120 rcu_read_unlock();
1121 return err;
1122
1123 errout_qdisc:
1124 rcu_read_unlock();
1125
1126 if (rtnl_held)
1127 qdisc_put(*q);
1128 else
1129 qdisc_put_unlocked(*q);
1130 *q = NULL;
1131
1132 return err;
1133 }
1134
1135 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
1136 int ifindex, struct netlink_ext_ack *extack)
1137 {
1138 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
1139 return 0;
1140
1141 /* Do we search for filter, attached to class? */
1142 if (TC_H_MIN(parent)) {
1143 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1144
1145 *cl = cops->find(q, parent);
1146 if (*cl == 0) {
1147 NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
1148 return -ENOENT;
1149 }
1150 }
1151
1152 return 0;
1153 }
1154
1155 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
1156 unsigned long cl, int ifindex,
1157 u32 block_index,
1158 struct netlink_ext_ack *extack)
1159 {
1160 struct tcf_block *block;
1161
1162 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
1163 block = tcf_block_refcnt_get(net, block_index);
1164 if (!block) {
1165 NL_SET_ERR_MSG(extack, "Block of given index was not found");
1166 return ERR_PTR(-EINVAL);
1167 }
1168 } else {
1169 const struct Qdisc_class_ops *cops = q->ops->cl_ops;
1170
1171 block = cops->tcf_block(q, cl, extack);
1172 if (!block)
1173 return ERR_PTR(-EINVAL);
1174
1175 if (tcf_block_shared(block)) {
1176 NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1177 return ERR_PTR(-EOPNOTSUPP);
1178 }
1179
1180 /* Always take reference to block in order to support execution
1181  * of rules update path of cls API without rtnl lock. Caller
1182  * must release block when it is finished using it. 'if' block
1183  * above necessarily contains block ownership acquired by
1184  * tcf_block_refcnt_get().
1185  */
1186 refcount_inc(&block->refcnt);
1187 }
1188
1189 return block;
1190 }
1191
1192 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1193 struct tcf_block_ext_info *ei, bool rtnl_held)
1194 {
1195 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1196 /* Flushing/putting all chains will cause the block to be
1197  * deallocated when last chain is freed. However, if chain_list
1198  * is empty, block has to be manually deallocated. After block
1199  * reference counter reached 0, it is no longer possible to
1200  * increment it or add new chains to block.
1201  */
1202 bool free_block = list_empty(&block->chain_list);
1203
1204 mutex_unlock(&block->lock);
1205 if (tcf_block_shared(block))
1206 tcf_block_remove(block, block->net);
1207
1208 if (q)
1209 tcf_block_offload_unbind(block, q, ei);
1210
1211 if (free_block)
1212 tcf_block_destroy(block);
1213 else
1214 tcf_block_flush_all_chains(block, rtnl_held);
1215 } else if (q) {
1216 tcf_block_offload_unbind(block, q, ei);
1217 }
1218 }
1219
1220 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1221 {
1222 __tcf_block_put(block, NULL, NULL, rtnl_held);
1223 }
1224
1225
1226 /* Find tcf block.
1227  * Set q, parent, cl when appropriate.
1228  */
1229 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
1230 u32 *parent, unsigned long *cl,
1231 int ifindex, u32 block_index,
1232 struct netlink_ext_ack *extack)
1233 {
1234 struct tcf_block *block;
1235 int err = 0;
1236
1237 ASSERT_RTNL();
1238
1239 err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
1240 if (err)
1241 goto errout;
1242
1243 err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
1244 if (err)
1245 goto errout_qdisc;
1246
1247 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1248 if (IS_ERR(block)) {
1249 err = PTR_ERR(block);
1250 goto errout_qdisc;
1251 }
1252
1253 return block;
1254
1255 errout_qdisc:
1256 if (*q)
1257 qdisc_put(*q);
1258 errout:
1259 *q = NULL;
1260 return ERR_PTR(err);
1261 }
1262
1263 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1264 bool rtnl_held)
1265 {
1266 if (!IS_ERR_OR_NULL(block))
1267 tcf_block_refcnt_put(block, rtnl_held);
1268
1269 if (q) {
1270 if (rtnl_held)
1271 qdisc_put(q);
1272 else
1273 qdisc_put_unlocked(q);
1274 }
1275 }
1276
1277 struct tcf_block_owner_item {
1278 struct list_head list;
1279 struct Qdisc *q;
1280 enum flow_block_binder_type binder_type;
1281 };
1282
1283 static void
1284 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1285 struct Qdisc *q,
1286 enum flow_block_binder_type binder_type)
1287 {
1288 if (block->keep_dst &&
1289 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
1290 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
1291 netif_keep_dst(qdisc_dev(q));
1292 }
1293
1294 void tcf_block_netif_keep_dst(struct tcf_block *block)
1295 {
1296 struct tcf_block_owner_item *item;
1297
1298 block->keep_dst = true;
1299 list_for_each_entry(item, &block->owner_list, list)
1300 tcf_block_owner_netif_keep_dst(block, item->q,
1301 item->binder_type);
1302 }
1303 EXPORT_SYMBOL(tcf_block_netif_keep_dst);
1304
1305 static int tcf_block_owner_add(struct tcf_block *block,
1306 struct Qdisc *q,
1307 enum flow_block_binder_type binder_type)
1308 {
1309 struct tcf_block_owner_item *item;
1310
1311 item = kmalloc(sizeof(*item), GFP_KERNEL);
1312 if (!item)
1313 return -ENOMEM;
1314 item->q = q;
1315 item->binder_type = binder_type;
1316 list_add(&item->list, &block->owner_list);
1317 return 0;
1318 }
1319
1320 static void tcf_block_owner_del(struct tcf_block *block,
1321 struct Qdisc *q,
1322 enum flow_block_binder_type binder_type)
1323 {
1324 struct tcf_block_owner_item *item;
1325
1326 list_for_each_entry(item, &block->owner_list, list) {
1327 if (item->q == q && item->binder_type == binder_type) {
1328 list_del(&item->list);
1329 kfree(item);
1330 return;
1331 }
1332 }
1333 WARN_ON(1);
1334 }
1335
1336 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
1337 struct tcf_block_ext_info *ei,
1338 struct netlink_ext_ack *extack)
1339 {
1340 struct net *net = qdisc_net(q);
1341 struct tcf_block *block = NULL;
1342 int err;
1343
1344 if (ei->block_index)
1345 /* block_index not 0 means the shared block is requested */
1346 block = tcf_block_refcnt_get(net, ei->block_index);
1347
1348 if (!block) {
1349 block = tcf_block_create(net, q, ei->block_index, extack);
1350 if (IS_ERR(block))
1351 return PTR_ERR(block);
1352 if (tcf_block_shared(block)) {
1353 err = tcf_block_insert(block, net, extack);
1354 if (err)
1355 goto err_block_insert;
1356 }
1357 }
1358
1359 err = tcf_block_owner_add(block, q, ei->binder_type);
1360 if (err)
1361 goto err_block_owner_add;
1362
1363 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1364
1365 err = tcf_chain0_head_change_cb_add(block, ei, extack);
1366 if (err)
1367 goto err_chain0_head_change_cb_add;
1368
1369 err = tcf_block_offload_bind(block, q, ei, extack);
1370 if (err)
1371 goto err_block_offload_bind;
1372
1373 *p_block = block;
1374 return 0;
1375
1376 err_block_offload_bind:
1377 tcf_chain0_head_change_cb_del(block, ei);
1378 err_chain0_head_change_cb_add:
1379 tcf_block_owner_del(block, q, ei->binder_type);
1380 err_block_owner_add:
1381 err_block_insert:
1382 tcf_block_refcnt_put(block, true);
1383 return err;
1384 }
1385 EXPORT_SYMBOL(tcf_block_get_ext);
1386
1387 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
1388 {
1389 struct tcf_proto __rcu **p_filter_chain = priv;
1390
1391 rcu_assign_pointer(*p_filter_chain, tp_head);
1392 }
1393
1394 int tcf_block_get(struct tcf_block **p_block,
1395 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
1396 struct netlink_ext_ack *extack)
1397 {
1398 struct tcf_block_ext_info ei = {
1399 .chain_head_change = tcf_chain_head_change_dflt,
1400 .chain_head_change_priv = p_filter_chain,
1401 };
1402
1403 WARN_ON(!p_filter_chain);
1404 return tcf_block_get_ext(p_block, q, &ei, extack);
1405 }
1406 EXPORT_SYMBOL(tcf_block_get);
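/* Usage sketch (illustrative): a simple classful qdisc typically calls
 * tcf_block_get() from its ->init() and tcf_block_put() from its
 * ->destroy(), with q pointing at (hypothetical) qdisc private data:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 */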
1407
1408 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
1409  * actions should be all removed after flushing.
1410  */
1411 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1412 struct tcf_block_ext_info *ei)
1413 {
1414 if (!block)
1415 return;
1416 tcf_chain0_head_change_cb_del(block, ei);
1417 tcf_block_owner_del(block, q, ei->binder_type);
1418
1419 __tcf_block_put(block, q, ei, true);
1420 }
1421 EXPORT_SYMBOL(tcf_block_put_ext);
1422
1423 void tcf_block_put(struct tcf_block *block)
1424 {
1425 struct tcf_block_ext_info ei = {0, };
1426
1427 if (!block)
1428 return;
1429 tcf_block_put_ext(block, block->q, &ei);
1430 }
1431
1432 EXPORT_SYMBOL(tcf_block_put);
1433
1434 static int
1435 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1436 void *cb_priv, bool add, bool offload_in_use,
1437 struct netlink_ext_ack *extack)
1438 {
1439 struct tcf_chain *chain, *chain_prev;
1440 struct tcf_proto *tp, *tp_prev;
1441 int err;
1442
1443 lockdep_assert_held(&block->cb_lock);
1444
1445 for (chain = __tcf_get_next_chain(block, NULL);
1446 chain;
1447 chain_prev = chain,
1448 chain = __tcf_get_next_chain(block, chain),
1449 tcf_chain_put(chain_prev)) {
1450 for (tp = __tcf_get_next_proto(chain, NULL); tp;
1451 tp_prev = tp,
1452 tp = __tcf_get_next_proto(chain, tp),
1453 tcf_proto_put(tp_prev, true, NULL)) {
1454 if (tp->ops->reoffload) {
1455 err = tp->ops->reoffload(tp, add, cb, cb_priv,
1456 extack);
1457 if (err && add)
1458 goto err_playback_remove;
1459 } else if (add && offload_in_use) {
1460 err = -EOPNOTSUPP;
1461 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
1462 goto err_playback_remove;
1463 }
1464 }
1465 }
1466
1467 return 0;
1468
1469 err_playback_remove:
1470 tcf_proto_put(tp, true, NULL);
1471 tcf_chain_put(chain);
1472 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1473 extack);
1474 return err;
1475 }
1476
1477 static int tcf_block_bind(struct tcf_block *block,
1478 struct flow_block_offload *bo)
1479 {
1480 struct flow_block_cb *block_cb, *next;
1481 int err, i = 0;
1482
1483 lockdep_assert_held(&block->cb_lock);
1484
1485 list_for_each_entry(block_cb, &bo->cb_list, list) {
1486 err = tcf_block_playback_offloads(block, block_cb->cb,
1487 block_cb->cb_priv, true,
1488 tcf_block_offload_in_use(block),
1489 bo->extack);
1490 if (err)
1491 goto err_unroll;
1492 if (!bo->unlocked_driver_cb)
1493 block->lockeddevcnt++;
1494
1495 i++;
1496 }
1497 list_splice(&bo->cb_list, &block->flow_block.cb_list);
1498
1499 return 0;
1500
1501 err_unroll:
1502 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1503 if (i-- > 0) {
1504 list_del(&block_cb->list);
1505 tcf_block_playback_offloads(block, block_cb->cb,
1506 block_cb->cb_priv, false,
1507 tcf_block_offload_in_use(block),
1508 NULL);
1509 if (!bo->unlocked_driver_cb)
1510 block->lockeddevcnt--;
1511 }
1512 flow_block_cb_free(block_cb);
1513 }
1514
1515 return err;
1516 }
1517
1518 static void tcf_block_unbind(struct tcf_block *block,
1519 struct flow_block_offload *bo)
1520 {
1521 struct flow_block_cb *block_cb, *next;
1522
1523 lockdep_assert_held(&block->cb_lock);
1524
1525 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1526 tcf_block_playback_offloads(block, block_cb->cb,
1527 block_cb->cb_priv, false,
1528 tcf_block_offload_in_use(block),
1529 NULL);
1530 list_del(&block_cb->list);
1531 flow_block_cb_free(block_cb);
1532 if (!bo->unlocked_driver_cb)
1533 block->lockeddevcnt--;
1534 }
1535 }
1536
1537 static int tcf_block_setup(struct tcf_block *block,
1538 struct flow_block_offload *bo)
1539 {
1540 int err;
1541
1542 switch (bo->command) {
1543 case FLOW_BLOCK_BIND:
1544 err = tcf_block_bind(block, bo);
1545 break;
1546 case FLOW_BLOCK_UNBIND:
1547 err = 0;
1548 tcf_block_unbind(block, bo);
1549 break;
1550 default:
1551 WARN_ON_ONCE(1);
1552 err = -EOPNOTSUPP;
1553 }
1554
1555 return err;
1556 }
1557
1558 /* Main classifier routine: scans classifier chain attached
1559  * to this qdisc, (optionally) tests for protocol and asks
1560  * specific classifiers.
1561  */
1562 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1563 struct tcf_result *res, bool compat_mode)
1564 {
1565 #ifdef CONFIG_NET_CLS_ACT
1566 const int max_reclassify_loop = 4;
1567 const struct tcf_proto *orig_tp = tp;
1568 const struct tcf_proto *first_tp;
1569 int limit = 0;
1570
1571 reclassify:
1572 #endif
1573 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1574 __be16 protocol = tc_skb_protocol(skb);
1575 int err;
1576
1577 if (tp->protocol != protocol &&
1578 tp->protocol != htons(ETH_P_ALL))
1579 continue;
1580
1581 err = tp->classify(skb, tp, res);
1582 #ifdef CONFIG_NET_CLS_ACT
1583 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1584 first_tp = orig_tp;
1585 goto reset;
1586 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1587 first_tp = res->goto_tp;
1588
1589 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1590 {
1591 struct tc_skb_ext *ext;
1592
1593 ext = skb_ext_add(skb, TC_SKB_EXT);
1594 if (WARN_ON_ONCE(!ext))
1595 return TC_ACT_SHOT;
1596
1597 ext->chain = err & TC_ACT_EXT_VAL_MASK;
1598 }
1599 #endif
1600 goto reset;
1601 }
1602 #endif
1603 if (err >= 0)
1604 return err;
1605 }
1606
1607 return TC_ACT_UNSPEC;
1608 #ifdef CONFIG_NET_CLS_ACT
1609 reset:
1610 if (unlikely(limit++ >= max_reclassify_loop)) {
1611 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1612 tp->chain->block->index,
1613 tp->prio & 0xffff,
1614 ntohs(tp->protocol));
1615 return TC_ACT_SHOT;
1616 }
1617
1618 tp = first_tp;
1619 goto reclassify;
1620 #endif
1621 }
1622 EXPORT_SYMBOL(tcf_classify);
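/* Reclassification bound: a packet may take TC_ACT_RECLASSIFY or a
 * goto-chain restart at most max_reclassify_loop (4) times; beyond
 * that it is dropped with TC_ACT_SHOT and the rate-limited notice
 * above is logged, guarding against rule loops.
 */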
1623
1624 struct tcf_chain_info {
1625 struct tcf_proto __rcu **pprev;
1626 struct tcf_proto __rcu *next;
1627 };
1628
1629 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1630 struct tcf_chain_info *chain_info)
1631 {
1632 return tcf_chain_dereference(*chain_info->pprev, chain);
1633 }
1634
1635 static int tcf_chain_tp_insert(struct tcf_chain *chain,
1636 struct tcf_chain_info *chain_info,
1637 struct tcf_proto *tp)
1638 {
1639 if (chain->flushing)
1640 return -EAGAIN;
1641
1642 if (*chain_info->pprev == chain->filter_chain)
1643 tcf_chain0_head_change(chain, tp);
1644 tcf_proto_get(tp);
1645 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1646 rcu_assign_pointer(*chain_info->pprev, tp);
1647
1648 return 0;
1649 }
1650
1651 static void tcf_chain_tp_remove(struct tcf_chain *chain,
1652 struct tcf_chain_info *chain_info,
1653 struct tcf_proto *tp)
1654 {
1655 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1656
1657 tcf_proto_mark_delete(tp);
1658 if (tp == chain->filter_chain)
1659 tcf_chain0_head_change(chain, next);
1660 RCU_INIT_POINTER(*chain_info->pprev, next);
1661 }
1662
1663 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1664 struct tcf_chain_info *chain_info,
1665 u32 protocol, u32 prio,
1666 bool prio_allocate);
1667
1668
1669 /* Try to insert new proto.
1670  * If proto with specified priority already exists, free new proto
1671  * and return existing one.
1672  */
1673 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1674 struct tcf_proto *tp_new,
1675 u32 protocol, u32 prio,
1676 bool rtnl_held)
1677 {
1678 struct tcf_chain_info chain_info;
1679 struct tcf_proto *tp;
1680 int err = 0;
1681
1682 mutex_lock(&chain->filter_chain_lock);
1683
1684 if (tcf_proto_exists_destroying(chain, tp_new)) {
1685 mutex_unlock(&chain->filter_chain_lock);
1686 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1687 return ERR_PTR(-EAGAIN);
1688 }
1689
1690 tp = tcf_chain_tp_find(chain, &chain_info,
1691 protocol, prio, false);
1692 if (!tp)
1693 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1694 mutex_unlock(&chain->filter_chain_lock);
1695
1696 if (tp) {
1697 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1698 tp_new = tp;
1699 } else if (err) {
1700 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1701 tp_new = ERR_PTR(err);
1702 }
1703
1704 return tp_new;
1705 }
1706
1707 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1708 struct tcf_proto *tp, bool rtnl_held,
1709 struct netlink_ext_ack *extack)
1710 {
1711 struct tcf_chain_info chain_info;
1712 struct tcf_proto *tp_iter;
1713 struct tcf_proto **pprev;
1714 struct tcf_proto *next;
1715
1716 mutex_lock(&chain->filter_chain_lock);
1717
1718 /* Atomically find and remove tp from chain. */
1719 for (pprev = &chain->filter_chain;
1720 (tp_iter = tcf_chain_dereference(*pprev, chain));
1721 pprev = &tp_iter->next) {
1722 if (tp_iter == tp) {
1723 chain_info.pprev = pprev;
1724 chain_info.next = tp_iter->next;
1725 WARN_ON(tp_iter->deleting);
1726 break;
1727 }
1728 }
1729
1730 /* Verify that tp still exists and no new filters were inserted
1731  * concurrently. Mark tp for deletion if it is empty.
1732  */
1733 if (!tp_iter || !tcf_proto_check_delete(tp)) {
1734 mutex_unlock(&chain->filter_chain_lock);
1735 return;
1736 }
1737
1738 tcf_proto_signal_destroying(chain, tp);
1739 next = tcf_chain_dereference(chain_info.next, chain);
1740 if (tp == chain->filter_chain)
1741 tcf_chain0_head_change(chain, next);
1742 RCU_INIT_POINTER(*chain_info.pprev, next);
1743 mutex_unlock(&chain->filter_chain_lock);
1744
1745 tcf_proto_put(tp, rtnl_held, extack);
1746 }
1747
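/* Look up a tp by (protocol, prio) on the chain. On an exact prio
 * match a reference to the tp is taken and returned; otherwise NULL is
 * returned and chain_info is positioned at the insertion point for
 * that prio. ERR_PTR(-EINVAL) signals a prio collision with a
 * different protocol or with prio auto-allocation requested.
 */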
1748 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1749 struct tcf_chain_info *chain_info,
1750 u32 protocol, u32 prio,
1751 bool prio_allocate)
1752 {
1753 struct tcf_proto **pprev;
1754 struct tcf_proto *tp;
1755
1756 /* Check the chain for existence of proto-tcf with this priority */
1757 for (pprev = &chain->filter_chain;
1758 (tp = tcf_chain_dereference(*pprev, chain));
1759 pprev = &tp->next) {
1760 if (tp->prio >= prio) {
1761 if (tp->prio == prio) {
1762 if (prio_allocate ||
1763 (tp->protocol != protocol && protocol))
1764 return ERR_PTR(-EINVAL);
1765 } else {
1766 tp = NULL;
1767 }
1768 break;
1769 }
1770 }
1771 chain_info->pprev = pprev;
1772 if (tp) {
1773 chain_info->next = tp->next;
1774 tcf_proto_get(tp);
1775 } else {
1776 chain_info->next = NULL;
1777 }
1778 return tp;
1779 }
1780
1781 static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1782 struct tcf_proto *tp, struct tcf_block *block,
1783 struct Qdisc *q, u32 parent, void *fh,
1784 u32 portid, u32 seq, u16 flags, int event,
1785 bool rtnl_held)
1786 {
1787 struct tcmsg *tcm;
1788 struct nlmsghdr *nlh;
1789 unsigned char *b = skb_tail_pointer(skb);
1790
1791 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1792 if (!nlh)
1793 goto out_nlmsg_trim;
1794 tcm = nlmsg_data(nlh);
1795 tcm->tcm_family = AF_UNSPEC;
1796 tcm->tcm__pad1 = 0;
1797 tcm->tcm__pad2 = 0;
1798 if (q) {
1799 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1800 tcm->tcm_parent = parent;
1801 } else {
1802 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1803 tcm->tcm_block_index = block->index;
1804 }
1805 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1806 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1807 goto nla_put_failure;
1808 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1809 goto nla_put_failure;
1810 if (!fh) {
1811 tcm->tcm_handle = 0;
1812 } else {
1813 if (tp->ops->dump &&
1814 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1815 goto nla_put_failure;
1816 }
1817 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1818 return skb->len;
1819
1820 out_nlmsg_trim:
1821 nla_put_failure:
1822 nlmsg_trim(skb, b);
1823 return -1;
1824 }
1825
1826 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1827 struct nlmsghdr *n, struct tcf_proto *tp,
1828 struct tcf_block *block, struct Qdisc *q,
1829 u32 parent, void *fh, int event, bool unicast,
1830 bool rtnl_held)
1831 {
1832 struct sk_buff *skb;
1833 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1834 int err = 0;
1835
1836 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1837 if (!skb)
1838 return -ENOBUFS;
1839
1840 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1841 n->nlmsg_seq, n->nlmsg_flags, event,
1842 rtnl_held) <= 0) {
1843 kfree_skb(skb);
1844 return -EINVAL;
1845 }
1846
1847 if (unicast)
1848 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1849 else
1850 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1851 n->nlmsg_flags & NLM_F_ECHO);
1852
1853 if (err > 0)
1854 err = 0;
1855 return err;
1856 }
1857
1858 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1859 struct nlmsghdr *n, struct tcf_proto *tp,
1860 struct tcf_block *block, struct Qdisc *q,
1861 u32 parent, void *fh, bool unicast, bool *last,
1862 bool rtnl_held, struct netlink_ext_ack *extack)
1863 {
1864 struct sk_buff *skb;
1865 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1866 int err;
1867
1868 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1869 if (!skb)
1870 return -ENOBUFS;
1871
1872 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1873 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1874 rtnl_held) <= 0) {
1875 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1876 kfree_skb(skb);
1877 return -EINVAL;
1878 }
1879
1880 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1881 if (err) {
1882 kfree_skb(skb);
1883 return err;
1884 }
1885
1886 if (unicast)
1887 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1888 else
1889 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1890 n->nlmsg_flags & NLM_F_ECHO);
1891 if (err < 0)
1892 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1893
1894 if (err > 0)
1895 err = 0;
1896 return err;
1897 }
1898
1899 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1900 struct tcf_block *block, struct Qdisc *q,
1901 u32 parent, struct nlmsghdr *n,
1902 struct tcf_chain *chain, int event,
1903 bool rtnl_held)
1904 {
1905 struct tcf_proto *tp;
1906
1907 for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1908 tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1909 tfilter_notify(net, oskb, n, tp, block,
1910 q, parent, NULL, event, false, rtnl_held);
1911 }
1912
1913 static void tfilter_put(struct tcf_proto *tp, void *fh)
1914 {
1915 if (tp->ops->put && fh)
1916 tp->ops->put(tp, fh);
1917 }
1918
1919 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1920 struct netlink_ext_ack *extack)
1921 {
1922 struct net *net = sock_net(skb->sk);
1923 struct nlattr *tca[TCA_MAX + 1];
1924 char name[IFNAMSIZ];
1925 struct tcmsg *t;
1926 u32 protocol;
1927 u32 prio;
1928 bool prio_allocate;
1929 u32 parent;
1930 u32 chain_index;
1931 struct Qdisc *q = NULL;
1932 struct tcf_chain_info chain_info;
1933 struct tcf_chain *chain = NULL;
1934 struct tcf_block *block;
1935 struct tcf_proto *tp;
1936 unsigned long cl;
1937 void *fh;
1938 int err;
1939 int tp_created;
1940 bool rtnl_held = false;
1941
1942 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1943 return -EPERM;
1944
1945 replay:
1946 tp_created = 0;
1947
1948 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1949 rtm_tca_policy, extack);
1950 if (err < 0)
1951 return err;
1952
1953 t = nlmsg_data(n);
1954 protocol = TC_H_MIN(t->tcm_info);
1955 prio = TC_H_MAJ(t->tcm_info);
1956 prio_allocate = false;
1957 parent = t->tcm_parent;
1958 tp = NULL;
1959 cl = 0;
1960 block = NULL;
1961
1962 if (prio == 0) {
1963 /* If no priority is provided by the user,
1964  * we allocate one.
1965  */
1966 if (n->nlmsg_flags & NLM_F_CREATE) {
1967 prio = TC_H_MAKE(0x80000000U, 0U);
1968 prio_allocate = true;
1969 } else {
1970 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1971 return -ENOENT;
1972 }
1973 }
1974
1975 /* Find head of filter chain. */
1976
1977 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
1978 if (err)
1979 return err;
1980
1981 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
1982 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
1983 err = -EINVAL;
1984 goto errout;
1985 }
1986
1987 /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
1988  * block is shared (no qdisc found), qdisc is not unlocked, classifier
1989  * type is not specified, classifier is not unlocked.
1990  */
1991 if (rtnl_held ||
1992 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
1993 !tcf_proto_is_unlocked(name)) {
1994 rtnl_held = true;
1995 rtnl_lock();
1996 }
1997
1998 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
1999 if (err)
2000 goto errout;
2001
2002 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2003 extack);
2004 if (IS_ERR(block)) {
2005 err = PTR_ERR(block);
2006 goto errout;
2007 }
2008 block->classid = parent;
2009
2010 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2011 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2012 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2013 err = -EINVAL;
2014 goto errout;
2015 }
2016 chain = tcf_chain_get(block, chain_index, true);
2017 if (!chain) {
2018 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2019 err = -ENOMEM;
2020 goto errout;
2021 }
2022
2023 mutex_lock(&chain->filter_chain_lock);
2024 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2025 prio, prio_allocate);
2026 if (IS_ERR(tp)) {
2027 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2028 err = PTR_ERR(tp);
2029 goto errout_locked;
2030 }
2031
2032 if (tp == NULL) {
2033 struct tcf_proto *tp_new = NULL;
2034
2035 if (chain->flushing) {
2036 err = -EAGAIN;
2037 goto errout_locked;
2038 }
2039
2040 /* Proto-tcf does not exist, create new one */
2041
2042 if (tca[TCA_KIND] == NULL || !protocol) {
2043 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2044 err = -EINVAL;
2045 goto errout_locked;
2046 }
2047
2048 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2049 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2050 err = -ENOENT;
2051 goto errout_locked;
2052 }
2053
2054 if (prio_allocate)
2055 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2056 &chain_info));
2057
2058 mutex_unlock(&chain->filter_chain_lock);
2059 tp_new = tcf_proto_create(name, protocol, prio, chain,
2060 rtnl_held, extack);
2061 if (IS_ERR(tp_new)) {
2062 err = PTR_ERR(tp_new);
2063 goto errout_tp;
2064 }
2065
2066 tp_created = 1;
2067 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2068 rtnl_held);
2069 if (IS_ERR(tp)) {
2070 err = PTR_ERR(tp);
2071 goto errout_tp;
2072 }
2073 } else {
2074 mutex_unlock(&chain->filter_chain_lock);
2075 }
2076
2077 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2078 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2079 err = -EINVAL;
2080 goto errout;
2081 }
2082
2083 fh = tp->ops->get(tp, t->tcm_handle);
2084
2085 if (!fh) {
2086 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2087 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2088 err = -ENOENT;
2089 goto errout;
2090 }
2091 } else if (n->nlmsg_flags & NLM_F_EXCL) {
2092 tfilter_put(tp, fh);
2093 NL_SET_ERR_MSG(extack, "Filter already exists");
2094 err = -EEXIST;
2095 goto errout;
2096 }
2097
2098 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2099 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2100 err = -EINVAL;
2101 goto errout;
2102 }
2103
2104 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2105 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2106 rtnl_held, extack);
2107 if (err == 0) {
2108 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2109 RTM_NEWTFILTER, false, rtnl_held);
2110 tfilter_put(tp, fh);
2111
2112 if (q)
2113 q->flags &= ~TCQ_F_CAN_BYPASS;
2114 }
2115
2116 errout:
2117 if (err && tp_created)
2118 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2119 errout_tp:
2120 if (chain) {
2121 if (tp && !IS_ERR(tp))
2122 tcf_proto_put(tp, rtnl_held, NULL);
2123 if (!tp_created)
2124 tcf_chain_put(chain);
2125 }
2126 tcf_block_release(q, block, rtnl_held);
2127
2128 if (rtnl_held)
2129 rtnl_unlock();
2130
2131 if (err == -EAGAIN) {
2132 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2133  * of target chain.
2134  */
2135 rtnl_held = true;
2136 /* Replay the request. */
2137 goto replay;
2138 }
2139 return err;
2140
2141 errout_locked:
2142 mutex_unlock(&chain->filter_chain_lock);
2143 goto errout;
2144 }
2145
2146 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2147 struct netlink_ext_ack *extack)
2148 {
2149 struct net *net = sock_net(skb->sk);
2150 struct nlattr *tca[TCA_MAX + 1];
2151 char name[IFNAMSIZ];
2152 struct tcmsg *t;
2153 u32 protocol;
2154 u32 prio;
2155 u32 parent;
2156 u32 chain_index;
2157 struct Qdisc *q = NULL;
2158 struct tcf_chain_info chain_info;
2159 struct tcf_chain *chain = NULL;
2160 struct tcf_block *block = NULL;
2161 struct tcf_proto *tp = NULL;
2162 unsigned long cl = 0;
2163 void *fh = NULL;
2164 int err;
2165 bool rtnl_held = false;
2166
2167 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2168 return -EPERM;
2169
2170 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2171 rtm_tca_policy, extack);
2172 if (err < 0)
2173 return err;
2174
2175 t = nlmsg_data(n);
2176 protocol = TC_H_MIN(t->tcm_info);
2177 prio = TC_H_MAJ(t->tcm_info);
2178 parent = t->tcm_parent;
2179
2180 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2181 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2182 return -ENOENT;
2183 }
2184
2185 /* Find head of filter chain. */
2186
2187 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2188 if (err)
2189 return err;
2190
2191 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2192 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2193 err = -EINVAL;
2194 goto errout;
2195 }
2196
2197 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
2198 * found), qdisc is not unlocked, or the classifier is not unlocked.
2199 */
2200 if (!prio ||
2201 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2202 !tcf_proto_is_unlocked(name)) {
2203 rtnl_held = true;
2204 rtnl_lock();
2205 }
2206
2207 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2208 if (err)
2209 goto errout;
2210
2211 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2212 extack);
2213 if (IS_ERR(block)) {
2214 err = PTR_ERR(block);
2215 goto errout;
2216 }
2217
2218 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2219 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2220 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2221 err = -EINVAL;
2222 goto errout;
2223 }
2224 chain = tcf_chain_get(block, chain_index, false);
2225 if (!chain) {
2226 /* User requested flush on non-existent chain. Nothing to do,
2227 * so just return success.
2228 */
2229 if (prio == 0) {
2230 err = 0;
2231 goto errout;
2232 }
2233 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2234 err = -ENOENT;
2235 goto errout;
2236 }
2237
2238 if (prio == 0) {
2239 tfilter_notify_chain(net, skb, block, q, parent, n,
2240 chain, RTM_DELTFILTER, rtnl_held);
2241 tcf_chain_flush(chain, rtnl_held);
2242 err = 0;
2243 goto errout;
2244 }
2245
2246 mutex_lock(&chain->filter_chain_lock);
2247 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2248 prio, false);
2249 if (!tp || IS_ERR(tp)) {
2250 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2251 err = tp ? PTR_ERR(tp) : -ENOENT;
2252 goto errout_locked;
2253 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2254 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2255 err = -EINVAL;
2256 goto errout_locked;
2257 } else if (t->tcm_handle == 0) {
2258 tcf_proto_signal_destroying(chain, tp);
2259 tcf_chain_tp_remove(chain, &chain_info, tp);
2260 mutex_unlock(&chain->filter_chain_lock);
2261
2262 tcf_proto_put(tp, rtnl_held, NULL);
2263 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2264 RTM_DELTFILTER, false, rtnl_held);
2265 err = 0;
2266 goto errout;
2267 }
2268 mutex_unlock(&chain->filter_chain_lock);
2269
2270 fh = tp->ops->get(tp, t->tcm_handle);
2271
2272 if (!fh) {
2273 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2274 err = -ENOENT;
2275 } else {
2276 bool last;
2277
2278 err = tfilter_del_notify(net, skb, n, tp, block,
2279 q, parent, fh, false, &last,
2280 rtnl_held, extack);
2281
2282 if (err)
2283 goto errout;
2284 if (last)
2285 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2286 }
2287
2288 errout:
2289 if (chain) {
2290 if (tp && !IS_ERR(tp))
2291 tcf_proto_put(tp, rtnl_held, NULL);
2292 tcf_chain_put(chain);
2293 }
2294 tcf_block_release(q, block, rtnl_held);
2295
2296 if (rtnl_held)
2297 rtnl_unlock();
2298
2299 return err;
2300
2301 errout_locked:
2302 mutex_unlock(&chain->filter_chain_lock);
2303 goto errout;
2304 }
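/* For reference: the prio == 0 branch above is the whole-chain flush, e.g.
 * "tc filter del dev eth0 ingress" with no priority flushes every filter on
 * chain 0, while "tc filter del dev eth0 ingress prio 1 protocol ip" removes
 * a whole tcf_proto and "... handle 0x1 flower" removes one filter from it.
 */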
2305
2306 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2307 struct netlink_ext_ack *extack)
2308 {
2309 struct net *net = sock_net(skb->sk);
2310 struct nlattr *tca[TCA_MAX + 1];
2311 char name[IFNAMSIZ];
2312 struct tcmsg *t;
2313 u32 protocol;
2314 u32 prio;
2315 u32 parent;
2316 u32 chain_index;
2317 struct Qdisc *q = NULL;
2318 struct tcf_chain_info chain_info;
2319 struct tcf_chain *chain = NULL;
2320 struct tcf_block *block = NULL;
2321 struct tcf_proto *tp = NULL;
2322 unsigned long cl = 0;
2323 void *fh = NULL;
2324 int err;
2325 bool rtnl_held = false;
2326
2327 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2328 rtm_tca_policy, extack);
2329 if (err < 0)
2330 return err;
2331
2332 t = nlmsg_data(n);
2333 protocol = TC_H_MIN(t->tcm_info);
2334 prio = TC_H_MAJ(t->tcm_info);
2335 parent = t->tcm_parent;
2336
2337 if (prio == 0) {
2338 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2339 return -ENOENT;
2340 }
2341
2342 /* Find head of filter chain. */
2343
2344 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2345 if (err)
2346 return err;
2347
2348 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2349 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2350 err = -EINVAL;
2351 goto errout;
2352 }
2353
2354 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
2355 * unlocked, or the classifier is not unlocked.
2356 */
2357 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2358 !tcf_proto_is_unlocked(name)) {
2359 rtnl_held = true;
2360 rtnl_lock();
2361 }
2362
2363 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2364 if (err)
2365 goto errout;
2366
2367 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2368 extack);
2369 if (IS_ERR(block)) {
2370 err = PTR_ERR(block);
2371 goto errout;
2372 }
2373
2374 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2375 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2376 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2377 err = -EINVAL;
2378 goto errout;
2379 }
2380 chain = tcf_chain_get(block, chain_index, false);
2381 if (!chain) {
2382 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2383 err = -EINVAL;
2384 goto errout;
2385 }
2386
2387 mutex_lock(&chain->filter_chain_lock);
2388 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2389 prio, false);
2390 mutex_unlock(&chain->filter_chain_lock);
2391 if (!tp || IS_ERR(tp)) {
2392 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2393 err = tp ? PTR_ERR(tp) : -ENOENT;
2394 goto errout;
2395 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2396 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2397 err = -EINVAL;
2398 goto errout;
2399 }
2400
2401 fh = tp->ops->get(tp, t->tcm_handle);
2402
2403 if (!fh) {
2404 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2405 err = -ENOENT;
2406 } else {
2407 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2408 fh, RTM_NEWTFILTER, true, rtnl_held);
2409 if (err < 0)
2410 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2411 }
2412
2413 tfilter_put(tp, fh);
2414 errout:
2415 if (chain) {
2416 if (tp && !IS_ERR(tp))
2417 tcf_proto_put(tp, rtnl_held, NULL);
2418 tcf_chain_put(chain);
2419 }
2420 tcf_block_release(q, block, rtnl_held);
2421
2422 if (rtnl_held)
2423 rtnl_unlock();
2424
2425 return err;
2426 }
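/* For reference: this handler backs "tc filter get", which must name one
 * exact filter, e.g.
 *
 *   tc filter get dev eth0 ingress prio 1 protocol ip handle 0x1 flower
 *
 * and answers with a single unicast RTM_NEWTFILTER message.
 */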
2427
2428 struct tcf_dump_args {
2429 struct tcf_walker w;
2430 struct sk_buff *skb;
2431 struct netlink_callback *cb;
2432 struct tcf_block *block;
2433 struct Qdisc *q;
2434 u32 parent;
2435 };
2436
2437 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2438 {
2439 struct tcf_dump_args *a = (void *)arg;
2440 struct net *net = sock_net(a->skb->sk);
2441
2442 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2443 n, NETLINK_CB(a->cb->skb).portid,
2444 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2445 RTM_NEWTFILTER, true);
2446 }
2447
2448 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2449 struct sk_buff *skb, struct netlink_callback *cb,
2450 long index_start, long *p_index)
2451 {
2452 struct net *net = sock_net(skb->sk);
2453 struct tcf_block *block = chain->block;
2454 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2455 struct tcf_proto *tp, *tp_prev;
2456 struct tcf_dump_args arg;
2457
2458 for (tp = __tcf_get_next_proto(chain, NULL);
2459 tp;
2460 tp_prev = tp,
2461 tp = __tcf_get_next_proto(chain, tp),
2462 tcf_proto_put(tp_prev, true, NULL),
2463 (*p_index)++) {
2464 if (*p_index < index_start)
2465 continue;
2466 if (TC_H_MAJ(tcm->tcm_info) &&
2467 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2468 continue;
2469 if (TC_H_MIN(tcm->tcm_info) &&
2470 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2471 continue;
2472 if (*p_index > index_start)
2473 memset(&cb->args[1], 0,
2474 sizeof(cb->args) - sizeof(cb->args[0]));
2475 if (cb->args[1] == 0) {
2476 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2477 NETLINK_CB(cb->skb).portid,
2478 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2479 RTM_NEWTFILTER, true) <= 0)
2480 goto errout;
2481 cb->args[1] = 1;
2482 }
2483 if (!tp->ops->walk)
2484 continue;
2485 arg.w.fn = tcf_node_dump;
2486 arg.skb = skb;
2487 arg.cb = cb;
2488 arg.block = block;
2489 arg.q = q;
2490 arg.parent = parent;
2491 arg.w.stop = 0;
2492 arg.w.skip = cb->args[1] - 1;
2493 arg.w.count = 0;
2494 arg.w.cookie = cb->args[2];
2495 tp->ops->walk(tp, &arg.w, true);
2496 cb->args[2] = arg.w.cookie;
2497 cb->args[1] = arg.w.count + 1;
2498 if (arg.w.stop)
2499 goto errout;
2500 }
2501 return true;
2502
2503 errout:
2504 tcf_proto_put(tp, true, NULL);
2505 return false;
2506 }
2507
2508 /* called with RTNL */
2509 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2510 {
2511 struct tcf_chain *chain, *chain_prev;
2512 struct net *net = sock_net(skb->sk);
2513 struct nlattr *tca[TCA_MAX + 1];
2514 struct Qdisc *q = NULL;
2515 struct tcf_block *block;
2516 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2517 long index_start;
2518 long index;
2519 u32 parent;
2520 int err;
2521
2522 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2523 return skb->len;
2524
2525 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2526 NULL, cb->extack);
2527 if (err)
2528 return err;
2529
2530 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2531 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2532 if (!block)
2533 goto out;
2534 /* If we work with block index, q is NULL and parent value
2535 * will never be used in the following code. The check
2536 * in tcf_fill_node prevents it. However, compiler does not
2537 * see that far, so set parent to zero to silence the warning
2538 * about parent being uninitialized.
2539 */
2540 parent = 0;
2541 } else {
2542 const struct Qdisc_class_ops *cops;
2543 struct net_device *dev;
2544 unsigned long cl = 0;
2545
2546 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2547 if (!dev)
2548 return skb->len;
2549
2550 parent = tcm->tcm_parent;
2551 if (!parent)
2552 q = dev->qdisc;
2553 else
2554 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2555 if (!q)
2556 goto out;
2557 cops = q->ops->cl_ops;
2558 if (!cops)
2559 goto out;
2560 if (!cops->tcf_block)
2561 goto out;
2562 if (TC_H_MIN(tcm->tcm_parent)) {
2563 cl = cops->find(q, tcm->tcm_parent);
2564 if (cl == 0)
2565 goto out;
2566 }
2567 block = cops->tcf_block(q, cl, NULL);
2568 if (!block)
2569 goto out;
2570 parent = block->classid;
2571 if (tcf_block_shared(block))
2572 q = NULL;
2573 }
2574
2575 index_start = cb->args[0];
2576 index = 0;
2577
2578 for (chain = __tcf_get_next_chain(block, NULL);
2579 chain;
2580 chain_prev = chain,
2581 chain = __tcf_get_next_chain(block, chain),
2582 tcf_chain_put(chain_prev)) {
2583 if (tca[TCA_CHAIN] &&
2584 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2585 continue;
2586 if (!tcf_chain_dump(chain, q, parent, skb, cb,
2587 index_start, &index)) {
2588 tcf_chain_put(chain);
2589 err = -EMSGSIZE;
2590 break;
2591 }
2592 }
2593
2594 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2595 tcf_block_refcnt_put(block, true);
2596 cb->args[0] = index;
2597
2598 out:
2599 /* If we did no progress, the error (EMSGSIZE) is real */
2600 if (skb->len == 0 && err)
2601 return err;
2602 return skb->len;
2603 }
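/* For reference: the TCM_IFINDEX_MAGIC_BLOCK branch serves dumps of shared
 * blocks addressed by index instead of by device, i.e.
 * "tc filter show block 22" as opposed to "tc filter show dev eth0 ingress".
 */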
2604
2605 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2606 void *tmplt_priv, u32 chain_index,
2607 struct net *net, struct sk_buff *skb,
2608 struct tcf_block *block,
2609 u32 portid, u32 seq, u16 flags, int event)
2610 {
2611 unsigned char *b = skb_tail_pointer(skb);
2612 const struct tcf_proto_ops *ops;
2613 struct nlmsghdr *nlh;
2614 struct tcmsg *tcm;
2615 void *priv;
2616
2617 ops = tmplt_ops;
2618 priv = tmplt_priv;
2619
2620 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2621 if (!nlh)
2622 goto out_nlmsg_trim;
2623 tcm = nlmsg_data(nlh);
2624 tcm->tcm_family = AF_UNSPEC;
2625 tcm->tcm__pad1 = 0;
2626 tcm->tcm__pad2 = 0;
2627 tcm->tcm_handle = 0;
2628 if (block->q) {
2629 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2630 tcm->tcm_parent = block->q->handle;
2631 } else {
2632 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2633 tcm->tcm_block_index = block->index;
2634 }
2635
2636 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2637 goto nla_put_failure;
2638
2639 if (ops) {
2640 if (nla_put_string(skb, TCA_KIND, ops->kind))
2641 goto nla_put_failure;
2642 if (ops->tmplt_dump(skb, net, priv) < 0)
2643 goto nla_put_failure;
2644 }
2645
2646 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2647 return skb->len;
2648
2649 out_nlmsg_trim:
2650 nla_put_failure:
2651 nlmsg_trim(skb, b);
2652 return -EMSGSIZE;
2653 }
2654
2655 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2656 u32 seq, u16 flags, int event, bool unicast)
2657 {
2658 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2659 struct tcf_block *block = chain->block;
2660 struct net *net = block->net;
2661 struct sk_buff *skb;
2662 int err = 0;
2663
2664 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2665 if (!skb)
2666 return -ENOBUFS;
2667
2668 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2669 chain->index, net, skb, block, portid,
2670 seq, flags, event) <= 0) {
2671 kfree_skb(skb);
2672 return -EINVAL;
2673 }
2674
2675 if (unicast)
2676 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2677 else
2678 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2679 flags & NLM_F_ECHO);
2680
2681 if (err > 0)
2682 err = 0;
2683 return err;
2684 }
2685
2686 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2687 void *tmplt_priv, u32 chain_index,
2688 struct tcf_block *block, struct sk_buff *oskb,
2689 u32 seq, u16 flags, bool unicast)
2690 {
2691 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2692 struct net *net = block->net;
2693 struct sk_buff *skb;
2694
2695 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2696 if (!skb)
2697 return -ENOBUFS;
2698
2699 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2700 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2701 kfree_skb(skb);
2702 return -EINVAL;
2703 }
2704
2705 if (unicast)
2706 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2707
2708 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2709 }
2710
2711 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2712 struct nlattr **tca,
2713 struct netlink_ext_ack *extack)
2714 {
2715 const struct tcf_proto_ops *ops;
2716 char name[IFNAMSIZ];
2717 void *tmplt_priv;
2718
2719 /* If kind is not set, user did not specify template. */
2720 if (!tca[TCA_KIND])
2721 return 0;
2722
2723 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2724 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2725 return -EINVAL;
2726 }
2727
2728 ops = tcf_proto_lookup_ops(name, true, extack);
2729 if (IS_ERR(ops))
2730 return PTR_ERR(ops);
2731 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2732 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2733 return -EOPNOTSUPP;
2734 }
2735
2736 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2737 if (IS_ERR(tmplt_priv)) {
2738 module_put(ops->owner);
2739 return PTR_ERR(tmplt_priv);
2740 }
2741 chain->tmplt_ops = ops;
2742 chain->tmplt_priv = tmplt_priv;
2743 return 0;
2744 }
2745
2746 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2747 void *tmplt_priv)
2748 {
2749 /* No template was set on this chain; nothing to destroy. */
2750 if (!tmplt_ops)
2751 return;
2752
2753 tmplt_ops->tmplt_destroy(tmplt_priv);
2754 module_put(tmplt_ops->owner);
2755 }
2756
2757 /* Add/delete/get a filter chain */
2758
2759 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2760 struct netlink_ext_ack *extack)
2761 {
2762 struct net *net = sock_net(skb->sk);
2763 struct nlattr *tca[TCA_MAX + 1];
2764 struct tcmsg *t;
2765 u32 parent;
2766 u32 chain_index;
2767 struct Qdisc *q = NULL;
2768 struct tcf_chain *chain = NULL;
2769 struct tcf_block *block;
2770 unsigned long cl;
2771 int err;
2772
2773 if (n->nlmsg_type != RTM_GETCHAIN &&
2774 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2775 return -EPERM;
2776
2777 replay:
2778 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2779 rtm_tca_policy, extack);
2780 if (err < 0)
2781 return err;
2782
2783 t = nlmsg_data(n);
2784 parent = t->tcm_parent;
2785 cl = 0;
2786
2787 block = tcf_block_find(net, &q, &parent, &cl,
2788 t->tcm_ifindex, t->tcm_block_index, extack);
2789 if (IS_ERR(block))
2790 return PTR_ERR(block);
2791
2792 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2793 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2794 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2795 err = -EINVAL;
2796 goto errout_block;
2797 }
2798
2799 mutex_lock(&block->lock);
2800 chain = tcf_chain_lookup(block, chain_index);
2801 if (n->nlmsg_type == RTM_NEWCHAIN) {
2802 if (chain) {
2803 if (tcf_chain_held_by_acts_only(chain)) {
2804 /* The chain exists only because there is
2805 * some action referencing it.
2806 */
2807 tcf_chain_hold(chain);
2808 } else {
2809 NL_SET_ERR_MSG(extack, "Filter chain already exists");
2810 err = -EEXIST;
2811 goto errout_block_locked;
2812 }
2813 } else {
2814 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2815 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2816 err = -ENOENT;
2817 goto errout_block_locked;
2818 }
2819 chain = tcf_chain_create(block, chain_index);
2820 if (!chain) {
2821 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2822 err = -ENOMEM;
2823 goto errout_block_locked;
2824 }
2825 }
2826 } else {
2827 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2828 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2829 err = -EINVAL;
2830 goto errout_block_locked;
2831 }
2832 tcf_chain_hold(chain);
2833 }
2834
2835 if (n->nlmsg_type == RTM_NEWCHAIN) {
2836 /* Modifying chain requires holding parent block lock. In case
2837 * the chain was successfully added, take a reference to the
2838 * chain. This ensures that an empty chain
2839 * does not disappear at the end of this function.
2840 */
2841 tcf_chain_hold(chain);
2842 chain->explicitly_created = true;
2843 }
2844 mutex_unlock(&block->lock);
2845
2846 switch (n->nlmsg_type) {
2847 case RTM_NEWCHAIN:
2848 err = tc_chain_tmplt_add(chain, net, tca, extack);
2849 if (err) {
2850 tcf_chain_put_explicitly_created(chain);
2851 goto errout;
2852 }
2853
2854 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2855 RTM_NEWCHAIN, false);
2856 break;
2857 case RTM_DELCHAIN:
2858 tfilter_notify_chain(net, skb, block, q, parent, n,
2859 chain, RTM_DELTFILTER, true);
2860 /* Flush the chain first as the user requested chain removal. */
2861 tcf_chain_flush(chain, true);
2862 /* In case the chain was successfully deleted, put a reference
2863 * to the chain previously taken during addition.
2864 */
2865 tcf_chain_put_explicitly_created(chain);
2866 break;
2867 case RTM_GETCHAIN:
2868 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2869 n->nlmsg_seq, n->nlmsg_type, true);
2870 if (err < 0)
2871 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2872 break;
2873 default:
2874 err = -EOPNOTSUPP;
2875 NL_SET_ERR_MSG(extack, "Unsupported message type");
2876 goto errout;
2877 }
2878
2879 errout:
2880 tcf_chain_put(chain);
2881 errout_block:
2882 tcf_block_release(q, block, true);
2883 if (err == -EAGAIN)
2884 /* Replay the request. */
2885 goto replay;
2886 return err;
2887
2888 errout_block_locked:
2889 mutex_unlock(&block->lock);
2890 goto errout_block;
2891 }
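/* For reference: this handler backs the "tc chain" subcommands, e.g.
 *
 *   tc chain add dev eth0 ingress chain 1
 *   tc chain del dev eth0 ingress chain 1
 *   tc chain show dev eth0 ingress
 *
 * An optional classifier spec on "chain add" (for example a flower match
 * mask) becomes the template installed by tc_chain_tmplt_add() above.
 */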
2892
2893 /* called with RTNL */
2894 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2895 {
2896 struct net *net = sock_net(skb->sk);
2897 struct nlattr *tca[TCA_MAX + 1];
2898 struct Qdisc *q = NULL;
2899 struct tcf_block *block;
2900 struct tcmsg *tcm = nlmsg_data(cb->nlh);
2901 struct tcf_chain *chain;
2902 long index_start;
2903 long index;
2904 u32 parent;
2905 int err;
2906
2907 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2908 return skb->len;
2909
2910 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2911 rtm_tca_policy, cb->extack);
2912 if (err)
2913 return err;
2914
2915 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2916 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2917 if (!block)
2918 goto out;
2919 /* If we work with block index, q is NULL and parent value
2920 * will never be used in the following code. The check
2921 * in tcf_fill_node prevents it. However, compiler does not
2922 * see that far, so set parent to zero to silence the warning
2923 * about parent being uninitialized.
2924 */
2925 parent = 0;
2926 } else {
2927 const struct Qdisc_class_ops *cops;
2928 struct net_device *dev;
2929 unsigned long cl = 0;
2930
2931 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2932 if (!dev)
2933 return skb->len;
2934
2935 parent = tcm->tcm_parent;
2936 if (!parent) {
2937 q = dev->qdisc;
2938 parent = q->handle;
2939 } else {
2940 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2941 }
2942 if (!q)
2943 goto out;
2944 cops = q->ops->cl_ops;
2945 if (!cops)
2946 goto out;
2947 if (!cops->tcf_block)
2948 goto out;
2949 if (TC_H_MIN(tcm->tcm_parent)) {
2950 cl = cops->find(q, tcm->tcm_parent);
2951 if (cl == 0)
2952 goto out;
2953 }
2954 block = cops->tcf_block(q, cl, NULL);
2955 if (!block)
2956 goto out;
2957 if (tcf_block_shared(block))
2958 q = NULL;
2959 }
2960
2961 index_start = cb->args[0];
2962 index = 0;
2963
2964 mutex_lock(&block->lock);
2965 list_for_each_entry(chain, &block->chain_list, list) {
2966 if ((tca[TCA_CHAIN] &&
2967 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
2968 continue;
2969 if (index < index_start) {
2970 index++;
2971 continue;
2972 }
2973 if (tcf_chain_held_by_acts_only(chain))
2974 continue;
2975 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2976 chain->index, net, skb, block,
2977 NETLINK_CB(cb->skb).portid,
2978 cb->nlh->nlmsg_seq, NLM_F_MULTI,
2979 RTM_NEWCHAIN);
2980 if (err <= 0)
2981 break;
2982 index++;
2983 }
2984 mutex_unlock(&block->lock);
2985
2986 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2987 tcf_block_refcnt_put(block, true);
2988 cb->args[0] = index;
2989
2990 out:
2991 /* If we did no progress, the error (EMSGSIZE) is real */
2992 if (skb->len == 0 && err)
2993 return err;
2994 return skb->len;
2995 }
2996
2997 void tcf_exts_destroy(struct tcf_exts *exts)
2998 {
2999 #ifdef CONFIG_NET_CLS_ACT
3000 if (exts->actions) {
3001 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3002 kfree(exts->actions);
3003 }
3004 exts->nr_actions = 0;
3005 #endif
3006 }
3007 EXPORT_SYMBOL(tcf_exts_destroy);
3008
3009 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3010 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3011 bool rtnl_held, struct netlink_ext_ack *extack)
3012 {
3013 #ifdef CONFIG_NET_CLS_ACT
3014 {
3015 struct tc_action *act;
3016 size_t attr_size = 0;
3017
3018 if (exts->police && tb[exts->police]) {
3019 act = tcf_action_init_1(net, tp, tb[exts->police],
3020 rate_tlv, "police", ovr,
3021 TCA_ACT_BIND, rtnl_held,
3022 extack);
3023 if (IS_ERR(act))
3024 return PTR_ERR(act);
3025
3026 act->type = exts->type = TCA_OLD_COMPAT;
3027 exts->actions[0] = act;
3028 exts->nr_actions = 1;
3029 } else if (exts->action && tb[exts->action]) {
3030 int err;
3031
3032 err = tcf_action_init(net, tp, tb[exts->action],
3033 rate_tlv, NULL, ovr, TCA_ACT_BIND,
3034 exts->actions, &attr_size,
3035 rtnl_held, extack);
3036 if (err < 0)
3037 return err;
3038 exts->nr_actions = err;
3039 }
3040 }
3041 #else
3042 if ((exts->action && tb[exts->action]) ||
3043 (exts->police && tb[exts->police])) {
3044 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3045 return -EOPNOTSUPP;
3046 }
3047 #endif
3048
3049 return 0;
3050 }
3051 EXPORT_SYMBOL(tcf_exts_validate);
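/* Illustrative sketch (not part of this file) of how a classifier's
 * ->change() callback typically drives the helpers above, modelled on the
 * simple classifiers such as cls_basic; TCA_EXAMPLE_* and "f" are
 * hypothetical placeholders:
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, rtnl_held, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&e);
 *		return err;
 *	}
 *	tcf_exts_change(&f->exts, &e);	// commit: swap in validated actions
 */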
3052
3053 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3054 {
3055 #ifdef CONFIG_NET_CLS_ACT
3056 struct tcf_exts old = *dst;
3057
3058 *dst = *src;
3059 tcf_exts_destroy(&old);
3060 #endif
3061 }
3062 EXPORT_SYMBOL(tcf_exts_change);
3063
3064 #ifdef CONFIG_NET_CLS_ACT
3065 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3066 {
3067 if (exts->nr_actions == 0)
3068 return NULL;
3069 else
3070 return exts->actions[0];
3071 }
3072 #endif
3073
3074 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3075 {
3076 #ifdef CONFIG_NET_CLS_ACT
3077 struct nlattr *nest;
3078
3079 if (exts->action && tcf_exts_has_actions(exts)) {
3080 /*
3081 * again for backward compatible mode - we want
3082 * to work with both old and new modes of entering
3083 * tc data even if iproute2 was newer - jhs
3084 */
3085 if (exts->type != TCA_OLD_COMPAT) {
3086 nest = nla_nest_start_noflag(skb, exts->action);
3087 if (nest == NULL)
3088 goto nla_put_failure;
3089
3090 if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3091 goto nla_put_failure;
3092 nla_nest_end(skb, nest);
3093 } else if (exts->police) {
3094 struct tc_action *act = tcf_exts_first_act(exts);
3095 nest = nla_nest_start_noflag(skb, exts->police);
3096 if (nest == NULL || !act)
3097 goto nla_put_failure;
3098 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3099 goto nla_put_failure;
3100 nla_nest_end(skb, nest);
3101 }
3102 }
3103 return 0;
3104
3105 nla_put_failure:
3106 nla_nest_cancel(skb, nest);
3107 return -1;
3108 #else
3109 return 0;
3110 #endif
3111 }
3112 EXPORT_SYMBOL(tcf_exts_dump);
3113
3114
3115 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3116 {
3117 #ifdef CONFIG_NET_CLS_ACT
3118 struct tc_action *a = tcf_exts_first_act(exts);
3119 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3120 return -1;
3121 #endif
3122 return 0;
3123 }
3124 EXPORT_SYMBOL(tcf_exts_dump_stats);
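/* Illustrative sketch of the matching ->dump() side (again cls_basic-style;
 * "f" and "nest" are hypothetical locals): tcf_exts_dump() must be called
 * inside the options nest, while the stats are appended after it is closed:
 *
 *	if (tcf_exts_dump(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 *	nla_nest_end(skb, nest);
 *	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *		goto nla_put_failure;
 */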
3125
3126 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3127 {
3128 if (*flags & TCA_CLS_FLAGS_IN_HW)
3129 return;
3130 *flags |= TCA_CLS_FLAGS_IN_HW;
3131 atomic_inc(&block->offloadcnt);
3132 }
3133
3134 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3135 {
3136 if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3137 return;
3138 *flags &= ~TCA_CLS_FLAGS_IN_HW;
3139 atomic_dec(&block->offloadcnt);
3140 }
3141
3142 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3143 struct tcf_proto *tp, u32 *cnt,
3144 u32 *flags, u32 diff, bool add)
3145 {
3146 lockdep_assert_held(&block->cb_lock);
3147
3148 spin_lock(&tp->lock);
3149 if (add) {
3150 if (!*cnt)
3151 tcf_block_offload_inc(block, flags);
3152 *cnt += diff;
3153 } else {
3154 *cnt -= diff;
3155 if (!*cnt)
3156 tcf_block_offload_dec(block, flags);
3157 }
3158 spin_unlock(&tp->lock);
3159 }
3160
3161 static void
3162 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3163 u32 *cnt, u32 *flags)
3164 {
3165 lockdep_assert_held(&block->cb_lock);
3166
3167 spin_lock(&tp->lock);
3168 tcf_block_offload_dec(block, flags);
3169 *cnt = 0;
3170 spin_unlock(&tp->lock);
3171 }
3172
3173 static int
3174 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3175 void *type_data, bool err_stop)
3176 {
3177 struct flow_block_cb *block_cb;
3178 int ok_count = 0;
3179 int err;
3180
3181 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3182 err = block_cb->cb(type, type_data, block_cb->cb_priv);
3183 if (err) {
3184 if (err_stop)
3185 return err;
3186 } else {
3187 ok_count++;
3188 }
3189 }
3190 return ok_count;
3191 }
3192
3193 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3194 void *type_data, bool err_stop, bool rtnl_held)
3195 {
3196 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3197 int ok_count;
3198
3199 retry:
3200 if (take_rtnl)
3201 rtnl_lock();
3202 down_read(&block->cb_lock);
3203 /* Need to obtain rtnl lock if block is bound to devs that require it.
3204 * In block bind code cb_lock is obtained while holding rtnl, so we must
3205 * obtain the locks in same order here.
3206 */
3207 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3208 up_read(&block->cb_lock);
3209 take_rtnl = true;
3210 goto retry;
3211 }
3212
3213 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3214
3215 up_read(&block->cb_lock);
3216 if (take_rtnl)
3217 rtnl_unlock();
3218 return ok_count;
3219 }
3220 EXPORT_SYMBOL(tc_setup_cb_call);
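/* Illustrative sketch of a stateless call through this helper, in the style
 * of cls_flower's hardware stats refresh ("f" is that classifier's filter):
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
 *	cls_flower.command = FLOW_CLS_STATS;
 *	cls_flower.cookie = (unsigned long) f;
 *	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
 *			 rtnl_held);
 */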
3221
3222 /* Non-destructive filter add. If filter that wasn't already in hardware is
3223 * successfully offloaded, increment block offloads counter. On failure,
3224 * previously offloaded filter is considered to be intact and offloads
3225 * counter is not decremented.
3226 */
3227
3228 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3229 enum tc_setup_type type, void *type_data, bool err_stop,
3230 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3231 {
3232 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3233 int ok_count;
3234
3235 retry:
3236 if (take_rtnl)
3237 rtnl_lock();
3238 down_read(&block->cb_lock);
3239 /* Need to obtain rtnl lock if block is bound to devs that require it.
3240 * In block bind code cb_lock is obtained while holding rtnl, so we must
3241 * obtain the locks in same order here.
3242 */
3243 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3244 up_read(&block->cb_lock);
3245 take_rtnl = true;
3246 goto retry;
3247 }
3248
3249 /* Make sure all netdevs sharing this block are offload-capable. */
3250 if (block->nooffloaddevcnt && err_stop) {
3251 ok_count = -EOPNOTSUPP;
3252 goto err_unlock;
3253 }
3254
3255 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3256 if (ok_count < 0)
3257 goto err_unlock;
3258
3259 if (tp->ops->hw_add)
3260 tp->ops->hw_add(tp, type_data);
3261 if (ok_count > 0)
3262 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3263 ok_count, true);
3264 err_unlock:
3265 up_read(&block->cb_lock);
3266 if (take_rtnl)
3267 rtnl_unlock();
3268 return ok_count < 0 ? ok_count : 0;
3269 }
3270 EXPORT_SYMBOL(tc_setup_cb_add);
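/* Illustrative sketch of the add path as cls_flower uses it, after filling
 * a FLOW_CLS_REPLACE offload struct ("f" and "skip_sw" as in that driver):
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 *	if (err)
 *		goto errout;
 *	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
 *		err = -EINVAL;	// no callback actually offloaded the rule
 *
 * On success the helper has already set TCA_CLS_FLAGS_IN_HW via
 * tc_cls_offload_cnt_update() if at least one callback accepted the rule.
 */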
3271
3272 /* Destructive filter replace. If filter that wasn't already in hardware is
3273 * successfully offloaded, increment block offload counter. On failure,
3274 * previously offloaded filter is considered to be destroyed and offload
3275 * counter is decremented.
3276 */
3277
3278 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3279 enum tc_setup_type type, void *type_data, bool err_stop,
3280 u32 *old_flags, unsigned int *old_in_hw_count,
3281 u32 *new_flags, unsigned int *new_in_hw_count,
3282 bool rtnl_held)
3283 {
3284 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3285 int ok_count;
3286
3287 retry:
3288 if (take_rtnl)
3289 rtnl_lock();
3290 down_read(&block->cb_lock);
3291 /* Need to obtain rtnl lock if block is bound to devs that require it.
3292 * In block bind code cb_lock is obtained while holding rtnl, so we must
3293 * obtain the locks in same order here.
3294 */
3295 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3296 up_read(&block->cb_lock);
3297 take_rtnl = true;
3298 goto retry;
3299 }
3300
3301 /* Make sure all netdevs sharing this block are offload-capable. */
3302 if (block->nooffloaddevcnt && err_stop) {
3303 ok_count = -EOPNOTSUPP;
3304 goto err_unlock;
3305 }
3306
3307 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3308 if (tp->ops->hw_del)
3309 tp->ops->hw_del(tp, type_data);
3310
3311 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3312 if (ok_count < 0)
3313 goto err_unlock;
3314
3315 if (tp->ops->hw_add)
3316 tp->ops->hw_add(tp, type_data);
3317 if (ok_count > 0)
3318 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3319 new_flags, ok_count, true);
3320 err_unlock:
3321 up_read(&block->cb_lock);
3322 if (take_rtnl)
3323 rtnl_unlock();
3324 return ok_count < 0 ? ok_count : 0;
3325 }
3326 EXPORT_SYMBOL(tc_setup_cb_replace);
3327
3328 /* Destroy filter and decrement block offload counter, if filter was
3329 * previously offloaded.
3330 */
3331
3332 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3333 enum tc_setup_type type, void *type_data, bool err_stop,
3334 u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3335 {
3336 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3337 int ok_count;
3338
3339 retry:
3340 if (take_rtnl)
3341 rtnl_lock();
3342 down_read(&block->cb_lock);
3343 /* Need to obtain rtnl lock if block is bound to devs that require it.
3344 * In block bind code cb_lock is obtained while holding rtnl, so we must
3345 * obtain the locks in same order here.
3346 */
3347 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3348 up_read(&block->cb_lock);
3349 take_rtnl = true;
3350 goto retry;
3351 }
3352
3353 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3354
3355 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3356 if (tp->ops->hw_del)
3357 tp->ops->hw_del(tp, type_data);
3358
3359 up_read(&block->cb_lock);
3360 if (take_rtnl)
3361 rtnl_unlock();
3362 return ok_count < 0 ? ok_count : 0;
3363 }
3364 EXPORT_SYMBOL(tc_setup_cb_destroy);
3365
3366 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3367 bool add, flow_setup_cb_t *cb,
3368 enum tc_setup_type type, void *type_data,
3369 void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3370 {
3371 int err = cb(type, type_data, cb_priv);
3372
3373 if (err) {
3374 if (add && tc_skip_sw(*flags))
3375 return err;
3376 } else {
3377 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3378 add);
3379 }
3380
3381 return 0;
3382 }
3383 EXPORT_SYMBOL(tc_setup_cb_reoffload);
3384
3385 void tc_cleanup_flow_action(struct flow_action *flow_action)
3386 {
3387 struct flow_action_entry *entry;
3388 int i;
3389
3390 flow_action_for_each(i, entry, flow_action)
3391 if (entry->destructor)
3392 entry->destructor(entry->destructor_priv);
3393 }
3394 EXPORT_SYMBOL(tc_cleanup_flow_action);
3395
3396 static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3397 const struct tc_action *act)
3398 {
3399 #ifdef CONFIG_NET_CLS_ACT
3400 entry->dev = act->ops->get_dev(act, &entry->destructor);
3401 if (!entry->dev)
3402 return;
3403 entry->destructor_priv = entry->dev;
3404 #endif
3405 }
3406
3407 static void tcf_tunnel_encap_put_tunnel(void *priv)
3408 {
3409 struct ip_tunnel_info *tunnel = priv;
3410
3411 kfree(tunnel);
3412 }
3413
3414 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3415 const struct tc_action *act)
3416 {
3417 entry->tunnel = tcf_tunnel_info_copy(act);
3418 if (!entry->tunnel)
3419 return -ENOMEM;
3420 entry->destructor = tcf_tunnel_encap_put_tunnel;
3421 entry->destructor_priv = entry->tunnel;
3422 return 0;
3423 }
3424
3425 static void tcf_sample_get_group(struct flow_action_entry *entry,
3426 const struct tc_action *act)
3427 {
3428 #ifdef CONFIG_NET_CLS_ACT
3429 entry->sample.psample_group =
3430 act->ops->get_psample_group(act, &entry->destructor);
3431 entry->destructor_priv = entry->sample.psample_group;
3432 #endif
3433 }
3434
3435 int tc_setup_flow_action(struct flow_action *flow_action,
3436 const struct tcf_exts *exts, bool rtnl_held)
3437 {
3438 const struct tc_action *act;
3439 int i, j, k, err = 0;
3440
3441 if (!exts)
3442 return 0;
3443
3444 if (!rtnl_held)
3445 rtnl_lock();
3446
3447 j = 0;
3448 tcf_exts_for_each_action(i, act, exts) {
3449 struct flow_action_entry *entry;
3450
3451 entry = &flow_action->entries[j];
3452 if (is_tcf_gact_ok(act)) {
3453 entry->id = FLOW_ACTION_ACCEPT;
3454 } else if (is_tcf_gact_shot(act)) {
3455 entry->id = FLOW_ACTION_DROP;
3456 } else if (is_tcf_gact_trap(act)) {
3457 entry->id = FLOW_ACTION_TRAP;
3458 } else if (is_tcf_gact_goto_chain(act)) {
3459 entry->id = FLOW_ACTION_GOTO;
3460 entry->chain_index = tcf_gact_goto_chain_index(act);
3461 } else if (is_tcf_mirred_egress_redirect(act)) {
3462 entry->id = FLOW_ACTION_REDIRECT;
3463 tcf_mirred_get_dev(entry, act);
3464 } else if (is_tcf_mirred_egress_mirror(act)) {
3465 entry->id = FLOW_ACTION_MIRRED;
3466 tcf_mirred_get_dev(entry, act);
3467 } else if (is_tcf_mirred_ingress_redirect(act)) {
3468 entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3469 tcf_mirred_get_dev(entry, act);
3470 } else if (is_tcf_mirred_ingress_mirror(act)) {
3471 entry->id = FLOW_ACTION_MIRRED_INGRESS;
3472 tcf_mirred_get_dev(entry, act);
3473 } else if (is_tcf_vlan(act)) {
3474 switch (tcf_vlan_action(act)) {
3475 case TCA_VLAN_ACT_PUSH:
3476 entry->id = FLOW_ACTION_VLAN_PUSH;
3477 entry->vlan.vid = tcf_vlan_push_vid(act);
3478 entry->vlan.proto = tcf_vlan_push_proto(act);
3479 entry->vlan.prio = tcf_vlan_push_prio(act);
3480 break;
3481 case TCA_VLAN_ACT_POP:
3482 entry->id = FLOW_ACTION_VLAN_POP;
3483 break;
3484 case TCA_VLAN_ACT_MODIFY:
3485 entry->id = FLOW_ACTION_VLAN_MANGLE;
3486 entry->vlan.vid = tcf_vlan_push_vid(act);
3487 entry->vlan.proto = tcf_vlan_push_proto(act);
3488 entry->vlan.prio = tcf_vlan_push_prio(act);
3489 break;
3490 default:
3491 err = -EOPNOTSUPP;
3492 goto err_out;
3493 }
3494 } else if (is_tcf_tunnel_set(act)) {
3495 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3496 err = tcf_tunnel_encap_get_tunnel(entry, act);
3497 if (err)
3498 goto err_out;
3499 } else if (is_tcf_tunnel_release(act)) {
3500 entry->id = FLOW_ACTION_TUNNEL_DECAP;
3501 } else if (is_tcf_pedit(act)) {
3502 for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3503 switch (tcf_pedit_cmd(act, k)) {
3504 case TCA_PEDIT_KEY_EX_CMD_SET:
3505 entry->id = FLOW_ACTION_MANGLE;
3506 break;
3507 case TCA_PEDIT_KEY_EX_CMD_ADD:
3508 entry->id = FLOW_ACTION_ADD;
3509 break;
3510 default:
3511 err = -EOPNOTSUPP;
3512 goto err_out;
3513 }
3514 entry->mangle.htype = tcf_pedit_htype(act, k);
3515 entry->mangle.mask = tcf_pedit_mask(act, k);
3516 entry->mangle.val = tcf_pedit_val(act, k);
3517 entry->mangle.offset = tcf_pedit_offset(act, k);
3518 entry = &flow_action->entries[++j];
3519 }
3520 } else if (is_tcf_csum(act)) {
3521 entry->id = FLOW_ACTION_CSUM;
3522 entry->csum_flags = tcf_csum_update_flags(act);
3523 } else if (is_tcf_skbedit_mark(act)) {
3524 entry->id = FLOW_ACTION_MARK;
3525 entry->mark = tcf_skbedit_mark(act);
3526 } else if (is_tcf_sample(act)) {
3527 entry->id = FLOW_ACTION_SAMPLE;
3528 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3529 entry->sample.truncate = tcf_sample_truncate(act);
3530 entry->sample.rate = tcf_sample_rate(act);
3531 tcf_sample_get_group(entry, act);
3532 } else if (is_tcf_police(act)) {
3533 entry->id = FLOW_ACTION_POLICE;
3534 entry->police.burst = tcf_police_tcfp_burst(act);
3535 entry->police.rate_bytes_ps =
3536 tcf_police_rate_bytes_ps(act);
3537 } else if (is_tcf_ct(act)) {
3538 entry->id = FLOW_ACTION_CT;
3539 entry->ct.action = tcf_ct_action(act);
3540 entry->ct.zone = tcf_ct_zone(act);
3541 } else if (is_tcf_mpls(act)) {
3542 switch (tcf_mpls_action(act)) {
3543 case TCA_MPLS_ACT_PUSH:
3544 entry->id = FLOW_ACTION_MPLS_PUSH;
3545 entry->mpls_push.proto = tcf_mpls_proto(act);
3546 entry->mpls_push.label = tcf_mpls_label(act);
3547 entry->mpls_push.tc = tcf_mpls_tc(act);
3548 entry->mpls_push.bos = tcf_mpls_bos(act);
3549 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3550 break;
3551 case TCA_MPLS_ACT_POP:
3552 entry->id = FLOW_ACTION_MPLS_POP;
3553 entry->mpls_pop.proto = tcf_mpls_proto(act);
3554 break;
3555 case TCA_MPLS_ACT_MODIFY:
3556 entry->id = FLOW_ACTION_MPLS_MANGLE;
3557 entry->mpls_mangle.label = tcf_mpls_label(act);
3558 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3559 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3560 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3561 break;
3562 default:
3563 err = -EOPNOTSUPP; goto err_out;
3564 }
3565 } else if (is_tcf_skbedit_ptype(act)) {
3566 entry->id = FLOW_ACTION_PTYPE;
3567 entry->ptype = tcf_skbedit_ptype(act);
3568 } else {
3569 err = -EOPNOTSUPP;
3570 goto err_out;
3571 }
3572
3573 if (!is_tcf_pedit(act))
3574 j++;
3575 }
3576
3577 err_out:
3578 if (!rtnl_held)
3579 rtnl_unlock();
3580
3581 if (err)
3582 tc_cleanup_flow_action(flow_action);
3583
3584 return err;
3585 }
3586 EXPORT_SYMBOL(tc_setup_flow_action);
3587
3588 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3589 {
3590 unsigned int num_acts = 0;
3591 struct tc_action *act;
3592 int i;
3593
3594 tcf_exts_for_each_action(i, act, exts) {
3595 if (is_tcf_pedit(act))
3596 num_acts += tcf_pedit_nkeys(act);
3597 else
3598 num_acts++;
3599 }
3600 return num_acts;
3601 }
3602 EXPORT_SYMBOL(tcf_exts_num_actions);
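/* Illustrative sketch pairing the two exports above, as offloading
 * classifiers do: size the flow_rule by the expanded action count (pedit
 * contributes one entry per key), then translate ("f" is hypothetical):
 *
 *	struct flow_rule *rule;
 *	int err;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, &f->exts, rtnl_held);
 *	if (err) {
 *		kfree(rule);
 *		return err;
 *	}
 */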
3603
3604 static __net_init int tcf_net_init(struct net *net)
3605 {
3606 struct tcf_net *tn = net_generic(net, tcf_net_id);
3607
3608 spin_lock_init(&tn->idr_lock);
3609 idr_init(&tn->idr);
3610 return 0;
3611 }
3612
3613 static void __net_exit tcf_net_exit(struct net *net)
3614 {
3615 struct tcf_net *tn = net_generic(net, tcf_net_id);
3616
3617 idr_destroy(&tn->idr);
3618 }
3619
3620 static struct pernet_operations tcf_net_ops = {
3621 .init = tcf_net_init,
3622 .exit = tcf_net_exit,
3623 .id = &tcf_net_id,
3624 .size = sizeof(struct tcf_net),
3625 };
3626
3627 static struct flow_indr_block_entry block_entry = {
3628 .cb = tc_indr_block_get_and_cmd,
3629 .list = LIST_HEAD_INIT(block_entry.list),
3630 };
3631
3632 static int __init tc_filter_init(void)
3633 {
3634 int err;
3635
3636 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3637 if (!tc_filter_wq)
3638 return -ENOMEM;
3639
3640 err = register_pernet_subsys(&tcf_net_ops);
3641 if (err)
3642 goto err_register_pernet_subsys;
3643
3644 flow_indr_add_block_cb(&block_entry);
3645
3646 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3647 RTNL_FLAG_DOIT_UNLOCKED);
3648 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3649 RTNL_FLAG_DOIT_UNLOCKED);
3650 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3651 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3652 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3653 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3654 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3655 tc_dump_chain, 0);
3656
3657 return 0;
3658
3659 err_register_pernet_subsys:
3660 destroy_workqueue(tc_filter_wq);
3661 return err;
3662 }
3663
3664 subsys_initcall(tc_filter_init);