This source file includes following definitions.
- ip6mr_mr_table_iter
- ip6mr_get_table
- ip6mr_fib_lookup
- ip6mr_rule_action
- ip6mr_rule_match
- ip6mr_rule_configure
- ip6mr_rule_compare
- ip6mr_rule_fill
- ip6mr_rules_init
- ip6mr_rules_exit
- ip6mr_rules_dump
- ip6mr_rules_seq_read
- ip6mr_rule_default
- ip6mr_mr_table_iter
- ip6mr_get_table
- ip6mr_fib_lookup
- ip6mr_rules_init
- ip6mr_rules_exit
- ip6mr_rules_dump
- ip6mr_rules_seq_read
- ip6mr_hash_cmp
- ip6mr_new_table_set
- ip6mr_new_table
- ip6mr_free_table
- ip6mr_vif_seq_start
- ip6mr_vif_seq_stop
- ip6mr_vif_seq_show
- ipmr_mfc_seq_start
- ipmr_mfc_seq_show
- pim6_rcv
- reg_vif_xmit
- reg_vif_get_iflink
- reg_vif_setup
- ip6mr_reg_vif
- call_ip6mr_vif_entry_notifiers
- call_ip6mr_mfc_entry_notifiers
- mif6_delete
- ip6mr_cache_free_rcu
- ip6mr_cache_free
- ip6mr_destroy_unres
- ipmr_do_expire_process
- ipmr_expire_process
- ip6mr_update_thresholds
- mif6_add
- ip6mr_cache_find
- ip6mr_cache_find_any
- ip6mr_cache_find_parent
- ip6mr_cache_alloc
- ip6mr_cache_alloc_unres
- ip6mr_cache_resolve
- ip6mr_cache_report
- ip6mr_cache_unresolved
- ip6mr_mfc_delete
- ip6mr_device_event
- ip6mr_seq_read
- ip6mr_dump
- ip6mr_notifier_init
- ip6mr_notifier_exit
- ip6mr_net_init
- ip6mr_net_exit
- ip6_mr_init
- ip6_mr_cleanup
- ip6mr_mfc_add
- mroute_clean_tables
- ip6mr_sk_init
- ip6mr_sk_done
- mroute6_is_socket
- ip6_mroute_setsockopt
- ip6_mroute_getsockopt
- ip6mr_ioctl
- ip6mr_compat_ioctl
- ip6mr_forward2_finish
- ip6mr_forward2
- ip6mr_find_vif
- ip6_mr_forward
- ip6_mr_input
- ip6mr_get_route
- ip6mr_fill_mroute
- _ip6mr_fill_mroute
- mr6_msgsize
- mr6_netlink_event
- mrt6msg_netlink_msgsize
- mrt6msg_netlink_event
- ip6mr_rtm_dumproute
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #include <linux/uaccess.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/errno.h>
18 #include <linux/mm.h>
19 #include <linux/kernel.h>
20 #include <linux/fcntl.h>
21 #include <linux/stat.h>
22 #include <linux/socket.h>
23 #include <linux/inet.h>
24 #include <linux/netdevice.h>
25 #include <linux/inetdevice.h>
26 #include <linux/proc_fs.h>
27 #include <linux/seq_file.h>
28 #include <linux/init.h>
29 #include <linux/compat.h>
30 #include <linux/rhashtable.h>
31 #include <net/protocol.h>
32 #include <linux/skbuff.h>
33 #include <net/raw.h>
34 #include <linux/notifier.h>
35 #include <linux/if_arp.h>
36 #include <net/checksum.h>
37 #include <net/netlink.h>
38 #include <net/fib_rules.h>
39
40 #include <net/ipv6.h>
41 #include <net/ip6_route.h>
42 #include <linux/mroute6.h>
43 #include <linux/pim.h>
44 #include <net/addrconf.h>
45 #include <linux/netfilter_ipv6.h>
46 #include <linux/export.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/netconf.h>
49 #include <net/ip_tunnels.h>
50
51 #include <linux/nospec.h>
52
/* Multicast routing fib rule: carries only the common fib_rule state,
 * no ip6mr-specific match fields.
 */
struct ip6mr_rule {
	struct fib_rule common;
};

/* Result of a fib-rules lookup: the multicast routing table that matched. */
struct ip6mr_result {
	struct mr_table *mrt;
};
60
61
62
63
64
/* Protects the vif_table[] arrays and related per-table vif state:
 * readers take read_lock(), mif6_add()/mif6_delete() take write_lock_bh().
 */
static DEFINE_RWLOCK(mrt_lock);

/* Serializes access to each table's mfc_unres_queue of unresolved
 * cache entries (taken _bh from packet path and timer).
 */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* Slab cache for struct mfc6_cache entries. */
static struct kmem_cache *mrt_cachep __read_mostly;
81
82 static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
83 static void ip6mr_free_table(struct mr_table *mrt);
84
85 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
86 struct net_device *dev, struct sk_buff *skb,
87 struct mfc6_cache *cache);
88 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
89 mifi_t mifi, int assert);
90 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
91 int cmd);
92 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
93 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
94 struct netlink_callback *cb);
95 static void mroute_clean_tables(struct mr_table *mrt, int flags);
96 static void ipmr_expire_process(struct timer_list *t);
97
98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
99 #define ip6mr_for_each_table(mrt, net) \
100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
101
102 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
103 struct mr_table *mrt)
104 {
105 struct mr_table *ret;
106
107 if (!mrt)
108 ret = list_entry_rcu(net->ipv6.mr6_tables.next,
109 struct mr_table, list);
110 else
111 ret = list_entry_rcu(mrt->list.next,
112 struct mr_table, list);
113
114 if (&ret->list == &net->ipv6.mr6_tables)
115 return NULL;
116 return ret;
117 }
118
119 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
120 {
121 struct mr_table *mrt;
122
123 ip6mr_for_each_table(mrt, net) {
124 if (mrt->id == id)
125 return mrt;
126 }
127 return NULL;
128 }
129
/* Resolve which mr_table handles a flow by running the IPv6 multicast
 * fib rules. On success returns 0 and stores the table in *mrt;
 * otherwise returns the negative error from the rules lookup.
 */
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
150
/* fib_rules .action callback: map a matched rule to a mr_table.
 * Only FR_ACT_TO_TBL resolves to a table; other actions translate to
 * errors. -EAGAIN makes fib_rules_lookup() continue with the next rule.
 */
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ip6mr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
177
/* fib_rules .match callback: ip6mr rules have no extra selectors,
 * so every rule matches every flow.
 */
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

/* Netlink attribute policy: only the generic fib-rule attributes. */
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
186
/* fib_rules .configure callback: nothing beyond the generic rule
 * fields to parse, so always succeed.
 */
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}

/* fib_rules .compare callback: no private fields, so any two rules
 * with equal generic fields are equal.
 */
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

/* fib_rules .fill callback: ip6mr rules carry no src/dst/tos selectors,
 * report them all as zero in the dump.
 */
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}
208
/* Template cloned per-netns by fib_rules_register() in ip6mr_rules_init(). */
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
222
/* Per-netns init (multi-table build): register the fib-rules ops,
 * create the default table and the catch-all rule pointing at it.
 * Undoes partial setup via the goto ladder on error.
 */
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	/* lowest priority catch-all rule -> default table */
	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
254
/* Per-netns teardown (multi-table build): free every table and
 * unregister the rules ops, all under RTNL.
 */
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
267
/* Replay all IP6MR fib rules to a freshly registered fib notifier. */
static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
}

/* Sequence counter of the IP6MR rule set, used to detect changes
 * that happened while a notifier dump was in flight.
 */
static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}

/* True if @rule is the stock catch-all rule targeting the default
 * table (i.e. the configuration ip6mr_rules_init() sets up).
 */
bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
284 #else
285 #define ip6mr_for_each_table(mrt, net) \
286 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
287
288 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
289 struct mr_table *mrt)
290 {
291 if (!mrt)
292 return net->ipv6.mrt6;
293 return NULL;
294 }
295
/* Single-table build: every id maps to the one per-netns table. */
static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

/* Single-table build: no rules; the flow always resolves to mrt6. */
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

/* Single-table build: just allocate the default table. */
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

/* Single-table build: free the default table under RTNL. */
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}

/* Single-table build: no rules to dump. */
static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

/* Single-table build: rule set never changes, sequence is constant. */
static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
336 #endif
337
/* rhashtable compare callback for the MFC cache: entries are keyed by
 * the (origin, mcastgrp) address pair. Returns 0 on match, non-zero
 * otherwise, per rhashtable convention.
 */
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

/* rhashtable layout for the per-table MFC cache. */
static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};
356
/* mr_table_alloc() callback: publish a new table on the per-netns
 * list (only kept on multi-table builds).
 */
static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

/* Wildcard (*,*) key used for "any" MFC lookups. */
static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

/* Ops handed to the generic mr_table code. */
static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};
374
/* Return the table with @id, creating it if it does not exist yet.
 * Returns an ERR_PTR on allocation failure (from mr_table_alloc()).
 */
static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}
386
/* Tear down a table: stop the expiry timer, flush every vif and MFC
 * entry (static ones included), then destroy the hash and free it.
 * Caller must hold RTNL (mroute_clean_tables requirement).
 */
static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}
395
396 #ifdef CONFIG_PROC_FS
397
398
399
400
/* /proc/net/ip6_mr_vif iterator: pins the default table and takes
 * mrt_lock for the duration of the walk (released in ..._seq_stop).
 */
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

/* Drop the lock taken in ip6mr_vif_seq_start(). */
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

/* Emit one /proc/net/ip6_mr_vif line: header row for the start token,
 * otherwise the per-vif counters.
 */
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
452
/* /proc/net/ip6_mr_cache iterator: walk the default table's MFC
 * entries (resolved hash plus unresolved queue, via the generic
 * mr_mfc helpers).
 */
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

/* Emit one /proc/net/ip6_mr_cache line: group, origin, input
 * interface, counters, then a "mif:ttl" pair per forwarding oif.
 */
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
513 #endif
514
515 #ifdef CONFIG_IPV6_PIMSM_V2
516
517 static int pim6_rcv(struct sk_buff *skb)
518 {
519 struct pimreghdr *pim;
520 struct ipv6hdr *encap;
521 struct net_device *reg_dev = NULL;
522 struct net *net = dev_net(skb->dev);
523 struct mr_table *mrt;
524 struct flowi6 fl6 = {
525 .flowi6_iif = skb->dev->ifindex,
526 .flowi6_mark = skb->mark,
527 };
528 int reg_vif_num;
529
530 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
531 goto drop;
532
533 pim = (struct pimreghdr *)skb_transport_header(skb);
534 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
535 (pim->flags & PIM_NULL_REGISTER) ||
536 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
537 sizeof(*pim), IPPROTO_PIM,
538 csum_partial((void *)pim, sizeof(*pim), 0)) &&
539 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
540 goto drop;
541
542
543 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
544 sizeof(*pim));
545
546 if (!ipv6_addr_is_multicast(&encap->daddr) ||
547 encap->payload_len == 0 ||
548 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
549 goto drop;
550
551 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
552 goto drop;
553 reg_vif_num = mrt->mroute_reg_vif_num;
554
555 read_lock(&mrt_lock);
556 if (reg_vif_num >= 0)
557 reg_dev = mrt->vif_table[reg_vif_num].dev;
558 if (reg_dev)
559 dev_hold(reg_dev);
560 read_unlock(&mrt_lock);
561
562 if (!reg_dev)
563 goto drop;
564
565 skb->mac_header = skb->network_header;
566 skb_pull(skb, (u8 *)encap - skb->data);
567 skb_reset_network_header(skb);
568 skb->protocol = htons(ETH_P_IPV6);
569 skb->ip_summed = CHECKSUM_NONE;
570
571 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
572
573 netif_rx(skb);
574
575 dev_put(reg_dev);
576 return 0;
577 drop:
578 kfree_skb(skb);
579 return 0;
580 }
581
/* IPPROTO_PIM handler registered by ip6_mr_init(). */
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
585
586
587
/* Transmit path of the pim6reg pseudo-device: instead of sending,
 * deliver the whole packet to the multicast routing daemon as a
 * MRT6MSG_WHOLEPKT report, then free the skb.
 */
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto tx_err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* pim6reg has no lower device; report "no iflink". */
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
628
629 static void reg_vif_setup(struct net_device *dev)
630 {
631 dev->type = ARPHRD_PIMREG;
632 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
633 dev->flags = IFF_NOARP;
634 dev->netdev_ops = ®_vif_netdev_ops;
635 dev->needs_free_netdev = true;
636 dev->features |= NETIF_F_NETNS_LOCAL;
637 }
638
/* Create, register and bring up the pim6reg%u device for @mrt
 * (plain "pim6reg" for the default table). Returns the held device
 * or NULL on any failure.
 */
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
670 #endif
671
/* Notify fib listeners of a vif add/delete, bumping the ipmr
 * sequence counter via the generic helper.
 */
static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

/* Notify fib listeners of an MFC entry add/delete, bumping the ipmr
 * sequence counter via the generic helper.
 */
static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}
689
690
/* Delete a vif from @mrt. If @notify is set we are reacting to the
 * device going away (NETDEV_UNREGISTER), so don't unregister it again;
 * otherwise register-vifs are queued on @head for batched unregister.
 * Caller must hold RTNL.
 */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	/* Clearing v->dev under the write lock is what marks the
	 * vif as gone for concurrent readers.
	 */
	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	/* Shrink maxvif down to the highest remaining vif + 1. */
	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
749
/* RCU callback: return an MFC entry to the slab cache. */
static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

/* Free an MFC entry after a grace period (lookups are RCU-protected). */
static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}
761
762
763
764
765
/* Destroy an unresolved cache entry and everything queued on it.
 * Called with mfc_unres_lock held. Queued skbs with a zero IP
 * "version" are really pending netlink requests (see ip6mr_get_route);
 * answer those with -ETIMEDOUT, drop real packets.
 */
static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
788
789
790
791
/* Expire timed-out unresolved entries and re-arm the timer for the
 * soonest remaining expiry (capped at 10s). Called with
 * mfc_unres_lock held.
 */
static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet due: track the nearest deadline */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

/* Timer handler: run the expiry scan if we can get the lock without
 * spinning in timer context, otherwise retry one jiffy later.
 */
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
830
831
832
/* Recompute a cache entry's per-vif TTL thresholds and its
 * [minvif, maxvif) forwarding range from the supplied ttl vector.
 * A ttl of 0 or 255 means "do not forward on this vif" (slot stays 255).
 */
static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
855
/* Add a vif to @mrt from a MRT6_ADD_MIF request. @mrtsock marks a
 * daemon-created (non-static) vif. Caller must hold RTNL.
 * Returns 0 or a negative errno.
 */
static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/* Special purpose vif: the PIM register device.
		 * Only one per table.
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		/* Ordinary vif backed by an existing interface. */
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}
930
/* Exact (origin, group) lookup in the resolved MFC hash. */
static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look up a (*,G) entry — or the (*,*) entry when @mcastgrp is the
 * unspecified address — that forwards via @mifi.
 */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}

/* Exact (origin, group) lookup constrained to a specific parent vif. */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}
972
973
/* Allocate a resolved MFC entry (process context, may sleep).
 * last_assert is backdated so the first wrong-interface event
 * immediately triggers an assert report.
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}

/* Allocate an unresolved MFC entry from the packet path (atomic).
 * It expires in 10s unless the daemon resolves it first.
 */
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
995
996
997
998
999
/* A cache entry has gone from unresolved (@uc) to resolved (@c):
 * replay everything that was queued waiting for the route. skbs with
 * a zero IP "version" are pending netlink route requests and get a
 * reply; real packets are forwarded normally.
 */
static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb->dev, skb, c);
	}
}
1028
1029
1030
1031
1032
1033
1034
/* Build an MRT6MSG report for the multicast routing daemon and queue
 * it on the table's mroute socket. For MRT6MSG_WHOLEPKT (PIM register)
 * the entire packet is wrapped; otherwise only the IPv6 header plus
 * the mrt6msg is sent. Returns 0 or a negative errno.
 */
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */
	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Copy the IP header: the mrt6msg replaces the PIM
		 * register header in front of the original packet.
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/* Copy the IP header */
		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/* Add our header */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
1128
1129
/* Queue a packet whose route is not yet known. Creates the unresolved
 * cache entry on first miss (reporting MRT6MSG_NOCACHE to the daemon);
 * at most 3 packets are held per entry while waiting for resolution.
 */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb, struct net_device *dev)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		c = ip6mr_cache_alloc_unres();
		if (!c) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/* Reflect first query at pim6sd */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
1202
1203
1204
1205
1206
/* Delete a resolved MFC entry matching (origin, group, parent).
 * Removes it from the hash and list, notifies fib listeners and
 * netlink, then drops the reference (freed via RCU when last ref
 * goes). Caller must hold RTNL.
 */
static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}
1228
/* netdevice notifier: when a device unregisters, delete every vif
 * (in every table) that was bound to it.
 */
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}
1251
/* FIB notifier sequence number for IP6MR: the per-netns table change
 * counter plus the policy-rule change counter.  Both counters are
 * updated under RTNL, hence the assertion.
 */
static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}
1258
/* Replay the current IP6MR state (rules, MIFs and MFC entries) to a
 * freshly registered FIB notifier block.
 */
static int ip6mr_dump(struct net *net, struct notifier_block *nb)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock);
}
1264
/* Netdevice event hook: tears down MIFs on NETDEV_UNREGISTER */
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/* Template instantiated per netns by fib_notifier_ops_register() */
static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family = RTNL_FAMILY_IP6MR,
	.fib_seq_read = ip6mr_seq_read,
	.fib_dump = ip6mr_dump,
	.owner = THIS_MODULE,
};
1275
/* Per-netns setup of the IP6MR FIB notifier machinery: reset the
 * change sequence counter and register this netns's notifier ops.
 *
 * Returns 0 on success or the negative errno from registration.
 */
static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}
1290
/* Per-netns teardown matching ip6mr_notifier_init() */
static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}
1296
1297
/* Per-netns initialization: notifier ops, routing table(s)/rules and,
 * when procfs is enabled, the ip6_mr_vif and ip6_mr_cache seq files.
 * The error path unwinds in strict reverse order of setup.
 */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
			sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}
1332
/* Per-netns teardown mirroring ip6mr_net_init() in reverse order */
static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
	ip6mr_notifier_exit(net);
}
1342
/* Pernet hooks run on netns creation/destruction */
static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
1347
/* Module/boot-time setup for IPv6 multicast routing: MFC slab cache,
 * pernet ops, netdevice notifier, the optional PIM protocol handler
 * and the RTM_GETROUTE dump handler.  Failures unwind everything
 * registered so far, in reverse order.
 */
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
1389
/* Unwind everything ip6_mr_init() registered, in reverse order */
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
1400
/* Add or update an MFC entry in @mrt from a userspace mf6cctl request.
 *
 * Called with RTNL held.  If an entry with the same (origin, group,
 * parent) already exists it is updated in place; otherwise a new entry
 * is allocated, inserted, and any queued unresolved packets for the
 * same flow are resolved and forwarded through it.
 *
 * @mrtsock: true when the request comes from the owning mroute socket;
 *           entries added by other (CAP_NET_ADMIN) callers are marked
 *           MFC_STATIC so they survive socket teardown.
 */
static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	/* TTL threshold 1 for requested output MIFs, 255 (never) otherwise */
	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* An existing entry is updated under mrt_lock instead of re-added */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	/* Group must be multicast, except for the (*,*) proxy wildcard */
	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check whether we have queued packets waiting for this route
	 * to appear; if so, pull the unresolved entry off the queue so
	 * its packets can be replayed through the new cache entry.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc6_cache *)_uc;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
				       c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
1489
1490
1491
1492
1493
/* Flush MIFs and/or MFC entries from @mrt according to the MRT6_FLUSH_*
 * bits in @flags.  Static entries/interfaces are only removed when the
 * corresponding *_STATIC flag is also set.  Called with RTNL held
 * (e.g. on socket close or an explicit MRT6_FLUSH request).
 */
static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
	struct mr_mfc *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
		for (i = 0; i < mrt->maxvif; i++) {
			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
			     !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
				continue;
			mif6_delete(mrt, i, 0, &list);
		}
		unregister_netdevice_many(&list);
	}

	/* Wipe the cache */
	if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
				continue;
			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
			list_del_rcu(&c->list);
			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
						       FIB_EVENT_ENTRY_DEL,
						       (struct mfc6_cache *)c, mrt->id);
			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
			mr_cache_put(c);
		}
	}

	/* Unresolved entries (never static) go with the plain MFC flush */
	if (flags & MRT6_FLUSH_MFC) {
		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
			spin_lock_bh(&mfc_unres_lock);
			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
				list_del(&c->list);
				mr6_netlink_event(mrt, (struct mfc6_cache *)c,
						  RTM_DELROUTE);
				ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
			}
			spin_unlock_bh(&mfc_unres_lock);
		}
	}
}
1541
/* Attach @sk as the mroute control socket of table @mrt (MRT6_INIT).
 *
 * Only one socket may own a table at a time; a second MRT6_INIT gets
 * -EADDRINUSE.  On success mc_forwarding is enabled netns-wide and a
 * NETCONF notification is sent.
 */
static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (rtnl_dereference(mrt->mroute_sk)) {
		err = -EADDRINUSE;
	} else {
		rcu_assign_pointer(mrt->mroute_sk, sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		net->ipv6.devconf_all->mc_forwarding++;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}
1567
/* Detach @sk from whichever table it owns (MRT6_DONE or socket close).
 *
 * Scans all tables in the socket's netns; when the owning table is
 * found, the socket pointer is cleared, mc_forwarding is decremented,
 * a NETCONF notification is sent and all non-static MIFs/MFC entries
 * are flushed.  Returns 0 if the socket owned a table, -EACCES
 * otherwise (including non-mroute sockets).
 */
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
1603
1604 bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1605 {
1606 struct mr_table *mrt;
1607 struct flowi6 fl6 = {
1608 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1609 .flowi6_oif = skb->dev->ifindex,
1610 .flowi6_mark = skb->mark,
1611 };
1612
1613 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1614 return NULL;
1615
1616 return rcu_access_pointer(mrt->mroute_sk);
1617 }
1618 EXPORT_SYMBOL(mroute6_is_socket);
1619
1620
1621
1622
1623
1624
1625
1626
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	/* Except MRT6_INIT, only the owning socket or CAP_NET_ADMIN may
	 * manipulate the table.
	 */
	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk ==
					    rtnl_dereference(mrt->mroute_sk),
					    parent);
		rtnl_unlock();
		return ret;

	case MRT6_FLUSH:
	{
		int flags;

		if (optlen != sizeof(flags))
			return -EINVAL;
		if (get_user(flags, (int __user *)optval))
			return -EFAULT;
		rtnl_lock();
		mroute_clean_tables(mrt, flags);
		rtnl_unlock();
		return 0;
	}

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		/* Enabling PIM mode implies assert mode as well */
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6sd" not capable of table creation with ids above
		 * this arbitrary sanity bound
		 */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		/* Cannot re-bind a socket that already owns a table */
		if (sk == rcu_access_pointer(mrt->mroute_sk))
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		mrt = ip6mr_new_table(net, v);
		if (IS_ERR(mrt))
			ret = PTR_ERR(mrt);
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
1794
1795
1796
1797
1798
/*
 *	Getsock opt support for the multicast routing system.
 *	Supports MRT6_VERSION, MRT6_PIM (if compiled in) and MRT6_ASSERT;
 *	the result is truncated to the caller-supplied length.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;	/* mrouted API version 3.5 */
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
1844
1845
1846
1847
1848
/*
 *	The IP multicast ioctl support routines.
 *	SIOCGETMIFCNT_IN6 returns per-MIF packet/byte counters;
 *	SIOCGETSGCNT_IN6 returns per-(S,G) forwarding statistics.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		/* Clamp the index against Spectre-v1 speculation */
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1906
1907 #ifdef CONFIG_COMPAT
/* 32-bit compat layouts of sioc_sg_req6 / sioc_mif_req6: the counters
 * are compat_ulong_t (32-bit) instead of native unsigned long.
 */
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
1923
/* 32-bit compat counterpart of ip6mr_ioctl(): identical logic, but
 * reads/writes the compat_* request layouts so counters are truncated
 * to 32 bits for 32-bit userspace.
 */
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		/* Clamp the index against Spectre-v1 speculation */
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
1981 #endif
1982
/* Final step of forwarding on one MIF: bump the OUTFORW SNMP counters
 * and hand the skb to the output path.
 */
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}
1991
1992
1993
1994
1995
/*
 *	Processing handlers for ip6mr_forward: transmit one copy of the
 *	packet out of MIF @vifi.  Consumes @skb in all cases (either
 *	queued for transmission or freed).
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	/* Register MIFs do not transmit: the whole packet is reported
	 * to the PIM daemon instead.
	 */
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
2069
2070 static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2071 {
2072 int ct;
2073
2074 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2075 if (mrt->vif_table[ct].dev == dev)
2076 break;
2077 }
2078 return ct;
2079 }
2080
/* Forward @skb along the resolved MFC entry @c: update statistics,
 * run wrong-interface (PIM assert) handling, then send one copy out
 * of each output MIF whose TTL threshold the packet passes.  The
 * original skb is used for the last copy; clones are used for the
 * rest.  Consumes @skb.
 */
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		     * so that we cannot check that packet arrived on an
		     * oif.  It is bad, but otherwise we would need to move
		     * pretty large chunk of pimd to kernel.  Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
2177
2178
2179
2180
2181
2182
/*
 *	Multicast packets for forwarding arrive here: look up the MFC
 *	entry for (saddr, daddr); on a miss, queue the packet as
 *	unresolved and report to the daemon; on a hit, forward it.
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;
	struct net_device *dev;

	/* skb->dev passed in is the master dev for vrfs.
	 * Get the proper interface that does have a vif associated with it.
	 * NOTE(review): the iif is read via IPCB() although this is an IPv6
	 * skb; this relies on inet_skb_parm and inet6_skb_parm sharing the
	 * iif layout — verify against the cb users in the l3mdev path.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, dev);

		/* Fall back to a (*,G) / (*,*) proxy entry if present */
		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, dev, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
2249
/* Fill RTM_GETROUTE output for a multicast route attached to @skb's
 * dst.  If no MFC entry exists yet, a minimal pseudo IPv6 header
 * (version 0, carrying only src/dst) is queued as an unresolved entry
 * so the daemon is asked to resolve it; otherwise the resolved route
 * attributes are appended via mr_fill_mroute().
 */
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		/* Fake header: only src/dst matter for cache lookup */
		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}
2319
/* Build one RTM_{NEW,DEL,GET}ROUTE netlink message describing MFC
 * entry @c of table @mrt.  On any attribute overflow the partially
 * built message is cancelled and -EMSGSIZE is returned.
 */
static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	/* Daemon-installed entries are RTPROT_MROUTED; static ones STATIC */
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
2363
/* Adapter with the generic mr_mfc signature expected by the common
 * dump helpers; just downcasts to mfc6_cache and delegates.
 */
static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}
2371
2372 static int mr6_msgsize(bool unresolved, int maxvif)
2373 {
2374 size_t len =
2375 NLMSG_ALIGN(sizeof(struct rtmsg))
2376 + nla_total_size(4)
2377 + nla_total_size(sizeof(struct in6_addr))
2378 + nla_total_size(sizeof(struct in6_addr))
2379 ;
2380
2381 if (!unresolved)
2382 len = len
2383 + nla_total_size(4)
2384 + nla_total_size(0)
2385 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2386
2387 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2388 ;
2389
2390 return len;
2391 }
2392
/* Broadcast an RTM_NEWROUTE/RTM_DELROUTE notification for @mfc to the
 * RTNLGRP_IPV6_MROUTE group.  A parent index >= MAXMIFS marks the
 * entry as still unresolved, which shrinks the allocated message.
 * On failure the error is recorded on the group via rtnl_set_sk_err().
 */
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
2417
2418 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2419 {
2420 size_t len =
2421 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2422 + nla_total_size(1)
2423 + nla_total_size(4)
2424
2425 + nla_total_size(sizeof(struct in6_addr))
2426
2427 + nla_total_size(sizeof(struct in6_addr))
2428
2429 + nla_total_size(payloadlen)
2430 ;
2431
2432 return len;
2433 }
2434
/* Mirror a daemon cache report (mrt6msg in @pkt's transport header) to
 * the RTNLGRP_IPV6_MROUTE_R netlink group as RTM_NEWCACHEREPORT, with
 * the trailing packet bytes attached as IP6MRA_CREPORT_PKT.
 */
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
2482
/* RTM_GETROUTE dump handler for RTNL_FAMILY_IP6MR: dump either a
 * single table (when the request filters on a table id) or all tables
 * via the common mr_rtm_dumproute() helper.  With strict checking
 * enabled, the request header/attributes are validated first.
 */
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct fib_dump_filter filter = {};
	int err;

	if (cb->strict_check) {
		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
					    &filter, cb);
		if (err < 0)
			return err;
	}

	if (filter.table_id) {
		struct mr_table *mrt;

		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
		if (!mrt) {
			/* A dump not addressed to this family may still
			 * legitimately filter on a table we don't have.
			 */
			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
				return skb->len;

			NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
			return -ENOENT;
		}
		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
				    &mfc_unres_lock, &filter);
		return skb->len ? : err;
	}

	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
}