This source file includes the following definitions:
- batadv_v_ogm_orig_get
- batadv_v_ogm_start_queue_timer
- batadv_v_ogm_start_timer
- batadv_v_ogm_send_to_if
- batadv_v_ogm_len
- batadv_v_ogm_queue_left
- batadv_v_ogm_aggr_list_free
- batadv_v_ogm_aggr_send
- batadv_v_ogm_queue_on_if
- batadv_v_ogm_send_softif
- batadv_v_ogm_send
- batadv_v_ogm_aggr_work
- batadv_v_ogm_iface_enable
- batadv_v_ogm_iface_disable
- batadv_v_ogm_primary_iface_set
- batadv_v_forward_penalty
- batadv_v_ogm_forward
- batadv_v_ogm_metric_update
- batadv_v_ogm_route_update
- batadv_v_ogm_process_per_outif
- batadv_v_ogm_aggr_packet
- batadv_v_ogm_process
- batadv_v_ogm_packet_recv
- batadv_v_ogm_init
- batadv_v_ogm_free

#include "bat_v_ogm.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>

#include "bat_algo.h"
#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "translation-table.h"
#include "tvlv.h"
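
/**
 * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 *
 * Return: the orig_node corresponding to the specified address. If such an
 * object does not exist, it is allocated here. In case of allocation failure
 * NULL is returned.
 */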
struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
					       const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	int hash_added;

	orig_node = batadv_orig_hash_find(bat_priv, addr);
	if (orig_node)
		return orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return NULL;

	kref_get(&orig_node->refcount);
	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
				     batadv_choose_orig, orig_node,
				     &orig_node->hash_entry);
	if (hash_added != 0) {
		/* drop the references taken for the new node and the (failed)
		 * hash entry
		 */
		batadv_orig_node_put(orig_node);
		batadv_orig_node_put(orig_node);
		orig_node = NULL;
	}

	return orig_node;
}
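
/**
 * batadv_v_ogm_start_queue_timer() - restart the OGM aggregation timer
 * @hard_iface: the interface to use to send the OGM
 */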
static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface)
{
	unsigned int msecs = BATADV_MAX_AGGREGATION_MS * 1000;

	/* msecs * [0.9, 1.1] */
	msecs += prandom_u32() % (msecs / 5) - (msecs / 10);
	queue_delayed_work(batadv_event_workqueue, &hard_iface->bat_v.aggr_wq,
			   msecs_to_jiffies(msecs / 1000));
}
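
/**
 * batadv_v_ogm_start_timer() - restart the OGM sending timer
 * @bat_priv: the bat priv with all the soft interface information
 */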
static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv)
{
	unsigned long msecs;

	/* this function may be invoked in different contexts (ogm rescheduling
	 * or hard_iface activation), but the work timer should not be reset
	 */
	if (delayed_work_pending(&bat_priv->bat_v.ogm_wq))
		return;

	msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
	msecs += prandom_u32() % (2 * BATADV_JITTER);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bat_v.ogm_wq,
			   msecs_to_jiffies(msecs));
}
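
/**
 * batadv_v_ogm_send_to_if() - send an OGM on a given interface
 * @skb: the OGM to send
 * @hard_iface: the interface to use to send the OGM
 */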
static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
				    struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	/* the caller hands over the skb, so it has to be freed here as well
	 * when the interface cannot transmit it
	 */
	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		kfree_skb(skb);
		return;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
			   skb->len + ETH_HLEN);

	batadv_send_broadcast_skb(skb, hard_iface);
}
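
/**
 * batadv_v_ogm_len() - OGMv2 packet length
 * @skb: the OGM to check
 *
 * Return: Length of the given OGMv2 packet, including tvlv length, excluding
 * ethernet header length.
 */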
static unsigned int batadv_v_ogm_len(struct sk_buff *skb)
{
	struct batadv_ogm2_packet *ogm_packet;

	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
	return BATADV_OGM2_HLEN + ntohs(ogm_packet->tvlv_len);
}
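
/**
 * batadv_v_ogm_queue_left() - check if given OGM still fits aggregation queue
 * @skb: the OGM to check
 * @hard_iface: the interface to use to send the OGM
 *
 * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
 *
 * Return: True, if the given OGMv2 packet still fits, false otherwise.
 */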
static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
				    struct batadv_hard_iface *hard_iface)
{
	unsigned int max = min_t(unsigned int, hard_iface->net_dev->mtu,
				 BATADV_MAX_AGGREGATION_BYTES);
	unsigned int ogm_len = batadv_v_ogm_len(skb);

	lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);

	return hard_iface->bat_v.aggr_len + ogm_len <= max;
}
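
/**
 * batadv_v_ogm_aggr_list_free() - free all elements in an aggregation queue
 * @hard_iface: the interface holding the aggregation queue
 *
 * Empties the OGMv2 aggregation queue and frees all the skbs it contains.
 *
 * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
 */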
static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
{
	struct sk_buff *skb;

	lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);

	while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list)))
		kfree_skb(skb);

	hard_iface->bat_v.aggr_len = 0;
}
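
/**
 * batadv_v_ogm_aggr_send() - flush & send aggregation queue
 * @hard_iface: the interface with the aggregation queue to flush
 *
 * Aggregates all OGMv2 packets currently in the aggregation queue into a
 * single OGMv2 packet and transmits this aggregate.
 *
 * The aggregation queue is empty after this call.
 *
 * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
 */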
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{
	unsigned int aggr_len = hard_iface->bat_v.aggr_len;
	struct sk_buff *skb_aggr;
	unsigned int ogm_len;
	struct sk_buff *skb;

	lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);

	if (!aggr_len)
		return;

	skb_aggr = dev_alloc_skb(aggr_len + ETH_HLEN + NET_IP_ALIGN);
	if (!skb_aggr) {
		/* allocation failed: drop all queued OGMs */
		batadv_v_ogm_aggr_list_free(hard_iface);
		return;
	}

	skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
	skb_reset_network_header(skb_aggr);

	while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
		hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);

		ogm_len = batadv_v_ogm_len(skb);
		skb_put_data(skb_aggr, skb->data, ogm_len);

		consume_skb(skb);
	}

	batadv_v_ogm_send_to_if(skb_aggr, hard_iface);
}
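
/**
 * batadv_v_ogm_queue_on_if() - queue an OGM on a given interface
 * @skb: the OGM to queue
 * @hard_iface: the interface to queue the OGM on
 *
 * If aggregation is disabled the OGM is transmitted immediately; otherwise it
 * is appended to the aggregation queue, flushing the queue first if the OGM
 * does not fit anymore.
 */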
static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
				     struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if (!atomic_read(&bat_priv->aggregated_ogms)) {
		batadv_v_ogm_send_to_if(skb, hard_iface);
		return;
	}

	spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
	if (!batadv_v_ogm_queue_left(skb, hard_iface))
		batadv_v_ogm_aggr_send(hard_iface);

	hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
	skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
	spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
}
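
/**
 * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Caller needs to hold the bat_priv->bat_v.ogm_buff_mutex.
 */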
static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_ogm2_packet *ogm_packet;
	struct sk_buff *skb, *skb_tmp;
	unsigned char *ogm_buff;
	int ogm_buff_len;
	u16 tvlv_len = 0;
	int ret;

	lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	ogm_buff = bat_priv->bat_v.ogm_buff;
	ogm_buff_len = bat_priv->bat_v.ogm_buff_len;

	/* tt changes have to be committed before the tvlv data is
	 * appended as it may alter the tt tvlv container
	 */
	batadv_tt_local_commit_changes(bat_priv);
	tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, &ogm_buff,
						    &ogm_buff_len,
						    BATADV_OGM2_HLEN);

	bat_priv->bat_v.ogm_buff = ogm_buff;
	bat_priv->bat_v.ogm_buff_len = ogm_buff_len;

	skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + ogm_buff_len);
	if (!skb)
		goto reschedule;

	skb_reserve(skb, ETH_HLEN);
	skb_put_data(skb, ogm_buff, ogm_buff_len);

	ogm_packet = (struct batadv_ogm2_packet *)skb->data;
	ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno));
	atomic_inc(&bat_priv->bat_v.ogm_seqno);
	ogm_packet->tvlv_len = htons(tvlv_len);

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		ret = batadv_hardif_no_broadcast(hard_iface, NULL, NULL);
		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselves on %s suppressed: %s\n",
				   hard_iface->net_dev->name, type);

			batadv_hardif_put(hard_iface);
			continue;
		}

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Sending own OGM2 packet (originator %pM, seqno %u, throughput %u, TTL %d) on interface %s [%pM]\n",
			   ogm_packet->orig, ntohl(ogm_packet->seqno),
			   ntohl(ogm_packet->throughput), ogm_packet->ttl,
			   hard_iface->net_dev->name,
			   hard_iface->net_dev->dev_addr);

		/* this skb gets consumed by batadv_v_ogm_queue_on_if() */
		skb_tmp = skb_clone(skb, GFP_ATOMIC);
		if (!skb_tmp) {
			batadv_hardif_put(hard_iface);
			break;
		}

		batadv_v_ogm_queue_on_if(skb_tmp, hard_iface);
		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	consume_skb(skb);

reschedule:
	batadv_v_ogm_start_timer(bat_priv);
out:
	return;
}
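
/**
 * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
 * @work: work queue item
 */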
static void batadv_v_ogm_send(struct work_struct *work)
{
	struct batadv_priv_bat_v *bat_v;
	struct batadv_priv *bat_priv;

	bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
	bat_priv = container_of(bat_v, struct batadv_priv, bat_v);

	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
	batadv_v_ogm_send_softif(bat_priv);
	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
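
/**
 * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
 * @work: work queue item
 *
 * Emits aggregated OGM messages in regular intervals.
 */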
void batadv_v_ogm_aggr_work(struct work_struct *work)
{
	struct batadv_hard_iface_bat_v *batv;
	struct batadv_hard_iface *hard_iface;

	batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
	hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);

	spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
	batadv_v_ogm_aggr_send(hard_iface);
	spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);

	batadv_v_ogm_start_queue_timer(hard_iface);
}
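
/**
 * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
 * @hard_iface: the interface to prepare
 *
 * Takes care of scheduling the OGM and aggregation workers for this interface.
 *
 * Return: 0 on success or a negative error code otherwise
 */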
int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	batadv_v_ogm_start_queue_timer(hard_iface);
	batadv_v_ogm_start_timer(bat_priv);

	return 0;
}
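
/**
 * batadv_v_ogm_iface_disable() - release OGM interface private resources
 * @hard_iface: the interface to deinitialize
 */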
void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
	cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);

	spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
	batadv_v_ogm_aggr_list_free(hard_iface);
	spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
}
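
/**
 * batadv_v_ogm_primary_iface_set() - set a new primary interface
 * @primary_iface: the new primary interface
 */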
void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
	struct batadv_ogm2_packet *ogm_packet;

	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
	if (!bat_priv->bat_v.ogm_buff)
		goto unlock;

	ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
	ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);

unlock:
	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}
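
/**
 * batadv_v_forward_penalty() - apply a penalty to the throughput metric
 *  forwarded with B.A.T.M.A.N. V OGMs
 * @bat_priv: the bat priv with all the soft interface information
 * @if_incoming: the interface where the OGM has been received
 * @if_outgoing: the interface where the OGM has to be forwarded to
 * @throughput: the current throughput
 *
 * Apply a penalty on the current throughput metric value based on the
 * characteristic of the interface where the OGM has been received:
 * - the full throughput is kept if the outgoing interface is the default
 *   interface, i.e. this OGM is processed for the internal table and not
 *   forwarded
 * - the throughput is halved if incoming and outgoing interface are the
 *   same and the interface cannot operate in full duplex
 * - the configured hop penalty is applied otherwise
 *
 * Return: the penalised throughput metric.
 */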
static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *if_incoming,
				    struct batadv_hard_iface *if_outgoing,
				    u32 throughput)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
	int hop_penalty_max = BATADV_TQ_MAX_VALUE;

	/* Don't apply hop penalty in default originator table. */
	if (if_outgoing == BATADV_IF_DEFAULT)
		return throughput;

	/* Forwarding on the same interface: the medium is shared between
	 * reception and retransmission, so apply the half duplex penalty
	 * unless the interface reports full duplex operation.
	 */
	if (throughput > 10 &&
	    if_incoming == if_outgoing &&
	    !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
		return throughput / 2;

	/* Apply the configured hop penalty (a penalty of 255 equals 100%). */
	return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max;
}
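
/**
 * batadv_v_ogm_forward() - check conditions and forward an OGM to the given
 *  outgoing interface
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_received: previously received OGM to be forwarded
 * @orig_node: the originator which sent the OGM
 * @neigh_node: the neigh_node through which the OGM has been received
 * @if_incoming: the interface on which this OGM was received
 * @if_outgoing: the interface to which the OGM has to be forwarded
 *
 * Forward an OGM to an interface after having altered the throughput metric
 * and the TTL value contained in it. The original OGM isn't modified.
 */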
static void batadv_v_ogm_forward(struct batadv_priv *bat_priv,
				 const struct batadv_ogm2_packet *ogm_received,
				 struct batadv_orig_node *orig_node,
				 struct batadv_neigh_node *neigh_node,
				 struct batadv_hard_iface *if_incoming,
				 struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	struct batadv_neigh_node *router = NULL;
	struct batadv_ogm2_packet *ogm_forward;
	unsigned char *skb_buff;
	struct sk_buff *skb;
	size_t packet_len;
	u16 tvlv_len;

	/* only forward for specific interfaces, not for the default one. */
	if (if_outgoing == BATADV_IF_DEFAULT)
		goto out;

	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
	if (!orig_ifinfo)
		goto out;

	/* acquire possibly updated router */
	router = batadv_orig_router_get(orig_node, if_outgoing);

	/* strict rule: forward packets coming from the best next hop only */
	if (neigh_node != router)
		goto out;

	/* don't forward the same seqno twice on one interface */
	if (orig_ifinfo->last_seqno_forwarded == ntohl(ogm_received->seqno))
		goto out;

	orig_ifinfo->last_seqno_forwarded = ntohl(ogm_received->seqno);

	if (ogm_received->ttl <= 1) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
		goto out;
	}

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);
	if (!neigh_ifinfo)
		goto out;

	tvlv_len = ntohs(ogm_received->tvlv_len);

	packet_len = BATADV_OGM2_HLEN + tvlv_len;
	skb = netdev_alloc_skb_ip_align(if_outgoing->net_dev,
					ETH_HLEN + packet_len);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	skb_buff = skb_put_data(skb, ogm_received, packet_len);

	/* apply forward penalty and decrement the TTL */
	ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
	ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
	ogm_forward->ttl--;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Forwarding OGM2 packet on %s: throughput %u, ttl %u, received via %s\n",
		   if_outgoing->net_dev->name, ntohl(ogm_forward->throughput),
		   ogm_forward->ttl, if_incoming->net_dev->name);

	batadv_v_ogm_queue_on_if(skb, if_outgoing);

out:
	if (orig_ifinfo)
		batadv_orig_ifinfo_put(orig_ifinfo);
	if (router)
		batadv_neigh_node_put(router);
	if (neigh_ifinfo)
		batadv_neigh_ifinfo_put(neigh_ifinfo);
}
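
/**
 * batadv_v_ogm_metric_update() - update route metric based on OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm2: OGM2 structure
 * @orig_node: Originator structure for which the OGM has been received
 * @neigh_node: the neigh_node through which the OGM has been received
 * @if_incoming: the interface where this packet was received
 * @if_outgoing: the interface for which the packet should be considered
 *
 * Return:
 *  1  if the OGM is new,
 *  0  if it is not new but valid,
 *  <0 on error (e.g. old OGM)
 */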
static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv,
				      const struct batadv_ogm2_packet *ogm2,
				      struct batadv_orig_node *orig_node,
				      struct batadv_neigh_node *neigh_node,
				      struct batadv_hard_iface *if_incoming,
				      struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
	bool protection_started = false;
	int ret = -EINVAL;
	u32 path_throughput;
	s32 seq_diff;

	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
	if (!orig_ifinfo)
		goto out;

	seq_diff = ntohl(ogm2->seqno) - orig_ifinfo->last_real_seqno;

	if (!hlist_empty(&orig_node->neigh_list) &&
	    batadv_window_protected(bat_priv, seq_diff,
				    BATADV_OGM_MAX_AGE,
				    &orig_ifinfo->batman_seqno_reset,
				    &protection_started)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: packet within window protection time from %pM\n",
			   ogm2->orig);
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Last reset: %ld, %ld\n",
			   orig_ifinfo->batman_seqno_reset, jiffies);
		goto out;
	}

	/* drop packets with old seqnos, however accept the first packet after
	 * a host has been rebooted
	 */
	if (seq_diff < 0 && !protection_started)
		goto out;

	neigh_node->last_seen = jiffies;

	orig_node->last_seen = jiffies;

	orig_ifinfo->last_real_seqno = ntohl(ogm2->seqno);
	orig_ifinfo->last_ttl = ogm2->ttl;

	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
	if (!neigh_ifinfo)
		goto out;

	path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming,
						   if_outgoing,
						   ntohl(ogm2->throughput));
	neigh_ifinfo->bat_v.throughput = path_throughput;
	neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
	neigh_ifinfo->last_ttl = ogm2->ttl;

	if (seq_diff > 0 || protection_started)
		ret = 1;
	else
		ret = 0;
out:
	if (orig_ifinfo)
		batadv_orig_ifinfo_put(orig_ifinfo);
	if (neigh_ifinfo)
		batadv_neigh_ifinfo_put(neigh_ifinfo);

	return ret;
}
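
/**
 * batadv_v_ogm_route_update() - update routes based on OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the Ethernet header of the OGM2
 * @ogm2: OGM2 structure
 * @orig_node: Originator structure for which the OGM has been received
 * @neigh_node: the neigh_node through which the OGM has been received
 * @if_incoming: the interface where this packet was received
 * @if_outgoing: the interface for which the packet should be considered
 *
 * Return: true if the packet should be forwarded, false otherwise
 */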
static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv,
				      const struct ethhdr *ethhdr,
				      const struct batadv_ogm2_packet *ogm2,
				      struct batadv_orig_node *orig_node,
				      struct batadv_neigh_node *neigh_node,
				      struct batadv_hard_iface *if_incoming,
				      struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *router = NULL;
	struct batadv_orig_node *orig_neigh_node;
	struct batadv_neigh_node *orig_neigh_router = NULL;
	struct batadv_neigh_ifinfo *router_ifinfo = NULL, *neigh_ifinfo = NULL;
	u32 router_throughput, neigh_throughput;
	u32 router_last_seqno;
	u32 neigh_last_seqno;
	s32 neigh_seq_diff;
	bool forward = false;

	orig_neigh_node = batadv_v_ogm_orig_get(bat_priv, ethhdr->h_source);
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = batadv_orig_router_get(orig_neigh_node,
						   if_outgoing);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it
	 */
	router = batadv_orig_router_get(orig_node, if_outgoing);
	if (router && router->orig_node != orig_node && !orig_neigh_router) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: OGM via unknown neighbor!\n");
		goto out;
	}

	/* Mark the OGM to be considered for forwarding, and update routes
	 * if needed.
	 */
	forward = true;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Searching and updating originator entry of received packet\n");

	/* if this neighbor already is our next hop there is nothing
	 * to change
	 */
	if (router == neigh_node)
		goto out;

	/* don't consider neighbours with worse throughput.
	 * Also switch route if this seqno is BATADV_OGM_MAX_ORIGDIFF newer
	 * than the last received seqno from our best next hop.
	 */
	if (router) {
		router_ifinfo = batadv_neigh_ifinfo_get(router, if_outgoing);
		neigh_ifinfo = batadv_neigh_ifinfo_get(neigh_node, if_outgoing);

		/* if these are not allocated, something is wrong. */
		if (!router_ifinfo || !neigh_ifinfo)
			goto out;

		neigh_last_seqno = neigh_ifinfo->bat_v.last_seqno;
		router_last_seqno = router_ifinfo->bat_v.last_seqno;
		neigh_seq_diff = neigh_last_seqno - router_last_seqno;
		router_throughput = router_ifinfo->bat_v.throughput;
		neigh_throughput = neigh_ifinfo->bat_v.throughput;

		if (neigh_seq_diff < BATADV_OGM_MAX_ORIGDIFF &&
		    router_throughput >= neigh_throughput)
			goto out;
	}

	batadv_update_route(bat_priv, orig_node, if_outgoing, neigh_node);
out:
	if (router)
		batadv_neigh_node_put(router);
	if (orig_neigh_router)
		batadv_neigh_node_put(orig_neigh_router);
	if (orig_neigh_node)
		batadv_orig_node_put(orig_neigh_node);
	if (router_ifinfo)
		batadv_neigh_ifinfo_put(router_ifinfo);
	if (neigh_ifinfo)
		batadv_neigh_ifinfo_put(neigh_ifinfo);

	return forward;
}
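
/**
 * batadv_v_ogm_process_per_outif() - process an OGM for an outgoing interface
 * @bat_priv: the bat priv with all the soft interface information
 * @ethhdr: the Ethernet header of the OGM2
 * @ogm2: OGM2 structure
 * @orig_node: Originator structure for which the OGM has been received
 * @neigh_node: the neigh_node through which the OGM has been received
 * @if_incoming: the interface where this packet was received
 * @if_outgoing: the interface for which the packet should be considered
 */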
static void
batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
			       const struct ethhdr *ethhdr,
			       const struct batadv_ogm2_packet *ogm2,
			       struct batadv_orig_node *orig_node,
			       struct batadv_neigh_node *neigh_node,
			       struct batadv_hard_iface *if_incoming,
			       struct batadv_hard_iface *if_outgoing)
{
	int seqno_age;
	bool forward;

	/* first, update the metric with the according sanity checks */
	seqno_age = batadv_v_ogm_metric_update(bat_priv, ogm2, orig_node,
					       neigh_node, if_incoming,
					       if_outgoing);

	/* outdated sequence numbers are to be discarded */
	if (seqno_age < 0)
		return;

	/* only unknown & newer OGMs contain TVLVs we are interested in */
	if (seqno_age > 0 && if_outgoing == BATADV_IF_DEFAULT)
		batadv_tvlv_containers_process(bat_priv, true, orig_node,
					       NULL, NULL,
					       (unsigned char *)(ogm2 + 1),
					       ntohs(ogm2->tvlv_len));

	/* if the metric update went through, update routes if needed */
	forward = batadv_v_ogm_route_update(bat_priv, ethhdr, ogm2, orig_node,
					    neigh_node, if_incoming,
					    if_outgoing);

	/* if the routes have been processed correctly, check and forward */
	if (forward)
		batadv_v_ogm_forward(bat_priv, ogm2, orig_node, neigh_node,
				     if_incoming, if_outgoing);
}
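
/**
 * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
 * @buff_pos: current position in the skb
 * @packet_len: total length of the skb
 * @ogm2_packet: potential OGM2 in buffer
 *
 * Return: true if there is enough space for another OGM, false otherwise.
 */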
static bool
batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
			 const struct batadv_ogm2_packet *ogm2_packet)
{
	int next_buff_pos = 0;

	/* check if there is enough space for the header */
	next_buff_pos += buff_pos + sizeof(*ogm2_packet);
	if (next_buff_pos > packet_len)
		return false;

	/* check if there is enough space for the optional TVLV */
	next_buff_pos += ntohs(ogm2_packet->tvlv_len);

	return (next_buff_pos <= packet_len) &&
	       (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
}
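
/**
 * batadv_v_ogm_process() - process an incoming batman v OGM
 * @skb: the skb containing the OGM
 * @ogm_offset: offset to the OGM which should be processed (for aggregates)
 * @if_incoming: the interface where this packet was received
 */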
static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
				 struct batadv_hard_iface *if_incoming)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct ethhdr *ethhdr;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_hard_iface *hard_iface;
	struct batadv_ogm2_packet *ogm_packet;
	u32 ogm_throughput, link_throughput, path_throughput;
	int ret;

	ethhdr = eth_hdr(skb);
	ogm_packet = (struct batadv_ogm2_packet *)(skb->data + ogm_offset);

	ogm_throughput = ntohl(ogm_packet->throughput);

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, throughput %u, TTL %u, V %u, tvlv_len %u)\n",
		   ethhdr->h_source, if_incoming->net_dev->name,
		   if_incoming->net_dev->dev_addr, ogm_packet->orig,
		   ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
		   ogm_packet->version, ntohs(ogm_packet->tvlv_len));

	/* If the throughput metric is 0, immediately drop the packet. No need
	 * to create orig_node / neigh_node for an unusable route.
	 */
	if (ogm_throughput == 0) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: originator packet with throughput metric of 0\n");
		return;
	}

	/* the neighbor must already be known on this interface */
	hardif_neigh = batadv_hardif_neigh_get(if_incoming, ethhdr->h_source);
	if (!hardif_neigh) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: OGM via unknown neighbor!\n");
		goto out;
	}

	orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
	if (!orig_node)
		goto out;

	neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
						     ethhdr->h_source);
	if (!neigh_node)
		goto out;

	/* Update the received throughput metric to match the link
	 * characteristic:
	 *  - If this OGM traveled one hop so far (emitted by single hop
	 *    neighbor) the path throughput metric equals the link throughput.
	 *  - For OGMs traversing more than one hop the path throughput metric
	 *    is the smaller of the path throughput and the link throughput.
	 */
	link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
	path_throughput = min_t(u32, link_throughput, ogm_throughput);
	ogm_packet->throughput = htonl(path_throughput);

	batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node,
				       neigh_node, if_incoming,
				       BATADV_IF_DEFAULT);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		ret = batadv_hardif_no_broadcast(hard_iface,
						 ogm_packet->orig,
						 hardif_neigh->orig);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s suppressed: %s\n",
				   ogm_packet->orig, hard_iface->net_dev->name,
				   type);

			batadv_hardif_put(hard_iface);
			continue;
		}

		batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet,
					       orig_node, neigh_node,
					       if_incoming, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();
out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);
	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
}
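
/**
 * batadv_v_ogm_packet_recv() - OGM2 receiving handler
 * @skb: the received OGM
 * @if_incoming: the interface where this OGM has been received
 *
 * Return: NET_RX_SUCCESS and consumes the skb if the packet has been handled,
 * NET_RX_DROP and frees the skb otherwise.
 */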
int batadv_v_ogm_packet_recv(struct sk_buff *skb,
			     struct batadv_hard_iface *if_incoming)
{
	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batadv_ogm2_packet *ogm_packet;
	struct ethhdr *ethhdr = eth_hdr(skb);
	int ogm_offset;
	u8 *packet_pos;
	int ret = NET_RX_DROP;

	/* did we receive a OGM2 packet on an interface
	 * that has B.A.T.M.A.N. V enabled ?
	 */
	if (strcmp(bat_priv->algo_ops->name, "BATMAN_V") != 0)
		goto free_skb;

	if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN))
		goto free_skb;

	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
		goto free_skb;

	ogm_packet = (struct batadv_ogm2_packet *)skb->data;

	if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
		goto free_skb;

	batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
			   skb->len + ETH_HLEN);

	ogm_offset = 0;
	ogm_packet = (struct batadv_ogm2_packet *)skb->data;

	/* walk through all OGMs contained in this (possibly aggregated) skb */
	while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
					ogm_packet)) {
		batadv_v_ogm_process(skb, ogm_offset, if_incoming);

		ogm_offset += BATADV_OGM2_HLEN;
		ogm_offset += ntohs(ogm_packet->tvlv_len);

		packet_pos = skb->data + ogm_offset;
		ogm_packet = (struct batadv_ogm2_packet *)packet_pos;
	}

	ret = NET_RX_SUCCESS;

free_skb:
	if (ret == NET_RX_SUCCESS)
		consume_skb(skb);
	else
		kfree_skb(skb);

	return ret;
}
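
/**
 * batadv_v_ogm_init() - initialise the OGM2 engine
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success or a negative error code in case of failure
 */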
int batadv_v_ogm_init(struct batadv_priv *bat_priv)
{
	struct batadv_ogm2_packet *ogm_packet;
	unsigned char *ogm_buff;
	u32 random_seqno;

	bat_priv->bat_v.ogm_buff_len = BATADV_OGM2_HLEN;
	ogm_buff = kzalloc(bat_priv->bat_v.ogm_buff_len, GFP_ATOMIC);
	if (!ogm_buff)
		return -ENOMEM;

	bat_priv->bat_v.ogm_buff = ogm_buff;
	ogm_packet = (struct batadv_ogm2_packet *)ogm_buff;
	ogm_packet->packet_type = BATADV_OGM2;
	ogm_packet->version = BATADV_COMPAT_VERSION;
	ogm_packet->ttl = BATADV_TTL;
	ogm_packet->flags = BATADV_NO_FLAGS;
	ogm_packet->throughput = htonl(BATADV_THROUGHPUT_MAX_VALUE);

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
	INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);

	mutex_init(&bat_priv->bat_v.ogm_buff_mutex);

	return 0;
}
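
/**
 * batadv_v_ogm_free() - free OGM private resources
 * @bat_priv: the bat priv with all the soft interface information
 */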
void batadv_v_ogm_free(struct batadv_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);

	mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);

	kfree(bat_priv->bat_v.ogm_buff);
	bat_priv->bat_v.ogm_buff = NULL;
	bat_priv->bat_v.ogm_buff_len = 0;

	mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
}