/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "hash.h"
#include "hard-interface.h"
#include "originator.h"
#include "bridge_loop_avoidance.h"
#include "translation-table.h"
#include "send.h"

#include <linux/etherdevice.h>
#include <linux/crc16.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/if_vlan.h>

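/* First four bytes of the "special" source MAC used in ANNOUNCE frames;
 * batadv_bla_send_announce() appends the two byte claim table CRC to form
 * the full address, and batadv_handle_announce() matches on this prefix.
 */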
static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);

/* return the index of the claim */
static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
{
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	uint32_t hash = 0;

	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));

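	/* final mixing steps of a Jenkins one-at-a-time style hash; the
	 * per-byte mixing is assumed to happen in batadv_hash_bytes()
	 */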
	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* return the index of the backbone gateway */
static inline uint32_t batadv_choose_backbone_gw(const void *data,
						 uint32_t size)
{
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	uint32_t hash = 0;

	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

/* compares address and vid of two backbone gws */
static int batadv_compare_backbone_gw(const struct hlist_node *node,
				      const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return 0;

	if (gw1->vid != gw2->vid)
		return 0;

	return 1;
}

/* compares address and vid of two claims */
static int batadv_compare_claim(const struct hlist_node *node,
				const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return 0;

	if (cl1->vid != cl2->vid)
		return 0;

	return 1;
}

/* free a backbone gw */
static void
batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}

/* finally deinitialize the claim */
static void batadv_claim_release(struct batadv_bla_claim *claim)
{
	batadv_backbone_gw_free_ref(claim->backbone_gw);
	kfree_rcu(claim, rcu);
}

/* free a claim, call batadv_claim_release() if it is the last reference */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		batadv_claim_release(claim);
}

/**
 * batadv_claim_hash_find - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * looks for a claim in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct batadv_bla_claim
*batadv_claim_hash_find(struct batadv_priv *bat_priv,
			struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Returns the backbone gateway if found or NULL otherwise.
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv,
			  uint8_t *addr, unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/* delete all claims for a backbone */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_free_ref(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
}

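/* Claim frames are encoded as short ARP replies on the backbone LAN:
 * the IP fields are 0.0.0.0, the ARP target hardware address carries the
 * 6 byte claim destination (3 byte magic, 1 byte claim type, 2 byte group
 * id) and, depending on the claim type, the client or backbone gateway
 * address is placed in the Ethernet/ARP source fields below. The finished
 * frame is handed to netif_rx() on the soft interface, so that a bridge on
 * top of it can forward the claim onto the attached LAN segment.
 */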
/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	uint8_t *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX = claim type
			  * and YY:YY = group id
			  */
			 (uint8_t *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the client's mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the client's mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gw's mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG)
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}

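/* A backbone gateway entry describes a batman originator (identified by
 * its MAC address and VLAN) which is connected to the same LAN segment as
 * we are, i.e. another node with which a bridge loop could be formed.
 */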
/**
 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * searches for the backbone gw or creates a new one if it could not
 * be found.
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);

	/* one for the hash, one for returning */
	atomic_set(&entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_free_ref(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

/* update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_free_ref(backbone_gw);
}

/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_free_ref(backbone_gw);
}

/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway with whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * batadv_bla_send_announce - send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * This function sends an announcement. It is called from multiple
 * places.
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}

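/* CRC bookkeeping: every backbone gateway keeps a running CRC over its
 * claim table, built by XORing crc16() of each claimed client address into
 * backbone_gw->crc (and XORing it out again on removal or ownership
 * change). ANNOUNCE frames carry this CRC so that peers can detect a lost
 * claim update and ask for a full resync with a REQUEST frame.
 */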
/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		atomic_set(&claim->refcount, 2);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		batadv_backbone_gw_free_ref(claim->backbone_gw);
	}
	/* set (new) backbone gw */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_free_ref(claim);
}

/* Delete a claim from the claim hash which has the
 * given mac address and vid.
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_free_ref(claim); /* reference from the hash is gone */

	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	batadv_claim_free_ref(claim);
}

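/* The four claim frame types are dispatched from batadv_bla_process_claim():
 * - CLAIM:    client address in the Ethernet source, backbone in hw_src
 * - UNCLAIM:  backbone address in the Ethernet source, client in hw_src
 * - ANNOUNCE: backbone in the Ethernet source, CRC-carrying magic in hw_src
 * - REQUEST:  addressed to the backbone gateway in the Ethernet destination
 */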
/* check for ANNOUNCE frame, return 1 if handled */
static int batadv_handle_announce(struct batadv_priv *bat_priv,
				  uint8_t *an_addr, uint8_t *backbone_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	uint16_t crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return 0;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_gw->crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* check for REQUEST frame, return 1 if handled */
static int batadv_handle_request(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 struct ethhdr *ethhdr, unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}

/* check for UNCLAIM frame, return 1 if handled */
static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 uint8_t *claim_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* check for CLAIM frame, return 1 if handled */
static int batadv_handle_claim(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       uint8_t *backbone_addr, uint8_t *claim_addr,
			       unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/**
 * batadv_check_claim_group - check if a claim frame is from our claim group
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it is on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * returns:
 *	2  - if it is a claim packet and on the same group
 *	1  - if it is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    uint8_t *hw_src, uint8_t *hw_dst,
				    struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if it's already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* let's see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friend's group id is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_free_ref(orig_node);

	return 2;
}

/**
 * batadv_bla_process_claim - check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the caller that it can use the frame on its own.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	uint8_t *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return 0;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return 0; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return 1;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return 1;
}

/* Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims - remove claims which timed out
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_compare_eth(claim->backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      claim->backbone_gw->orig,
					      claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_update_orig_address - update the backbone gateways for a new MAC
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/* periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */

			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}

/* The claim hash and the backbone hash receive the same lock class key
 * because they are initialized by hash_new with the same key. Reinitialize
 * them with two different keys to allow nested locking without generating
 * lockdep warnings.
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;

/* initialize all bla structures */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	uint16_t crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_free_ref(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}

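/* The broadcast duplicate list is a small ring buffer: the newest entry is
 * always stored at bcast_duplist_curr, so the search below walks from the
 * newest to the oldest entry and may stop at the first one that timed out.
 */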
/**
 * batadv_bla_check_bcast_duplist - check broadcasts for duplicates
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * check if it is on our broadcast list. Another gateway might
 * have sent the same packet because it is connected to the same backbone,
 * so we have to remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 */
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	int i, curr, ret = 0;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		ret = 1;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, it's the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}

/**
 * batadv_bla_is_backbone_gw_orig - check if an originator is a backbone gw
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Check if the originator is a gateway for the VLAN identified by vid.
 *
 * Returns true if orig is a backbone for this vid, false otherwise.
 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}

/**
 * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
 * if the orig_node is also a gateway on the soft interface, otherwise it
 * returns 0.
 */
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
			      struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return 0;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return 0;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return 0;

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}

/* free all bla structures (for softinterface free or module unload) */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}

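/* While any claim REQUEST is outstanding (bla.num_requests > 0), the claim
 * tables are considered out of sync; both batadv_bla_rx() and
 * batadv_bla_tx() therefore drop broadcast/multicast traffic until the
 * requests have been answered, to avoid loops while the tables are rebuilt.
 */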
/**
 * batadv_bla_rx - check if a received frame is allowed on the LAN
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * bla_rx checks if:
 *  * we have to race for a claim
 *  * the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid, bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}

/**
 * batadv_bla_tx - check if a frame may be sent into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 *
 * This call might reallocate skb data.
 */
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}

int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	uint32_t i;
	bool is_own;
	uint8_t *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n",
		   "Client", "VID", "Originator", "CRC");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			is_own = batadv_compare_eth(claim->backbone_gw->orig,
						    primary_addr);
			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, BATADV_PRINT_VID(claim->vid),
				   claim->backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   claim->backbone_gw->crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}

int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	uint32_t i;
	bool is_own;
	uint8_t *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n",
		   "Originator", "VID", "last seen", "CRC");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   BATADV_PRINT_VID(backbone_gw->vid), secs,
				   msecs, backbone_gw->crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}